I found some code for a CUDA matrix-vector product in a previous topic: Matrix-vector multiplication in CUDA: benchmarking & performance. First, I was wondering why the author didn't use shared memory for dA (the matrix)? (There is a sketch after the code of what I mean by that.)
Second, why is column-major ordering faster than row-major ordering?
Here is the code:
template<typename T>
__global__ void matvec_kernel(const T * __restrict__ dA, const T * __restrict__ dx,
                              T * __restrict__ dy,
                              const unsigned int nRows, const unsigned int nCols)
{
    // BLOCK_SIZE is a compile-time constant defined elsewhere in the original topic;
    // it is both the number of threads per block and the tile width.
    const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;

    __shared__ T x_shared[BLOCK_SIZE];

    T y_val = 0.0;

    #pragma unroll
    for (unsigned int m = 0; m < ((nCols + BLOCK_SIZE - 1) / BLOCK_SIZE); ++m)
    {
        // Stage the current BLOCK_SIZE-wide segment of x in shared memory,
        // zero-padding past the end of the vector.
        if ((m * BLOCK_SIZE + threadIdx.x) < nCols)
            x_shared[threadIdx.x] = dx[threadIdx.x + m * BLOCK_SIZE];
        else
            x_shared[threadIdx.x] = 0.f;
        __syncthreads();

        #pragma unroll
        for (unsigned int e = 0; e < BLOCK_SIZE; ++e) {
            // --- Column-major ordering - faster
            // Note: this read is unguarded; threads with tid >= nRows in the
            // last block read past the end of dA unless the allocation is padded.
            y_val += dA[tid + (e + BLOCK_SIZE * m) * nRows] * x_shared[e];
            // --- Row-major ordering - slower
            //y_val += dA[tid * nCols + (e + BLOCK_SIZE * m)] * x_shared[e];
        }
        __syncthreads();
    }

    if (tid < nRows) dy[tid] = y_val;
}
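To make the first question concrete, here is a rough sketch (entirely my own, not from the original topic) of what I would have expected a "shared memory for dA" version to look like. It assumes BLOCK_SIZE is small enough that a BLOCK_SIZE x BLOCK_SIZE tile fits in shared memory (e.g. BLOCK_SIZE = 32 for float), and the kernel name is mine:

// Hypothetical variant of the kernel above that also stages a
// BLOCK_SIZE x BLOCK_SIZE tile of dA in shared memory.
// Assumes BLOCK_SIZE^2 * sizeof(T) fits in shared memory (e.g. BLOCK_SIZE = 32).
template<typename T>
__global__ void matvec_kernel_shared_A(const T * __restrict__ dA,
                                       const T * __restrict__ dx,
                                       T * __restrict__ dy,
                                       const unsigned int nRows,
                                       const unsigned int nCols)
{
    const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;

    __shared__ T x_shared[BLOCK_SIZE];
    __shared__ T A_shared[BLOCK_SIZE][BLOCK_SIZE + 1]; // +1 pad against bank conflicts

    T y_val = 0.0;

    for (unsigned int m = 0; m < ((nCols + BLOCK_SIZE - 1) / BLOCK_SIZE); ++m)
    {
        // Stage the current segment of x, as in the original kernel.
        const unsigned int col0 = m * BLOCK_SIZE + threadIdx.x;
        x_shared[threadIdx.x] = (col0 < nCols) ? dx[col0] : T(0);

        // Stage the tile of dA; the column-major global loads stay coalesced
        // (for a fixed e, consecutive threads read consecutive rows).
        for (unsigned int e = 0; e < BLOCK_SIZE; ++e) {
            const unsigned int col = m * BLOCK_SIZE + e;
            A_shared[threadIdx.x][e] = (tid < nRows && col < nCols)
                                     ? dA[tid + col * nRows]
                                     : T(0);
        }
        __syncthreads();

        for (unsigned int e = 0; e < BLOCK_SIZE; ++e)
            y_val += A_shared[threadIdx.x][e] * x_shared[e];

        __syncthreads();
    }

    if (tid < nRows) dy[tid] = y_val;
}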
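For completeness, here is how I assume the original kernel is launched (a minimal host-side driver of my own; the function name and the one-thread-per-row configuration are my guesses, not from the original topic):

// Hypothetical host-side launch: one thread per matrix row, rounded up
// to whole blocks of BLOCK_SIZE threads. dA, dx, dy are assumed to be
// device pointers already allocated with cudaMalloc, dA stored column-major.
void matvec(const float *dA, const float *dx, float *dy,
            unsigned int nRows, unsigned int nCols)
{
    const unsigned int nBlocks = (nRows + BLOCK_SIZE - 1) / BLOCK_SIZE;
    matvec_kernel<float><<<nBlocks, BLOCK_SIZE>>>(dA, dx, dy, nRows, nCols);
}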
I've been thinking about these two questions for a day now, which is why I'm here.
Thanks a lot!