
I'm a beginner at CUDA programming. I'm trying to write a simple program of my own, but it's not working and I don't know what else to do.

My code:

#include <mpi.h>
#include <cuda.h>
#include <stdio.h>
#include <sys/wait.h>
// Prototypes
__global__ void helloWorld(char*);
__device__ int  getGlobalIdx_2D_2D();

// Host function

int main(int argc, char** argv)
{
    unsigned int i, N, gridX, gridY, blockX, blockY;
    N = 4096000;

    char *str = (char *) malloc(N*sizeof(char));
    for(i=0; i < N; i++) str[i]='c';

    MPI_Init (&argc, &argv);

    char *d_str;
    size_t size = (size_t) N*sizeof(char);
    cudaMalloc((void**)&d_str, size);
    cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice);

    gridX = 100;
    gridY = 10;
    blockX = blockY = 64;
    dim3 dimGrid(gridX, gridY);  // 4096 chars per block
    dim3 dimBlock(blockX, blockY); // one thread per character, 2D
    printf("dimGrid(%d, %d)\t", gridX, gridY);
    printf("dimBlock(%d, %d)\t", blockX, blockY);
    helloWorld<<< dimGrid, dimBlock >>>(d_str);

    cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost);
    cudaThreadSynchronize();

    MPI_Barrier (MPI_COMM_WORLD);

    cudaFree(d_str);

    printf("\nRes:\n");
    for(i = 0; i < N; i++) printf("\t[%u] %c\n", i, str[i]);

    MPI_Finalize ();

    free(str);
    return 0.0;
}

// Device kernel
__global__ void helloWorld(char* str)
{
    // determine where in the thread grid we are
    int pos = getGlobalIdx_2D_2D();
    if (pos % 2 == 0) str[pos] -= 2;
    else str[pos] += 8;
}

__device__ int getGlobalIdx_2D_2D()
{
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int threadId = blockId * (blockDim.x * blockDim.y) +
                     (threadIdx.y * blockDim.x) + threadIdx.x;
    return threadId;
}

My desired output is: jajajajajajaja... x4096000

I've read that the '%' operation is not efficient, but I don't think that's the problem here.

Thanks!

1 Answer


You are performing absolutely no CUDA error checking; it is really beneficial to do so. Once you enable it, you will find that block dimensions of 64 x 64 are invalid, because they result in 4096 threads per block, which is not a valid configuration.
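
For reference, a minimal error-checking sketch might look like the following. The CHECK_CUDA macro name and the 32 x 32 block size are my own illustrative choices, not something from your code; 32 x 32 gives 1024 threads per block, which is within the per-block limit on current devices.

#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Wrap CUDA runtime calls and abort with a readable message on failure.
#define CHECK_CUDA(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error at %s:%d: %s\n",                  \
                    __FILE__, __LINE__, cudaGetErrorString(err_));        \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

    /* ... inside main, replacing the launch section ... */
    dim3 dimBlock(32, 32);                   // 1024 threads per block
    dim3 dimGrid((N + 1023) / 1024, 1);      // enough blocks to cover all N chars

    helloWorld<<< dimGrid, dimBlock >>>(d_str);
    CHECK_CUDA(cudaGetLastError());          // reports an invalid launch configuration
    CHECK_CUDA(cudaDeviceSynchronize());     // reports errors raised while the kernel runs
    CHECK_CUDA(cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost));

With the grid laid out this way, your getGlobalIdx_2D_2D() still produces one unique index per character (N here is an exact multiple of 1024), so the rest of the program can stay as it is.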

Michal Hosala
  • I used cudaThreadSynchronize as in the HelloWorld example, so I didn't have any reason to question it. I started using CUDA error checking and the problem was that 64x64 threads per block is not a valid configuration, as explained in this thread (http://stackoverflow.com/questions/16125389/invalid-configuration-argument-error-for-the-call-of-cuda-kernel) – Genís Moreno Jan 12 '16 at 10:09