In my CUDA device code I'm doing a check where I subtract blockDim.x from the thread's id to see whether the data I might want to use is in range. But when this value goes below 0 it seems to wrap around to the maximum value instead.
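The kind of check I mean looks roughly like this (the kernel name and the neighbour access are made up for illustration; the repro below is what I actually ran):

__global__ void useNeighbour(float* in, float* out)
{
    int id = (blockDim.x * blockIdx.x) + threadIdx.x;
    // Only read the element one block to the left if it exists.
    // In practice this condition never seems to be false, even when id < blockDim.x.
    if (id - blockDim.x >= 0)
        out[id] = in[id - blockDim.x];
}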
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

float input[] =
{
    1.5f, 2.5f, 3.5f,
    4.5f, 5.5f, 6.5f,
    7.5f, 8.5f, 9.5f,
};

__global__ void underflowCausingFunction(float* in, float* out)
{
    int id = (blockDim.x * blockIdx.x) + threadIdx.x;
    out[id] = id - blockDim.x;
}

int main()
{
    float* in;
    float* out;
    cudaMalloc(&in, sizeof(float) * 9);
    cudaMemcpy(in, input, sizeof(float) * 9, cudaMemcpyHostToDevice);
    cudaMalloc(&out, sizeof(float) * 9);

    underflowCausingFunction<<<3, 3>>>(in, out);

    float recivedOut[9];
    cudaMemcpy(recivedOut, out, sizeof(float) * 9, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    std::cout << recivedOut[0] << " " << recivedOut[1] << " " << recivedOut[2] << "\n"
        << recivedOut[3] << " " << recivedOut[4] << " " << recivedOut[5] << "\n"
        << recivedOut[6] << " " << recivedOut[7] << " " << recivedOut[8] << "\n";

    cudaFree(in);
    cudaFree(out);
    std::cin.get();
}
The output of this is:
4.29497e+09 4.29497e+09 4.29497e+09
0 1 2
3 4 5
I'm not sure why it's acting like an unsigned int. If it's relevant, I'm using a GTX 970 and the NVCC compiler that comes with the Visual Studio plugin. If somebody could explain what's happening or what I'm doing wrong, that would be great.
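For what it's worth, the same wrap-around reproduces on the host in plain C++ (a minimal sketch; dim here is just a stand-in for blockDim.x, which as far as I can tell is an unsigned field):

#include <iostream>

int main()
{
    int id = 0;
    unsigned int dim = 3; // stand-in for blockDim.x

    // Mixed signed/unsigned subtraction: the int operand is converted
    // to unsigned int, so 0 - 3 wraps to 4294967293 instead of -3.
    float wrapped = id - dim;

    // Casting the unsigned operand to int keeps the subtraction signed.
    float expected = id - (int)dim;

    std::cout << wrapped << "\n";  // prints 4.29497e+09
    std::cout << expected << "\n"; // prints -3
}

That makes me suspect ordinary C++ integer promotion rather than anything CUDA-specific, but I'd appreciate confirmation.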