
Somehow, when I modify d_updated_water_flow_map in the code below, d_terrain_height_map gets modified too (or instead).

Changing the order of allocation of the two arrays fixes the problem, but I assume this only masks the root cause of the issue.

cudaCheck(cudaMalloc((void **)&d_water_flow_map, SIZE * 4)); 
cudaCheck(cudaMalloc((void **)&d_updated_water_flow_map, SIZE * 4)); // changing this array also changes d_terrain_height_map
cudaCheck(cudaMalloc((void **)&d_terrain_height_map, SIZE));  

I am compiling the kernel into a DLL and calling it from the Python file below, inside the Blender 3D Python interpreter. All of the values are 32-bit floats.

cu_include.h

#pragma once  

#ifdef MATHLIBRARY_EXPORTS  
#define MATHLIBRARY_API __declspec(dllexport)   
#else  
#define MATHLIBRARY_API __declspec(dllimport)   
#endif  


extern "C" __declspec(dllexport)
void init(float *t_height_map,
float *w_height_map,
float *s_height_map,
int SIZE_X,
int SIZE_Y);

extern "C" __declspec(dllexport)
void run_hydro_erosion(int cycles,
float t_step,
float min_tilt_angle,
float SEDIMENT_CAP,
float DISSOLVE_CONST,
float DEPOSIT_CONST,
int SIZE_X,
int SIZE_Y,
float PIPE_LENGTH,
float ADJACENT_LENGTH,
float TIME_STEP,
float MIN_TILT_ANGLE);

extern "C" __declspec(dllexport)
void free_mem();

extern "C" __declspec(dllexport)
void procedural_rain(float *water_height_map, float *rain_map, int SIZE_X, int SIZE_Y);

erosion_kernel.cu (compiled into erosion_kernel.dll)

#include "cu_include.h"

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <algorithm>
#include <random>

// includes CUDA
#include <cuda_runtime.h>

using namespace std;

#define FLOW_RIGHT 0
#define FLOW_UP 1
#define FLOW_LEFT 2
#define FLOW_DOWN 3
#define X_VEL 0
#define Y_VEL 1
#define LEFT_CELL row, col - 1
#define RIGHT_CELL row, col + 1
#define ABOVE_CELL row - 1, col
#define BELOW_CELL row + 1, col

// CUDA API error checking macro
#define T 1024
#define M 1536
#define blockSize 1024
#define cudaCheck(error) \
  if (error != cudaSuccess) { \
    printf("Fatal error: %s at %s:%d\n", \
      cudaGetErrorString(error), \
      __FILE__, __LINE__); \
    exit(1); \
              }


__global__ void update_water_flow(float *water_height_map, float *water_flow_map, float *d_updated_water_flow_map, int SIZE_X, int SIZE_Y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int col = index % SIZE_X;
    int row = index / SIZE_X; 

    index = row * (SIZE_X * 4) + col * 4;   // 3D index
    d_updated_water_flow_map[index + FLOW_RIGHT] = 0;
    d_updated_water_flow_map[index + FLOW_UP] = 0;
    d_updated_water_flow_map[index + FLOW_LEFT] = 0;
    d_updated_water_flow_map[index + FLOW_DOWN] = 0;

}

static float *terrain_height_map;
static float *water_height_map;
static float *sediment_height_map;

void init(float *t_height_map,
    float *w_height_map,
    float *s_height_map,
    int SIZE_X,
    int SIZE_Y)
{
    /* set vars HOST*/
    terrain_height_map = t_height_map;
    water_height_map = w_height_map;
    sediment_height_map = s_height_map;
}

void run_hydro_erosion(int cycles,
    float t_step,
    float min_tilt_angle,
    float SEDIMENT_CAP,
    float DISSOLVE_CONST,
    float DEPOSIT_CONST,
    int SIZE_X,
    int SIZE_Y,
    float PIPE_LENGTH,
    float ADJACENT_LENGTH,
    float TIME_STEP,
    float MIN_TILT_ANGLE)
{ 
    int numBlocks = (SIZE_X * SIZE_Y + (blockSize - 1)) / blockSize;
    int SIZE = SIZE_X * SIZE_Y * sizeof(float);

    float *d_terrain_height_map, *d_updated_terrain_height_map;
    float *d_water_height_map, *d_updated_water_height_map;
    float *d_sediment_height_map, *d_updated_sediment_height_map;

    float *d_suspended_sediment_level;
    float *d_updated_suspended_sediment_level;
    float *d_water_flow_map;
    float *d_updated_water_flow_map;
    float *d_prev_water_height_map;
    float *d_water_velocity_vec;
    float *d_rain_map;

    cudaCheck(cudaMalloc(&d_water_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_updated_water_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_prev_water_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_water_flow_map, SIZE * 4));
    cudaCheck(cudaMalloc(&d_updated_water_flow_map, SIZE * 4)); // changing this array also changes d_terrain_height_map
    cudaCheck(cudaMalloc(&d_terrain_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_updated_terrain_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_sediment_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_updated_sediment_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_suspended_sediment_level, SIZE));
    cudaCheck(cudaMalloc(&d_updated_suspended_sediment_level, SIZE));
    cudaCheck(cudaMalloc(&d_rain_map, SIZE));
    cudaCheck(cudaMalloc(&d_water_velocity_vec, SIZE * 2));

    cudaCheck(cudaMemcpy(d_terrain_height_map, terrain_height_map, SIZE, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_water_height_map, water_height_map, SIZE, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_sediment_height_map, sediment_height_map, SIZE, cudaMemcpyHostToDevice));

    cout << "init terrain_height_map" << endl;
    for (int i = 0; i < SIZE_X * SIZE_Y; i++) {
        cout << terrain_height_map[i] << ", ";
        if (i % SIZE_X == 0 && i != 0) cout << endl;
    }

    /* launch the kernel on the GPU */
    float *temp;
    while (cycles--) {
        update_water_flow << < numBlocks, blockSize >> >(d_water_height_map, d_water_flow_map, d_updated_water_flow_map, SIZE_X, SIZE_Y); 
        temp = d_water_flow_map;
        d_water_flow_map = d_updated_water_flow_map;
        d_updated_water_flow_map = temp;        
    }
    cudaCheck(cudaMemcpy(terrain_height_map, d_terrain_height_map, SIZE, cudaMemcpyDeviceToHost)); 


    cout << "updated terrain" << endl;
    for (int i = 0; i < SIZE_X * SIZE_Y; i++) {
        cout << terrain_height_map[i] << ", ";
        if (i % SIZE_X == 0 && i != 0) cout << endl;
    } 
} 
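
For reference, a build command along these lines should produce the DLL (a sketch only, assuming nvcc with the MSVC toolchain; the exact flags are an assumption and may need adjusting for your setup):

nvcc --shared -DMATHLIBRARY_EXPORTS -o erosion_kernel.dll erosion_kernel.cu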

Python file

import bpy
import numpy
import ctypes
import random

width = 4
height = 4

size_x = width
size_y = height
N = size_x * size_y

scrpt_cycles = 1
kernel_cycles = 1
time_step = 0.005 
pipe_length = 1.0
adjacent_length = 1.0
min_tilt_angle = 10
sediment_cap = 0.01
dissolve_const = 0.01
deposit_const = 0.01

# initialize arrays
ter_height_map = numpy.ones((N), dtype=numpy.float32)
water_height_map = numpy.zeros((N), dtype=numpy.float32)
sed_height_map = numpy.zeros((N), dtype=numpy.float32)
rain_map = numpy.ones((N), dtype=numpy.float32)


# load terrain height from image
for i in range(0, len(ter_height_map)):
    ter_height_map[i] = 1


# import DLL
E = ctypes.cdll.LoadLibrary("E:/Programming/CUDA/erosion/Release/erosion_kernel.dll")

# initialize device memory
E.init( ctypes.c_void_p(ter_height_map.ctypes.data), 
        ctypes.c_void_p(water_height_map.ctypes.data),
        ctypes.c_void_p(sed_height_map.ctypes.data),
        ctypes.c_int(size_x),
        ctypes.c_int(size_y))


# run erosion
while(scrpt_cycles):
    scrpt_cycles = scrpt_cycles - 1  
    E.run_hydro_erosion(ctypes.c_int(kernel_cycles),
                        ctypes.c_float(time_step),
                        ctypes.c_float(min_tilt_angle), 
                        ctypes.c_float(sediment_cap), 
                        ctypes.c_float(dissolve_const), 
                        ctypes.c_float(deposit_const),
                        ctypes.c_int(size_x),
                        ctypes.c_int(size_y),
                        ctypes.c_float(pipe_length),
                        ctypes.c_float(adjacent_length),
                        ctypes.c_float(time_step),
                        ctypes.c_float(min_tilt_angle))

Wrong output:

(screenshot of the wrong output)

Expected output (after I comment out update_water_flow):

//update_water_flow << < numBlocks, blockSize >> >(d_water_height_map, d_water_flow_map, d_updated_water_flow_map, SIZE_X, SIZE_Y); 

(screenshot of the expected output)

Graphics card: GTX 460M

  • Without an [MCVE] this question is a waste of everyone's time, including yours. – talonmies Aug 05 '17 at 09:31
  • @talonmies I have added a working, minimal example! Sorry about that.. – VSB Aug 05 '17 at 13:21
  • 1
    This is illegal in CUDA: `__device__ float *d_updated_water_flow_map; ... cudaCheck(cudaMalloc(&d_updated_water_flow_map, SIZE * 4));` and is a significant difference between the version of the code you have now posted compared to the version that was previously posted (which had `d_updated_water_flow_map` as an ordinary host stack variable). You cannot take the address of a `__device__` variable in host code (even in a call/parameter for `cudaMalloc`) and this is not the right method to create a `__device__` pointer variable. – Robert Crovella Aug 05 '17 at 13:46
  • @RobertCrovella I changed the code back to the original design. I was trying something different and forgot to change the variables back to how they were originally. I tested and the issue is still happening: modifying d_updated_water_flow_map still modifies d_terrain_height_map. – VSB Aug 05 '17 at 13:54
  • run your code outside of blender with `cuda-memcheck`. I was able to do that just by removing `import bpy` from your python script and then running `cuda-memcheck python test.py` When I do that, `cuda-memcheck` reports invalid memory access errors in your kernel. – Robert Crovella Aug 05 '17 at 14:49
  • You need some thread-check code in your kernel, before you access any data. Something like this: `if ((row >= SIZE_Y) || (col >= SIZE_X)) return;`, right before the lines that update `d_updated_water_flow_map` – Robert Crovella Aug 05 '17 at 15:07
  • When you say "Somehow when I modify d_updated_water_flow_map in below code, d_terrain_height_map gets modified too / instead.", do you mean that the *pointer value* of `d_terrain_height_map` get modified, or are you referring to the data it points to? If you are referring to the data it points to, can you provide sample output when you run the python script, showing what you get, and also indicating what you expect it to be? Because when I run it, I get all ones for `init terrain_height_map` and also all ones for `updated terrain`, and this seems sensible to me. – Robert Crovella Aug 05 '17 at 15:08
  • @RobertCrovella I have added outputs. When I modify values inside `d_updated_water_flow_map`, the values inside `d_terrain_height_map` get modified instead. – VSB Aug 05 '17 at 15:20
  • add the thread-check code to your kernel. You are launching more threads (1024) than you need in this case, and those extra threads are writing out-of-bounds – Robert Crovella Aug 05 '17 at 15:39
  • @RobertCrovella thanks a lot!!! I feel really dumb... Completely forgot about that.. and thanks for cuda-memcheck I will make sure to use that next time I have a memory issue! – VSB Aug 05 '17 at 15:43
  • So did that fix it? – Robert Crovella Aug 05 '17 at 15:44
  • @RobertCrovella yup!!!!!!! – VSB Aug 05 '17 at 16:58

1 Answer


(Note that the code in this answer also gives a complete recipe/example for how to use CUDA device code (e.g. CUDA kernels) in a library that is shared with a Python application using python ctypes. If you wish to use CUDA library functionality instead, the answer here provides an example of that, also using python ctypes.)

The problem here is that the kernel was writing out of bounds, and apparently the runtime located the allocations close enough together in device memory that exceeding the bounds of the first allocation caused the code to write into the second:

cudaCheck(cudaMalloc(&d_updated_water_flow_map, SIZE * 4)); // changing this array also changes d_terrain_height_map
cudaCheck(cudaMalloc(&d_terrain_height_map, SIZE));
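
One way to confirm that adjacency (a diagnostic sketch, not part of the original code) is to print the device pointers immediately after the two allocations:

printf("d_updated_water_flow_map = %p (%d bytes)\n", (void *)d_updated_water_flow_map, SIZE * 4);
printf("d_terrain_height_map     = %p\n", (void *)d_terrain_height_map);
// If d_terrain_height_map begins at (or just past) d_updated_water_flow_map + SIZE * 4,
// any write past the end of the flow map lands directly in the terrain map.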

The out-of-bounds accesses occur because the kernel launch involves far more threads than needed (1024 in this case), whereas we really only "need" SIZE_X*SIZE_Y threads (i.e. 16 in this example):

#define blockSize 1024
...
int numBlocks = (SIZE_X * SIZE_Y + (blockSize - 1)) / blockSize;
...
update_water_flow << < numBlocks, blockSize >> >(d_water_height_map, d_water_flow_map, d_updated_water_flow_map, SIZE_X, SIZE_Y); 
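
With the values from the posted example (SIZE_X = SIZE_Y = 4, blockSize = 1024), the numbers work out like this (a worked check, not part of the original answer):

// numBlocks        = (16 + 1023) / 1024 = 1
// threads launched = 1 * 1024           = 1024
// threads needed   = SIZE_X * SIZE_Y    = 16
// The highest extra thread (index 1023) computes col = 1023 % 4 = 3 and row = 1023 / 4 = 255,
// so it writes at float offsets 255*(4*4) + 3*4 + 0..3 = 4092..4095, far beyond the
// SIZE * 4 = 256 bytes (64 floats) actually allocated for d_updated_water_flow_map.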

It is of course "typical" in CUDA programming to launch more than enough threads, but when doing so it's important to include a "thread check" in the kernel to prevent any "extra" threads from making illegal, out-of-bounds accesses. In this case one possible kernel thread check might look like this:

if ((row >= SIZE_Y) || (col >= SIZE_X)) return;

Here's a fully-worked example based on the provided code (albeit on Linux, with the Blender dependency removed from the Python code), showing the before-and-after effect. Note that we can run even a code like this with cuda-memcheck, which would have pointed out the out-of-bounds accesses in this case (omitted from the first example below, for clarity):

$ cat t383.cu
extern "C"
void init(float *t_height_map,
float *w_height_map,
float *s_height_map,
int SIZE_X,
int SIZE_Y);

extern "C"
void run_hydro_erosion(int cycles,
float t_step,
float min_tilt_angle,
float SEDIMENT_CAP,
float DISSOLVE_CONST,
float DEPOSIT_CONST,
int SIZE_X,
int SIZE_Y,
float PIPE_LENGTH,
float ADJACENT_LENGTH,
float TIME_STEP,
float MIN_TILT_ANGLE);

extern "C"
void free_mem();

extern "C"
void procedural_rain(float *water_height_map, float *rain_map, int SIZE_X, int SIZE_Y);

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <algorithm>
#include <random>

// includes CUDA
#include <cuda_runtime.h>

using namespace std;

#define FLOW_RIGHT 0
#define FLOW_UP 1
#define FLOW_LEFT 2
#define FLOW_DOWN 3
#define X_VEL 0
#define Y_VEL 1
#define LEFT_CELL row, col - 1
#define RIGHT_CELL row, col + 1
#define ABOVE_CELL row - 1, col
#define BELOW_CELL row + 1, col

// CUDA API error checking macro
#define T 1024
#define M 1536
#define blockSize 1024
#define cudaCheck(error) \
  if (error != cudaSuccess) { \
    printf("Fatal error: %s at %s:%d\n", \
      cudaGetErrorString(error), \
      __FILE__, __LINE__); \
    exit(1); \
              }


__global__ void update_water_flow(float *water_height_map, float *water_flow_map, float *d_updated_water_flow_map, int SIZE_X, int SIZE_Y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int col = index % SIZE_X;
    int row = index / SIZE_X;

    index = row * (SIZE_X * 4) + col * 4;   // 3D index
#ifdef FIX
    if ((row >= SIZE_Y) || (col >= SIZE_X)) return;
#endif
    d_updated_water_flow_map[index + FLOW_RIGHT] = 0;
    d_updated_water_flow_map[index + FLOW_UP] = 0;
    d_updated_water_flow_map[index + FLOW_LEFT] = 0;
    d_updated_water_flow_map[index + FLOW_DOWN] = 0;

}

static float *terrain_height_map;
static float *water_height_map;
static float *sediment_height_map;

void init(float *t_height_map,
    float *w_height_map,
    float *s_height_map,
    int SIZE_X,
    int SIZE_Y)
{
    /* set vars HOST*/
    terrain_height_map = t_height_map;
    water_height_map = w_height_map;
    sediment_height_map = s_height_map;
}

void run_hydro_erosion(int cycles,
    float t_step,
    float min_tilt_angle,
    float SEDIMENT_CAP,
    float DISSOLVE_CONST,
    float DEPOSIT_CONST,
    int SIZE_X,
    int SIZE_Y,
    float PIPE_LENGTH,
    float ADJACENT_LENGTH,
    float TIME_STEP,
    float MIN_TILT_ANGLE)
{
    int numBlocks = (SIZE_X * SIZE_Y + (blockSize - 1)) / blockSize;
    int SIZE = SIZE_X * SIZE_Y * sizeof(float);

    float *d_terrain_height_map, *d_updated_terrain_height_map;
    float *d_water_height_map, *d_updated_water_height_map;
    float *d_sediment_height_map, *d_updated_sediment_height_map;

    float *d_suspended_sediment_level;
    float *d_updated_suspended_sediment_level;
    float *d_water_flow_map;
    float *d_updated_water_flow_map;
    float *d_prev_water_height_map;
    float *d_water_velocity_vec;
    float *d_rain_map;

    cudaCheck(cudaMalloc(&d_water_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_updated_water_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_prev_water_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_water_flow_map, SIZE * 4));
    cudaCheck(cudaMalloc(&d_updated_water_flow_map, SIZE * 4)); // changing this array also changes d_terrain_height_map
    cudaCheck(cudaMalloc(&d_terrain_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_updated_terrain_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_sediment_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_updated_sediment_height_map, SIZE));
    cudaCheck(cudaMalloc(&d_suspended_sediment_level, SIZE));
    cudaCheck(cudaMalloc(&d_updated_suspended_sediment_level, SIZE));
    cudaCheck(cudaMalloc(&d_rain_map, SIZE));
    cudaCheck(cudaMalloc(&d_water_velocity_vec, SIZE * 2));

    cudaCheck(cudaMemcpy(d_terrain_height_map, terrain_height_map, SIZE, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_water_height_map, water_height_map, SIZE, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_sediment_height_map, sediment_height_map, SIZE, cudaMemcpyHostToDevice));

    cout << "init terrain_height_map" << endl;
    for (int i = 0; i < SIZE_X * SIZE_Y; i++) {
        cout << terrain_height_map[i] << ", ";
        if (i % SIZE_X == 0 && i != 0) cout << endl;
    }

    /* launch the kernel on the GPU */
    float *temp;
    while (cycles--) {
        update_water_flow << < numBlocks, blockSize >> >(d_water_height_map, d_water_flow_map, d_updated_water_flow_map, SIZE_X, SIZE_Y);
        temp = d_water_flow_map;
        d_water_flow_map = d_updated_water_flow_map;
        d_updated_water_flow_map = temp;
    }
    cudaCheck(cudaMemcpy(terrain_height_map, d_terrain_height_map, SIZE, cudaMemcpyDeviceToHost));


    cout << "updated terrain" << endl;
    for (int i = 0; i < SIZE_X * SIZE_Y; i++) {
        cout << terrain_height_map[i] << ", ";
        if (i % SIZE_X == 0 && i != 0) cout << endl;
    }
}
$ cat t383.py
import numpy
import ctypes
import random

width = 4
height = 4

size_x = width
size_y = height
N = size_x * size_y

scrpt_cycles = 1
kernel_cycles = 1
time_step = 0.005
pipe_length = 1.0
adjacent_length = 1.0
min_tilt_angle = 10
sediment_cap = 0.01
dissolve_const = 0.01
deposit_const = 0.01

# initialize arrays
ter_height_map = numpy.ones((N), dtype=numpy.float32)
water_height_map = numpy.zeros((N), dtype=numpy.float32)
sed_height_map = numpy.zeros((N), dtype=numpy.float32)
rain_map = numpy.ones((N), dtype=numpy.float32)


# load terrain height from image
for i in range(0, len(ter_height_map)):
    ter_height_map[i] = 1


# import DLL
E = ctypes.cdll.LoadLibrary("./t383.so")

# initialize device memory
E.init( ctypes.c_void_p(ter_height_map.ctypes.data),
        ctypes.c_void_p(water_height_map.ctypes.data),
        ctypes.c_void_p(sed_height_map.ctypes.data),
        ctypes.c_int(size_x),
        ctypes.c_int(size_y))


# run erosion
while(scrpt_cycles):
    scrpt_cycles = scrpt_cycles - 1
    E.run_hydro_erosion(ctypes.c_int(kernel_cycles),
                        ctypes.c_float(time_step),
                        ctypes.c_float(min_tilt_angle),
                        ctypes.c_float(sediment_cap),
                        ctypes.c_float(dissolve_const),
                        ctypes.c_float(deposit_const),
                        ctypes.c_int(size_x),
                        ctypes.c_int(size_y),
                        ctypes.c_float(pipe_length),
                        ctypes.c_float(adjacent_length),
                        ctypes.c_float(time_step),
                        ctypes.c_float(min_tilt_angle))
$ nvcc -Xcompiler -fPIC -std=c++11 -shared -arch=sm_61 -o t383.so t383.cu
$ python t383.py
init terrain_height_map
1, 1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, updated terrain
0, 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 
$ nvcc -Xcompiler -fPIC -std=c++11 -shared -arch=sm_61 -o t383.so t383.cu -DFIX
$ cuda-memcheck python t383.py
========= CUDA-MEMCHECK
init terrain_height_map
1, 1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, updated terrain
1, 1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 
========= ERROR SUMMARY: 0 errors
$

If we compile the previous example without the fix, but run it with cuda-memcheck, we get output indicating the out-of-bounds accesses:

$ nvcc -Xcompiler -fPIC -std=c++11 -shared -arch=sm_61 -o t383.so t383.cu
$ cuda-memcheck python t383.py
========= CUDA-MEMCHECK
init terrain_height_map
1, 1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
========= Invalid __global__ write of size 4
=========     at 0x000002f0 in update_water_flow(float*, float*, float*, int, int)
=========     by thread (31,0,0) in block (0,0,0)
=========     Address 0x1050d6009f0 is out of bounds
=========     Saved host backtrace up to driver entry point at kernel launch time
=========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 (cuLaunchKernel + 0x2c5) [0x204505]
=========     Host Frame:./t383.so [0x1c291]
=========     Host Frame:./t383.so [0x39e33]
=========     Host Frame:./t383.so [0x6879]
=========     Host Frame:./t383.so (_Z43__device_stub__Z17update_water_flowPfS_S_iiPfS_S_ii + 0xe3) [0x6747]
=========     Host Frame:./t383.so (_Z17update_water_flowPfS_S_ii + 0x38) [0x6781]
=========     Host Frame:./t383.so (run_hydro_erosion + 0x8f2) [0x648b]
=========     Host Frame:/usr/lib/x86_64-linux-gnu/libffi.so.6 (ffi_call_unix64 + 0x4c) [0x5adc]
=========     Host Frame:/usr/lib/x86_64-linux-gnu/libffi.so.6 (ffi_call + 0x1fc) [0x540c]
=========     Host Frame:/usr/lib/python2.7/lib-dynload/_ctypes.x86_64-linux-gnu.so (_ctypes_callproc + 0x48e) [0x145fe]
=========     Host Frame:/usr/lib/python2.7/lib-dynload/_ctypes.x86_64-linux-gnu.so [0x15f9e]
=========     Host Frame:python (PyEval_EvalFrameEx + 0x98d) [0x1244dd]
=========     Host Frame:python [0x167d14]
=========     Host Frame:python (PyRun_FileExFlags + 0x92) [0x65bf4]
=========     Host Frame:python (PyRun_SimpleFileExFlags + 0x2ee) [0x6612d]
=========     Host Frame:python (Py_Main + 0xb5e) [0x66d92]
=========     Host Frame:/lib/x86_64-linux-gnu/libc.so.6 (__libc_start_main + 0xf5) [0x21f45]
=========     Host Frame:python [0x177c2e]
=========
========= Invalid __global__ write of size 4
=========     at 0x000002f0 in update_water_flow(float*, float*, float*, int, int)
=========     by thread (30,0,0) in block (0,0,0)
=========     Address 0x1050d6009e0 is out of bounds
=========     Saved host backtrace up to driver entry point at kernel launch time
=========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 (cuLaunchKernel + 0x2c5) [0x204505]
=========     Host Frame:./t383.so [0x1c291]
=========     Host Frame:./t383.so [0x39e33]
=========     Host Frame:./t383.so [0x6879]
=========     Host Frame:./t383.so (_Z43__device_stub__Z17update_water_flowPfS_S_iiPfS_S_ii + 0xe3) [0x6747]
=========     Host Frame:./t383.so (_Z17update_water_flowPfS_S_ii + 0x38) [0x6781]
=========     Host Frame:./t383.so (run_hydro_erosion + 0x8f2) [0x648b]
=========     Host Frame:/usr/lib/x86_64-linux-gnu/libffi.so.6 (ffi_call_unix64 + 0x4c) [0x5adc]
=========     Host Frame:/usr/lib/x86_64-linux-gnu/libffi.so.6 (ffi_call + 0x1fc) [0x540c]
=========     Host Frame:/usr/lib/python2.7/lib-dynload/_ctypes.x86_64-linux-gnu.so (_ctypes_callproc + 0x48e) [0x145fe]
=========     Host Frame:/usr/lib/python2.7/lib-dynload/_ctypes.x86_64-linux-gnu.so [0x15f9e]
=========     Host Frame:python (PyEval_EvalFrameEx + 0x98d) [0x1244dd]
=========     Host Frame:python [0x167d14]
=========     Host Frame:python (PyRun_FileExFlags + 0x92) [0x65bf4]
=========     Host Frame:python (PyRun_SimpleFileExFlags + 0x2ee) [0x6612d]
=========     Host Frame:python (Py_Main + 0xb5e) [0x66d92]
=========     Host Frame:/lib/x86_64-linux-gnu/libc.so.6 (__libc_start_main + 0xf5) [0x21f45]
=========     Host Frame:python [0x177c2e]
=========
... (output truncated for brevity of presentation)
========= ERROR SUMMARY: 18 errors
$
  • Thanks a lot again! Btw, when I try running `cuda-memcheck` I get the following two errors: `========= Internal Memcheck Error: Device not supported` and `========= Internal Memcheck Error: Initialization failed` – VSB Aug 05 '17 at 19:45
  • which CUDA version, which driver version, and which GPU? You may be running a version of `cuda-memcheck` which is too old for your GPU or does not match the installed CUDA version. – Robert Crovella Aug 05 '17 at 19:48
  • GeForce GTX 460M, driver: 384.76, not sure which CUDA version, does this help: [link](https://www.geforce.com/hardware/notebook-gpus/geforce-gtx-460m/specifications) – VSB Aug 05 '17 at 19:58
  • The CUDA version matters. What do you get if you type `cuda-memcheck --version` – Robert Crovella Aug 05 '17 at 20:03
  • `CUDA-MEMCHECK version 8.0.61 ID:(41)` – VSB Aug 05 '17 at 20:14
  • which version of windows? – Robert Crovella Aug 05 '17 at 20:57
  • This appears to me to be an incompatibility between the CUDA 8.0.61 version of `cuda-memcheck` and your R384 driver. If you want `cuda-memcheck` to work properly, I believe you'll have to roll back your driver to a R375 driver, such as the 376.51 driver that came bundled with the CUDA 8.0.61 installer. This issue should be unique to the fact that you are using a very old Fermi GPU (cc2.0, GTX 460M). Newer GPUs such as cc3.0 or newer should not experience this. – Robert Crovella Aug 05 '17 at 21:42
  • I hate to come back here again for the same issue, but I am having the same issue with the same array again, in a different device function. This time I checked the index ten times. I have stripped the device function down to the minimum needed to reproduce the error, but I cannot narrow it down. Is it possible that the device function runs out of memory for local operations? Or that it has too many instructions? – VSB Aug 07 '17 at 00:18
  • I placed the test files [here](https://github.com/iperson/stackhelp) and made comments on what changes can make the issue go away. – VSB Aug 07 '17 at 00:36
  • It will be a horrible cluttered mess if you start to discuss a new issue with this question, and SO isn't really designed that way. Have a new question? Ask a new question. – Robert Crovella Aug 07 '17 at 00:48