
I can't figure out why the following code (a chi-squared distance) takes longer when compiled with OpenMP. Following this question I released the GIL, but there is still no improvement whatsoever.

np::ndarray additive_chi2_kernel(const np::ndarray& _h0,
                                 const np::ndarray& _h1) {

auto dtype = np::dtype::get_builtin<float>();

auto h0 = _h0.astype(dtype);
auto h1 = _h1.astype(dtype);

assert (h0.get_nd() == 2 && h1.get_nd() == 2);

float* ptr_h0  = reinterpret_cast<float*>(h0.get_data());
float* ptr_h1  = reinterpret_cast<float*>(h1.get_data());

int M0 = h0.shape(0);
int M1 = h1.shape(0);
int N  = h1.shape(1);

float* result = new float[M0*M1]();

auto save_state = PyEval_SaveThread();

#pragma omp parallel
for(int m0 = 0; m0 < M0; ++m0) {
    for(int m1 = 0; m1 < M1; ++m1) {
        float error = 0;
        for(int i = 0; i < N; ++i) {
            float sum  = ptr_h0[m0*N+i] + ptr_h1[m1*N+i];
            if (sum != 0){
                float diff = ptr_h0[m0*N+i] - ptr_h1[m1*N+i];
                error += (diff*diff/sum);
            }
        }
        result[m0*M1+m1] = error;
    }
}

PyEval_RestoreThread(save_state);

np::ndarray D = np::from_data(result, np::dtype::get_builtin<float>(),
                              py::make_tuple(M0, M1),
                              py::make_tuple(sizeof(float)*M1, sizeof(float)),
                              py::object());

return D;

}

Boost wrapper is:

    BOOST_PYTHON_MODULE(vgic) {
        // Initialize numpy
        PyEval_InitThreads();
        np::initialize();
        py::def("additive_chi2_kernel", additive_chi2_kernel);
    }

Compiler flags: -std=c++11 -Wall -fopenmp -O3 -fPIC

All threads are active, as they should be:

     PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND                                                                                                                                                                 
   20706 memecs    20   0 3980080 818004  42880 R  3179  0.6   7:34.04 python

but the running time is higher than on a single core. For M0=100,M1=1000,N=1000 I get

OMP: 2.214 seconds
SINGLE-CORE: 1.175 seconds.

Possible problems?


1 Answer


You should use #pragma omp parallel for to divide the outer loop among the threads. #pragma omp parallel only spawns a team of threads without any work-sharing construct, so every thread computes the entire M0×M1 result and they all write to the same result entries. You therefore pay the full cost once per thread, plus cache contention on result, which is why the OpenMP build ends up slower than the single-core run.
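A minimal sketch of the corrected loop, reusing the pointers and sizes from your code; only the pragma changes:

    // Work-sharing: the M0 iterations of the outer loop are divided
    // among the threads instead of being repeated by every thread.
    #pragma omp parallel for
    for (int m0 = 0; m0 < M0; ++m0) {
        for (int m1 = 0; m1 < M1; ++m1) {
            float error = 0;
            for (int i = 0; i < N; ++i) {
                float sum = ptr_h0[m0*N+i] + ptr_h1[m1*N+i];
                if (sum != 0) {
                    float diff = ptr_h0[m0*N+i] - ptr_h1[m1*N+i];
                    error += diff*diff/sum;
                }
            }
            // Each (m0, m1) cell is written by exactly one thread,
            // so no synchronization on result is needed.
            result[m0*M1+m1] = error;
        }
    }

error, m1 and i are declared inside the parallel region, so they are private to each thread automatically. If M0 were small relative to the number of threads, adding collapse(2) to the same pragma would let OpenMP distribute the combined m0/m1 iteration space instead of just the outer loop.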
