I'm trying to improve the performance of a routine using OpenMP (parallel for) and SSE intrinsics:
#include <smmintrin.h> // _mm_dp_ps requires SSE4.1

void Tester::ProcessParallel() // ProcessParallel is a member of the Tester class
{
    // Copy frequently used members into locals
    auto OutMapLen      = this->_OutMapLen;
    auto KernelBatchLen = this->_KernelBatchLen;
    auto OutMapHeig     = this->_OutMapHeig;
    auto OutMapWid      = this->_OutMapWid;
    auto InpMapWid      = this->_InpMapWid;
    auto NumInputMaps   = this->_NumInputMaps;
    auto InpMapLen      = this->_InpMapLen;
    auto KernelLen      = this->_KernelLen;
    auto KernelHeig     = this->_KernelHeig;
    auto KernelWid      = this->_KernelWid;
    auto input_local    = this->input;
    auto output_local   = this->output;
    auto weights_local  = this->weights;
    auto biases_local   = this->biases;
    auto klim           = this->_klim;

    #pragma omp parallel for firstprivate(OutMapLen, KernelBatchLen, OutMapHeig, OutMapWid, InpMapWid, NumInputMaps, InpMapLen, KernelLen, KernelHeig, KernelWid, input_local, output_local, weights_local, biases_local, klim)
    for (auto i = 0; i < _NumOutMaps; ++i)
    {
        auto output_map   = output_local  + i * OutMapLen;
        auto kernel_batch = weights_local + i * KernelBatchLen;
        auto bias         = biases_local  + i;
        for (auto j = 0; j < OutMapHeig; ++j)
        {
            auto output_map_row = output_map + j * OutMapWid;
            auto inp_row_idx    = j * InpMapWid;
            for (auto k = 0; k < OutMapWid; ++k)
            {
                auto output_nn = output_map_row + k;
                *output_nn = *bias;
                auto inp_cursor_idx = inp_row_idx + k;
                for (int _i = 0; _i < NumInputMaps; ++_i)
                {
                    auto input_cursor = input_local  + _i * InpMapLen + inp_cursor_idx;
                    auto kernel       = kernel_batch + _i * KernelLen;
                    for (int _j = 0; _j < KernelHeig; ++_j)
                    {
                        auto kernel_row_idx  = _j * KernelWid;
                        auto inp_row_cur_idx = _j * InpMapWid;
                        int _k = 0;
                        for (; _k < klim; _k += 4) // unrolled and vectorized part
                        {
                            float buf;
                            __m128 wgt = _mm_loadu_ps(kernel + kernel_row_idx + _k);
                            __m128 inp = _mm_loadu_ps(input_cursor + inp_row_cur_idx + _k);
                            __m128 prd = _mm_dp_ps(wgt, inp, 0xf1); // dot product of 4 floats into lane 0
                            _mm_store_ss(&buf, prd);
                            *output_nn += buf;
                        }
                        for (; _k < KernelWid; ++_k) // residual loop
                            *output_nn += *(kernel + kernel_row_idx + _k) * *(input_cursor + inp_row_cur_idx + _k);
                    }
                }
            }
        }
    }
}
Pure unrolling and SSE vectorization (without OpenMP) of the innermost loop improves total performance ~1.3×, which is a pretty nice result. However, pure OpenMP parallelization (without unrolling/vectorization) of the outermost loop gives only a ~2.1× gain on a Core i7-2600K (4 cores / 8 threads). With both the SSE vectorization and the OpenMP parallel for, the total gain is 2.3-2.7×. How can I boost the effect of the OpenMP parallelization in the code above?
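Since the combined gain is well below the product of the two individual gains, I suspect the per-iteration _mm_dp_ps plus the scalar accumulation into *output_nn is part of the problem. Here is a sketch (untested; assumes SSE3's <pmmintrin.h> for _mm_hadd_ps) of the vectorized row loop rewritten with a vector accumulator and a single horizontal sum per kernel row:

int _k = 0;
__m128 acc = _mm_setzero_ps();
for (; _k < klim; _k += 4) // vertical multiply-add, no horizontal work inside the loop
{
    __m128 wgt = _mm_loadu_ps(kernel + kernel_row_idx + _k);
    __m128 inp = _mm_loadu_ps(input_cursor + inp_row_cur_idx + _k);
    acc = _mm_add_ps(acc, _mm_mul_ps(wgt, inp));
}
acc = _mm_hadd_ps(acc, acc); // reduce the 4 partial sums to 1
acc = _mm_hadd_ps(acc, acc);
float buf;
_mm_store_ss(&buf, acc);
*output_nn += buf;
for (; _k < KernelWid; ++_k) // residual loop stays scalar
    *output_nn += *(kernel + kernel_row_idx + _k) * *(input_cursor + inp_row_cur_idx + _k);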
Interesting detail: if I replace the klim variable (the bound of the unrolled loop) with a scalar constant, say 4, the total performance gain rises to 3.5×.
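If that effect is the compiler fully unrolling a loop with a known trip count, one way to get it without hard-coding 4 might be a templated helper (RowDot below is a hypothetical name, not part of my class) that turns klim into a compile-time constant:

// Hypothetical helper: KLIM is a compile-time constant inside the hot loop.
template<int KLIM>
static float RowDot(const float* krow, const float* irow, int kernel_wid)
{
    float sum = 0.0f;
    int _k = 0;
    for (; _k < KLIM; _k += 4) // trip count known at compile time
    {
        float buf;
        __m128 prd = _mm_dp_ps(_mm_loadu_ps(krow + _k), _mm_loadu_ps(irow + _k), 0xf1);
        _mm_store_ss(&buf, prd);
        sum += buf;
    }
    for (; _k < kernel_wid; ++_k) // residual
        sum += krow[_k] * irow[_k];
    return sum;
}

The right instantiation would be selected once (e.g., by a switch on klim) before entering the parallel region, so the inner loops only ever see a constant bound.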