Most recent processors support AVX, which provides 256-bit registers that hold four doubles. So one way to optimize this is to use AVX. I implemented it with the x86intrin.h header that ships with the GCC compiler, and used OpenMP to make the solution multi-threaded.
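Since not every CPU that runs the binary is guaranteed to have AVX, you can guard the vectorized path with a runtime check. A minimal sketch, assuming GCC or Clang where the __builtin_cpu_supports builtin is available:

#include <stdio.h>

int main()
{
    //__builtin_cpu_supports returns nonzero if the running CPU exposes the feature
    if (__builtin_cpu_supports("avx"))
        printf("AVX available: the 256-bit path can be used.\n");
    else
        printf("No AVX: fall back to a scalar loop.\n");
    return 0;
}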
//gcc -Wall -fopenmp -O2 -march=native -o "MatrixVectorMultiplication" "MatrixVectorMultiplication.c"
//gcc 7.2, Skylake Core i7-6700HQ
//The performance improvement is significant (5232 cycles on my machine), but MKL is not available here to compare against
#include <stdio.h>
#include <x86intrin.h>
double A[20][1024] __attribute__((aligned(32))) = {{1.0, 2.0, 3.0, 3.5, 1.0, 2.0, 3.0, 3.5}, {4.0, 5.0, 6.0, 6.5, 4.0, 5.0, 6.0, 6.5}, {7.0, 8.0, 9.0, 9.5, 4.0, 5.0, 6.0, 6.5}};//aligned to 32 bytes for AVX 256-bit loads/stores
double B[1024] __attribute__((aligned(32))) = {2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0};//the vector
double C[20][1024] __attribute__((aligned(32)));//the results are stored here
int main()
{
    int i, j;
    //begin_rdtsc
    //get the start time here
    #pragma omp parallel for private(j)
    for(i=0; i<20; i++){
        for(j=0; j<1024; j+=16){
            //unrolled by 4: each iteration multiplies 16 doubles with four 256-bit operations
            //the __m256d temporaries are declared inside the loop so every thread gets its own copy
            __m256d vec_C1 = _mm256_mul_pd(_mm256_load_pd(&A[i][j]), _mm256_load_pd(&B[j]));
            _mm256_store_pd(&C[i][j], vec_C1);
            __m256d vec_C2 = _mm256_mul_pd(_mm256_load_pd(&A[i][j+4]), _mm256_load_pd(&B[j+4]));
            _mm256_store_pd(&C[i][j+4], vec_C2);
            __m256d vec_C3 = _mm256_mul_pd(_mm256_load_pd(&A[i][j+8]), _mm256_load_pd(&B[j+8]));
            _mm256_store_pd(&C[i][j+8], vec_C3);
            __m256d vec_C4 = _mm256_mul_pd(_mm256_load_pd(&A[i][j+12]), _mm256_load_pd(&B[j+12]));
            _mm256_store_pd(&C[i][j+12], vec_C4);
        }
    }
    //end_rdtsc
    //calculate the elapsed time
    //print the results
    for(i=0; i<20; i++){
        for(j=0; j<1024; j++){
            //printf(" %lf", C[i][j]);
        }
        //printf("\n");
    }
    return 0;
}
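The begin_rdtsc/end_rdtsc placeholders can be filled with the time-stamp counter intrinsic that x86intrin.h already provides. A minimal sketch of one way to take that measurement (serialization and frequency scaling are ignored here):

#include <stdio.h>
#include <x86intrin.h>

int main()
{
    unsigned long long start, end;

    start = __rdtsc(); //read the TSC before the kernel
    //... run the multiplication loop here ...
    end = __rdtsc();   //read the TSC after the kernel

    printf("Elapsed: %llu cycles\n", end - start);
    return 0;
}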