I'm trying to speed up an algorithm that performs a series of table lookups, and I'd like to use SSE2 or AVX2 for it. I've tried the _mm256_i32gather_epi32 intrinsic, but it came out 31% slower than the plain C code. Does anyone have suggestions for improving this, or for a different approach?
Timings: C code = 234, gathers = 340
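To make the approach concrete, here is a stripped-down version of a single gather step from the full routine below: load 8 table indices, gather the corresponding int16_t elements of src as 32-bit loads (scale 2), mask off the high halves, and pack back down to 16-bit. The helper name gather8_epi16 is just for illustration; the intrinsics are the same ones used in the disabled block further down.

#include <immintrin.h>
#include <cstdint>

// Illustration only: returns { src16[idx[0]], ..., src16[idx[7]] } for indices in 0..63
static inline __m128i gather8_epi16(const int16_t * src16, const int32_t * idx)
{
    __m256i vidx = _mm256_loadu_si256((const __m256i *)idx);            // 8 table indices
    __m256i v = _mm256_i32gather_epi32((const int *)src16, vidx, 2);    // 32-bit load at src16 + 2*index
    v = _mm256_and_si256(v, _mm256_set1_epi32(0xffff));                 // keep only the low 16 bits
    return _mm_packus_epi32(_mm256_castsi256_si128(v),                  // pack 8 x 32-bit down to 8 x 16-bit
                            _mm256_extracti128_si256(v, 1));
}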
static const int32_t g_tables[2][64]; // values between 0 and 63
template <int8_t which, class T>
static void lookup_data(int16_t * dst, T * src)
{
    const int32_t * lut = g_tables[which];
    // Leave this path disabled until Broadwell or Skylake, since it is 31% slower than the C code
    // (gather is 12 for Haswell, 7 for Broadwell and 5 for Skylake)
#if 0
    if (sizeof(T) == sizeof(int16_t)) {
        __m256i avx0, avx1, avx2, avx3, avx4, avx5, avx6, avx7;
        __m128i sse0, sse1, sse2, sse3, sse4, sse5, sse6, sse7;
        __m256i mask = _mm256_set1_epi32(0xffff);
        // Load the 64 table indices, 8 per register
        avx0 = _mm256_loadu_si256((__m256i *)(lut));
        avx1 = _mm256_loadu_si256((__m256i *)(lut + 8));
        avx2 = _mm256_loadu_si256((__m256i *)(lut + 16));
        avx3 = _mm256_loadu_si256((__m256i *)(lut + 24));
        avx4 = _mm256_loadu_si256((__m256i *)(lut + 32));
        avx5 = _mm256_loadu_si256((__m256i *)(lut + 40));
        avx6 = _mm256_loadu_si256((__m256i *)(lut + 48));
        avx7 = _mm256_loadu_si256((__m256i *)(lut + 56));
        // Gather src[index] as 32-bit loads (scale 2 because src holds int16_t)
        avx0 = _mm256_i32gather_epi32((int32_t *)(src), avx0, 2);
        avx1 = _mm256_i32gather_epi32((int32_t *)(src), avx1, 2);
        avx2 = _mm256_i32gather_epi32((int32_t *)(src), avx2, 2);
        avx3 = _mm256_i32gather_epi32((int32_t *)(src), avx3, 2);
        avx4 = _mm256_i32gather_epi32((int32_t *)(src), avx4, 2);
        avx5 = _mm256_i32gather_epi32((int32_t *)(src), avx5, 2);
        avx6 = _mm256_i32gather_epi32((int32_t *)(src), avx6, 2);
        avx7 = _mm256_i32gather_epi32((int32_t *)(src), avx7, 2);
        // Clear the upper 16 bits picked up by each 32-bit gather
        avx0 = _mm256_and_si256(avx0, mask);
        avx1 = _mm256_and_si256(avx1, mask);
        avx2 = _mm256_and_si256(avx2, mask);
        avx3 = _mm256_and_si256(avx3, mask);
        avx4 = _mm256_and_si256(avx4, mask);
        avx5 = _mm256_and_si256(avx5, mask);
        avx6 = _mm256_and_si256(avx6, mask);
        avx7 = _mm256_and_si256(avx7, mask);
        // Pack the 32-bit results back down to 16-bit
        sse0 = _mm_packus_epi32(_mm256_castsi256_si128(avx0), _mm256_extracti128_si256(avx0, 1));
        sse1 = _mm_packus_epi32(_mm256_castsi256_si128(avx1), _mm256_extracti128_si256(avx1, 1));
        sse2 = _mm_packus_epi32(_mm256_castsi256_si128(avx2), _mm256_extracti128_si256(avx2, 1));
        sse3 = _mm_packus_epi32(_mm256_castsi256_si128(avx3), _mm256_extracti128_si256(avx3, 1));
        sse4 = _mm_packus_epi32(_mm256_castsi256_si128(avx4), _mm256_extracti128_si256(avx4, 1));
        sse5 = _mm_packus_epi32(_mm256_castsi256_si128(avx5), _mm256_extracti128_si256(avx5, 1));
        sse6 = _mm_packus_epi32(_mm256_castsi256_si128(avx6), _mm256_extracti128_si256(avx6, 1));
        sse7 = _mm_packus_epi32(_mm256_castsi256_si128(avx7), _mm256_extracti128_si256(avx7, 1));
        // Store 8 results at a time (dst steps in int16_t units)
        _mm_storeu_si128((__m128i *)(dst), sse0);
        _mm_storeu_si128((__m128i *)(dst + 8), sse1);
        _mm_storeu_si128((__m128i *)(dst + 16), sse2);
        _mm_storeu_si128((__m128i *)(dst + 24), sse3);
        _mm_storeu_si128((__m128i *)(dst + 32), sse4);
        _mm_storeu_si128((__m128i *)(dst + 40), sse5);
        _mm_storeu_si128((__m128i *)(dst + 48), sse6);
        _mm_storeu_si128((__m128i *)(dst + 56), sse7);
    }
    else
#endif
    {
        // Plain C fallback: four table lookups per iteration
        for (int32_t i = 0; i < 64; i += 4)
        {
            *dst++ = src[*lut++];
            *dst++ = src[*lut++];
            *dst++ = src[*lut++];
            *dst++ = src[*lut++];
        }
    }
}
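For completeness, this is roughly how the routine gets called; the input values and the call site below are placeholders, not the real application code (the real tables hold permutation indices between 0 and 63):

#include <cstdint>
#include <cstdio>

// g_tables and lookup_data<>() as defined above

int main()
{
    int16_t src[64], dst[64];
    for (int i = 0; i < 64; ++i)
        src[i] = (int16_t)(i * 3);   // arbitrary placeholder input

    lookup_data<0>(dst, src);        // permute src through g_tables[0]
    lookup_data<1>(dst, src);        // ... or through g_tables[1]

    printf("%d %d\n", (int)dst[0], (int)dst[1]);
    return 0;
}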