The idea is to first view each __m256i row of 32 chars as 8 32-bit integers.
The matrix __m256i matrix[8] can then be seen as an 8x8 integer matrix.
The solution is obtained by transposing this 8x8 integer matrix, followed by
some byte shuffling within each __m256i register.
For the 8x8 integer transpose we use the 'standard' 8x8 float transpose from
Z boson's answer; with some casting from si256 to ps, and vice versa,
it also works for our 8x8 integer case.
Function transpose_matrix_32_8() below implements the 8x32 char transpose.
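As a baseline, the same transpose can be written as a plain scalar loop. The sketch below (the helper name transpose_matrix_32_8_scalar is not part of the SIMD code, it is only meant for checking the results) makes the intended element mapping explicit:

/* Scalar reference (sketch): transpose an 8x32 char matrix stored row-major */
/* into a 32x8 char matrix, in place. Element (i,j) moves to position (j,i). */
void transpose_matrix_32_8_scalar(unsigned char* m){
    unsigned char tmp[256];
    for (int i = 0; i < 8; i++){
        for (int j = 0; j < 32; j++){
            tmp[j * 8 + i] = m[i * 32 + j];
        }
    }
    for (int i = 0; i < 256; i++){
        m[i] = tmp[i];
    }
}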
/* gcc -O3 -m64 -Wall -mavx2 -march=haswell transpose8_32b.c */
#include <immintrin.h>
#include <stdio.h>
void print_matrix(__m256i*, int rows, int cols);
inline __m256i permute_row(__m256i row){
    /* Within each 128-bit lane: gather byte 0 of the four dwords into dword 0, */
    /* byte 1 of the four dwords into dword 1, etc.                             */
    row = _mm256_shuffle_epi8(row, _mm256_set_epi8(15,11,7,3 ,14,10,6,2 ,13,9,5,1 ,12,8,4,0 ,15,11,7,3 ,14,10,6,2 ,13,9,5,1 ,12,8,4,0));
    /* Interleave the dwords of the low and high 128-bit lanes.                 */
    row = _mm256_permutevar8x32_epi32(row, _mm256_set_epi32(7,3,6,2,5,1,4,0));
    return row;
}
void transpose_matrix_32_8(__m256i* matrix){
    /* Start with an 8x8 _epi32 transpose by casting the epi32 (si256) to _ps and using */
    /* the 8x8 float transpose from https://stackoverflow.com/a/25627536/2439725        */
    /* See that answer for alternative 8x8 float transposes with slightly fewer shuffles. */
    __m256 row0 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[0]));
    __m256 row1 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[1]));
    __m256 row2 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[2]));
    __m256 row3 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[3]));
    __m256 row4 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[4]));
    __m256 row5 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[5]));
    __m256 row6 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[6]));
    __m256 row7 = _mm256_castsi256_ps(_mm256_loadu_si256(&matrix[7]));
    __m256 __t0 = _mm256_unpacklo_ps(row0, row1);
    __m256 __t1 = _mm256_unpackhi_ps(row0, row1);
    __m256 __t2 = _mm256_unpacklo_ps(row2, row3);
    __m256 __t3 = _mm256_unpackhi_ps(row2, row3);
    __m256 __t4 = _mm256_unpacklo_ps(row4, row5);
    __m256 __t5 = _mm256_unpackhi_ps(row4, row5);
    __m256 __t6 = _mm256_unpacklo_ps(row6, row7);
    __m256 __t7 = _mm256_unpackhi_ps(row6, row7);
    __m256 __tt0 = _mm256_shuffle_ps(__t0, __t2, _MM_SHUFFLE(1,0,1,0));
    __m256 __tt1 = _mm256_shuffle_ps(__t0, __t2, _MM_SHUFFLE(3,2,3,2));
    __m256 __tt2 = _mm256_shuffle_ps(__t1, __t3, _MM_SHUFFLE(1,0,1,0));
    __m256 __tt3 = _mm256_shuffle_ps(__t1, __t3, _MM_SHUFFLE(3,2,3,2));
    __m256 __tt4 = _mm256_shuffle_ps(__t4, __t6, _MM_SHUFFLE(1,0,1,0));
    __m256 __tt5 = _mm256_shuffle_ps(__t4, __t6, _MM_SHUFFLE(3,2,3,2));
    __m256 __tt6 = _mm256_shuffle_ps(__t5, __t7, _MM_SHUFFLE(1,0,1,0));
    __m256 __tt7 = _mm256_shuffle_ps(__t5, __t7, _MM_SHUFFLE(3,2,3,2));
    row0 = _mm256_permute2f128_ps(__tt0, __tt4, 0x20);
    row1 = _mm256_permute2f128_ps(__tt1, __tt5, 0x20);
    row2 = _mm256_permute2f128_ps(__tt2, __tt6, 0x20);
    row3 = _mm256_permute2f128_ps(__tt3, __tt7, 0x20);
    row4 = _mm256_permute2f128_ps(__tt0, __tt4, 0x31);
    row5 = _mm256_permute2f128_ps(__tt1, __tt5, 0x31);
    row6 = _mm256_permute2f128_ps(__tt2, __tt6, 0x31);
    row7 = _mm256_permute2f128_ps(__tt3, __tt7, 0x31);
    /* End of the 8x8 float transpose; cast the results back to _si256: */
    __m256i row0i = _mm256_castps_si256(row0);
    __m256i row1i = _mm256_castps_si256(row1);
    __m256i row2i = _mm256_castps_si256(row2);
    __m256i row3i = _mm256_castps_si256(row3);
    __m256i row4i = _mm256_castps_si256(row4);
    __m256i row5i = _mm256_castps_si256(row5);
    __m256i row6i = _mm256_castps_si256(row6);
    __m256i row7i = _mm256_castps_si256(row7);
    /* Now we only need a simple row permutation to get the right result: */
    row0i = permute_row(row0i);
    row1i = permute_row(row1i);
    row2i = permute_row(row2i);
    row3i = permute_row(row3i);
    row4i = permute_row(row4i);
    row5i = permute_row(row5i);
    row6i = permute_row(row6i);
    row7i = permute_row(row7i);
    _mm256_storeu_si256(&matrix[0], row0i);
    _mm256_storeu_si256(&matrix[1], row1i);
    _mm256_storeu_si256(&matrix[2], row2i);
    _mm256_storeu_si256(&matrix[3], row3i);
    _mm256_storeu_si256(&matrix[4], row4i);
    _mm256_storeu_si256(&matrix[5], row5i);
    _mm256_storeu_si256(&matrix[6], row6i);
    _mm256_storeu_si256(&matrix[7], row7i);
}
int main(){
    unsigned char input[256];
    __m256i matrix[8];
    /* Define char array with entries from 0 to 255 */
    for (int i = 0; i < 256; i++){
        input[i] = i;
    }
    /* Copy the char array to matrix */
    for (int i = 0; i < 8; i++){
        matrix[i] = _mm256_loadu_si256((__m256i*)&input[i*32]);
    }
    print_matrix(matrix, 8, 32);
    transpose_matrix_32_8(matrix);
    print_matrix(matrix, 32, 8);
    return 0;
}
void print_matrix(__m256i* matrix, int rows, int cols){
    unsigned char* v;
    int i, j, k;
    /* Access the matrix as chars */
    v = (unsigned char*)matrix;
    i = 0;
    /* Print the chars v[i], i = 0, 1, 2, 3, ..., 255                      */
    /* rows and cols only control the positions of the newlines printf("\n") */
    for (k = 0; k < rows; k++){
        for (j = 0; j < cols; j++){
            printf("%4hhu", v[i]);
            i = i + 1;
        }
        printf("\n");
    }
    printf("\n");
}
The output is:
$ ./a.out
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191
192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255
0 32 64 96 128 160 192 224
1 33 65 97 129 161 193 225
2 34 66 98 130 162 194 226
3 35 67 99 131 163 195 227
4 36 68 100 132 164 196 228
5 37 69 101 133 165 197 229
6 38 70 102 134 166 198 230
7 39 71 103 135 167 199 231
8 40 72 104 136 168 200 232
9 41 73 105 137 169 201 233
10 42 74 106 138 170 202 234
11 43 75 107 139 171 203 235
12 44 76 108 140 172 204 236
13 45 77 109 141 173 205 237
14 46 78 110 142 174 206 238
15 47 79 111 143 175 207 239
16 48 80 112 144 176 208 240
17 49 81 113 145 177 209 241
18 50 82 114 146 178 210 242
19 51 83 115 147 179 211 243
20 52 84 116 148 180 212 244
21 53 85 117 149 181 213 245
22 54 86 118 150 182 214 246
23 55 87 119 151 183 215 247
24 56 88 120 152 184 216 248
25 57 89 121 153 185 217 249
26 58 90 122 154 186 218 250
27 59 91 123 155 187 219 251
28 60 92 124 156 188 220 252
29 61 93 125 157 189 221 253
30 62 94 126 158 190 222 254
31 63 95 127 159 191 223 255
This 8x32 byte or char transpose takes 40 'shuffle' instructions.
These shuffle instructions are all handled by port 5 on CPUs such as Intel
Skylake, which implies that the throughput is limited to one transpose per
40 CPU cycles.
This is not necessarily a problem if the surrounding code contains many
instructions that execute on the other ports.
However, in some cases it might be better to balance the work more evenly
across the different execution ports.
See also Z boson's answer for two interesting alternative solutions for the
8x8 transpose: these alternatives take more instructions, but fewer shuffles.
Below is an alternative AVX2 8x32 byte or char transpose with only 24 'shuffle' instructions,
but with more load, blend, insert, and shift instructions/micro-ops.
The throughput of this solution is limited to one transpose per 24 CPU cycles on Intel Skylake.
inline __m256i permute_row_v3(__m256i row){
    /* Byte shuffle within the 128-bit lanes; the mask differs from permute_row */
    /* because the epi64 unpack/shift/blend steps in transpose_matrix_32_8_v3   */
    /* leave the bytes in a different intermediate order.                       */
    row = _mm256_shuffle_epi8(row, _mm256_set_epi8(15,7,11,3 ,14,6,10,2 ,13,5,9,1 ,12,4,8,0 ,15,7,11,3 ,14,6,10,2 ,13,5,9,1 ,12,4,8,0));
    /* Interleave the dwords of the low and high 128-bit lanes.                 */
    row = _mm256_permutevar8x32_epi32(row, _mm256_set_epi32(7,3,6,2,5,1,4,0));
    return row;
}
void transpose_matrix_32_8_v3(__m256i* matrix){
    /* Alternative 32x8 transpose with 24 shuffles, but more instructions/micro-ops */
    __m128i* i_matrix = (__m128i*)matrix;
    __m256i w0 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 0])), _mm_loadu_si128(&i_matrix[ 8]), 1);
    __m256i w1 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 2])), _mm_loadu_si128(&i_matrix[10]), 1);
    __m256i w2 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 4])), _mm_loadu_si128(&i_matrix[12]), 1);
    __m256i w3 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 6])), _mm_loadu_si128(&i_matrix[14]), 1);
    __m256i w4 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 1])), _mm_loadu_si128(&i_matrix[ 9]), 1);
    __m256i w5 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 3])), _mm_loadu_si128(&i_matrix[11]), 1);
    __m256i w6 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 5])), _mm_loadu_si128(&i_matrix[13]), 1);
    __m256i w7 = _mm256_inserti128_si256(_mm256_castsi128_si256(_mm_loadu_si128(&i_matrix[ 7])), _mm_loadu_si128(&i_matrix[15]), 1);
    __m256i t0 = _mm256_unpacklo_epi64(w0, w1);
    __m256i t1 = _mm256_unpacklo_epi64(w2, w3);
    __m256i t2 = _mm256_unpackhi_epi64(w0, w1);
    __m256i t3 = _mm256_unpackhi_epi64(w2, w3);
    __m256i t4 = _mm256_unpacklo_epi64(w4, w5);
    __m256i t5 = _mm256_unpacklo_epi64(w6, w7);
    __m256i t6 = _mm256_unpackhi_epi64(w4, w5);
    __m256i t7 = _mm256_unpackhi_epi64(w6, w7);
    /* Shift and blend: interleave the 32-bit elements of the t-pairs without */
    /* using port-5 shuffle instructions.                                     */
    w0 = _mm256_srli_epi64(t0, 32);
    w1 = _mm256_slli_epi64(t1, 32);
    w2 = _mm256_srli_epi64(t2, 32);
    w3 = _mm256_slli_epi64(t3, 32);
    w4 = _mm256_srli_epi64(t4, 32);
    w5 = _mm256_slli_epi64(t5, 32);
    w6 = _mm256_srli_epi64(t6, 32);
    w7 = _mm256_slli_epi64(t7, 32);
    __m256i r0 = _mm256_blend_epi32(t0, w1, 0b10101010);
    __m256i r1 = _mm256_blend_epi32(t1, w0, 0b01010101);
    __m256i r2 = _mm256_blend_epi32(t2, w3, 0b10101010);
    __m256i r3 = _mm256_blend_epi32(t3, w2, 0b01010101);
    __m256i r4 = _mm256_blend_epi32(t4, w5, 0b10101010);
    __m256i r5 = _mm256_blend_epi32(t5, w4, 0b01010101);
    __m256i r6 = _mm256_blend_epi32(t6, w7, 0b10101010);
    __m256i r7 = _mm256_blend_epi32(t7, w6, 0b01010101);
    w0 = permute_row_v3(r0);
    w1 = permute_row_v3(r1);
    w2 = permute_row_v3(r2);
    w3 = permute_row_v3(r3);
    w4 = permute_row_v3(r4);
    w5 = permute_row_v3(r5);
    w6 = permute_row_v3(r6);
    w7 = permute_row_v3(r7);
    _mm256_storeu_si256(&matrix[0], w0);
    _mm256_storeu_si256(&matrix[1], w1);
    _mm256_storeu_si256(&matrix[2], w2);
    _mm256_storeu_si256(&matrix[3], w3);
    _mm256_storeu_si256(&matrix[4], w4);
    _mm256_storeu_si256(&matrix[5], w5);
    _mm256_storeu_si256(&matrix[6], w6);
    _mm256_storeu_si256(&matrix[7], w7);
}
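To check that both versions produce the same result, the test program can be extended along the following lines (a sketch; check_versions is a hypothetical helper, and string.h is needed for memcmp):

#include <string.h>

/* Sketch: run both transpose versions on the 0..255 input and compare. */
/* Returns 1 if the two 32x8 results are byte-identical, 0 otherwise.   */
int check_versions(void){
    unsigned char input[256];
    __m256i m1[8], m2[8];
    for (int i = 0; i < 256; i++){
        input[i] = i;
    }
    for (int i = 0; i < 8; i++){
        m1[i] = _mm256_loadu_si256((__m256i*)&input[i*32]);
        m2[i] = m1[i];
    }
    transpose_matrix_32_8(m1);
    transpose_matrix_32_8_v3(m2);
    return memcmp(m1, m2, sizeof(m1)) == 0;
}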