Compact AVX2 register so selected integers are contiguous according to mask

Posted 2019-06-27 05:20

Question:

This question already has an answer here:

  • AVX2 what is the most efficient way to pack left based on a mask?

In the question Optimizing Array Compaction, the top answer states:

SSE/AVX registers with latest instruction sets allow a better approach. We can use the result of PMOVMSKB directly, transforming it to the control register for something like PSHUFB.

Is this possible with Haswell (AVX2)? Or does it require one of the flavors of AVX512?

I've got an AVX2 vector containing int32s, and a corresponding vector holding the result of a compare. I want to shuffle it so that the elements whose corresponding MSB is set in the mask (compare true) end up contiguous at the low end of the vector.

The best I can see is to get a bit mask with _mm256_movemask_ps/vmovmskps (no *d variant?) and then use it to index a 256-entry lookup table of AVX2 vectors that gives a shuffle mask for the cross-lane _mm256_permutevar8x32_epi32/vpermd, roughly as in the sketch below.
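Something like the following is what I have in mind (untested sketch; shuffle_lut, lut_count, build_lut and compact8 are just names I made up for illustration, and build_lut would need to be called once up front):

#include <immintrin.h>

static int shuffle_lut[256][8];  /* vpermd index vectors, one per 8-bit mask */
static int lut_count[256];       /* number of selected lanes for each mask   */

static void build_lut(void) {
    for (int m = 0; m < 256; m++) {
        int k = 0;
        for (int b = 0; b < 8; b++)              /* kept lanes packed to the front */
            if (m & (1 << b)) shuffle_lut[m][k++] = b;
        lut_count[m] = k;
        while (k < 8) shuffle_lut[m][k++] = 0;   /* remaining lanes are don't-care */
    }
}

/* Compact one vector of 8 int32s: lanes where `keep` is all-ones are moved to
   the low end and stored at dst. Returns the number of kept elements. The
   store always writes 32 bytes, so dst needs slack past the last element. */
static int compact8(__m256i v, __m256i keep, int *dst) {
    int m = _mm256_movemask_ps(_mm256_castsi256_ps(keep)); /* 1 bit per 32-bit lane */
    __m256i idx = _mm256_loadu_si256((const __m256i*)shuffle_lut[m]);
    _mm256_storeu_si256((__m256i*)dst, _mm256_permutevar8x32_epi32(v, idx));
    return lut_count[m];
}

The table is 256 × 32 bytes; it could presumably be shrunk by storing byte indices and widening them on load, but is a lookup table the right idea in the first place?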

Answer 1:

The first thing to do is find a fast scalar function. Here is a version which does not use a branch: when x[i] is zero it still writes to y[cnt], but cnt does not advance, so the next kept element simply overwrites that slot.

inline int compact(int *x, int *y, const int n) {
    int cnt = 0;
    for(int i=0; i<n; i++) {
        int cut = x[i]!=0;
        y[cnt] = cut*x[i];
        cnt += cut;
    }
    return cnt;
}

The best result with SIMD probably depends on the distribution of zeros. The following code should work well for distributions that are sparse or dense, i.e. long runs of zeros and of non-zeros. If the distribution is more even I don't know whether this code will have any benefit, but it will still give the correct result.

Here is an AVX2 version I tested.

int compact_AVX2(int *x, int *y, int n) {
    int i =0, cnt = 0;
    for(i=0; i<n-8; i+=8) {
        __m256i x4 = _mm256_loadu_si256((__m256i*)&x[i]);
        __m256i cmp = _mm256_cmpeq_epi32(x4, _mm256_setzero_si256());
        int mask = _mm256_movemask_epi8(cmp);
        if(mask == -1) continue; //all zeros
        if(mask) {
            cnt += compact(&x[i],&y[cnt], 8);
        }
        else {
            _mm256_storeu_si256((__m256i*)&y[cnt], x4);
            cnt +=8;
        }       
    }
    cnt += compact(&x[i], &y[cnt], n-i); // scalar cleanup for the remaining 1 to 8 elements
    return cnt;
}

Here is the SSE2 version I tested.

int compact_SSE2(int *x, int *y, int n) {
    int i =0, cnt = 0;
    for(i=0; i<n-4; i+=4) {
        __m128i x4 = _mm_loadu_si128((__m128i*)&x[i]);
        __m128i cmp = _mm_cmpeq_epi32(x4, _mm_setzero_si128());
        int mask = _mm_movemask_epi8(cmp);
        if(mask == 0xffff) continue; //all zeroes
        if(mask) {
            cnt += compact(&x[i],&y[cnt], 4);
        }
        else {
            _mm_storeu_si128((__m128i*)&y[cnt], x4);
            cnt +=4;
        }       
    }
    cnt += compact(&x[i], &y[cnt], n-i); // scalar cleanup for the remaining 1 to 4 elements
    return cnt;
}

Here is a full test.

#include <stdio.h>
#include <stdlib.h>
#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)
#include <x86intrin.h>                
#else
#include <immintrin.h>                
#endif

#define N 50

inline int compact(int *x, int *y, const int n) {
    int cnt = 0;
    for(int i=0; i<n; i++) {
        int cut = x[i]!=0;
        y[cnt] = cut*x[i];
        cnt += cut;
    }
    return cnt;
}

int compact_SSE2(int *x, int *y, int n) {
    int i =0, cnt = 0;
    for(i=0; i<n-4; i+=4) {
        __m128i x4 = _mm_loadu_si128((__m128i*)&x[i]);
        __m128i cmp = _mm_cmpeq_epi32(x4, _mm_setzero_si128());
        int mask = _mm_movemask_epi8(cmp);
        if(mask == 0xffff) continue; //all zeroes
        if(mask) {
            cnt += compact(&x[i],&y[cnt], 4);
        }
        else {
            _mm_storeu_si128((__m128i*)&y[cnt], x4);
            cnt +=4;
        }
    }
    cnt += compact(&x[i], &y[cnt], n-i); // scalar cleanup for the remaining 1 to 4 elements
    return cnt;
}

int compact_AVX2(int *x, int *y, int n) {
    int i =0, cnt = 0;
    for(i=0; i<n-8; i+=8) {
        __m256i x4 = _mm256_loadu_si256((__m256i*)&x[i]);
        __m256i cmp = _mm256_cmpeq_epi32(x4, _mm256_setzero_si256());
        int mask = _mm256_movemask_epi8(cmp);
        if(mask == -1) continue; //all zeros
        if(mask) {
            cnt += compact(&x[i],&y[cnt], 8);
        }
        else {
            _mm256_storeu_si256((__m256i*)&y[cnt], x4);
            cnt +=8;
        }       
    }
    cnt += compact(&x[i], &y[cnt], n-i); // scalar cleanup for the remaining 1 to 8 elements
    return cnt;
}

int main() {
    int x[N], y[N];
    for(int i=0; i<N; i++) x[i] = rand()%10;
    //int cnt = compact_SSE2(x,y,N);
    int cnt = compact_AVX2(x,y,N);
    for(int i=0; i<N; i++) printf("%d ", x[i]); printf("\n");
    for(int i=0; i<cnt; i++) printf("%d ", y[i]); printf("\n");
}
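
Note: this needs AVX2 enabled at build time, e.g. something like gcc -O3 -mavx2 with GCC/Clang or /arch:AVX2 with MSVC.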