I'm trying to implement checksum computation (2's complement addition) with NEON intrinsics. The current checksum computation is done in plain ARM code.
My implementation fetches 128 bits at a time from memory into NEON registers, does the additions in SIMD, and finally folds the 128-bit result down to a 16-bit number.
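The hot loop boils down to a 128-bit load followed by a pairwise widening add into four 32-bit accumulator lanes (excerpt from the full function below):

dataq = vreinterpretq_u16_u64(vld1q_u64((const uint64_t *)buff)); /* load 16 bytes */
sum = vpaddlq_u16(dataq);        /* 8 x u16 -> 4 x u32, so no carries are lost */
result = vaddq_u32(sum, result); /* accumulate */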
Everything appears to work correctly, but my NEON implementation takes more time than the ARM version.
ARM version takes: 0.860000 s
NEON version takes: 1.260000 s
Note:
- Profiled using utilities from "time.h" (see the harness sketch after this list)
- The checksum function is called 10,000 times from a sample application, and the time is measured over the complete run of all the calls
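Roughly how I time it (a sketch of the harness; BUF_LEN, ITERATIONS, and the zero-filled buffer are placeholders for my real test input):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define BUF_LEN 1500                        /* placeholder: size of the test payload */
#define ITERATIONS 10000

extern uint16_t do_csum(const unsigned char *buff, int len);

int main(void)
{
    static unsigned char buf[BUF_LEN];      /* placeholder test data */
    volatile uint16_t csum = 0;
    clock_t start = clock();
    for (int i = 0; i < ITERATIONS; i++)
        csum = do_csum(buf, BUF_LEN);       /* checksum the same buffer each pass */
    clock_t end = clock();
    (void)csum;
    printf("time: %f s\n", (double)(end - start) / CLOCKS_PER_SEC);
    return 0;
}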
Other details:
- Used the GNU toolchain (arm-none-linux-gnueabi-gcc) to compile the intrinsic code, not the ARM toolchain.
- Linux platform.
- C code with NEON intrinsics.
Questions:
Why does the NEON version take more time than the ARM version? (I have taken care to use the intrinsics with the lowest cycle counts wherever possible.)
How do I achieve what I'm after, i.e. efficiency with NEON?
Could someone point me to or share sample implementations (pseudo-code/algorithms/code, not theoretical papers or talks) that use ARM and NEON inter-operation?
Any help would be much appreciated.
Here's my code:
#include <stdint.h>
#include <arm_neon.h>

uint16_t do_csum(const unsigned char *buff, int len)
{
    int odd, count;
    uint32x4_t result = vdupq_n_u32(0), sum;            /* zeroed 4 x u32 accumulator */
    uint16x8_t dataq;
    uint16_t result16 = 0, disp[8] = {0};               /* disp: scratch for the debug store */

    if (len <= 0)
        goto out;
    odd = 1 & (unsigned long)buff;
    if (odd) {
        uint8x8_t data1 = vdup_n_u8(0);
        data1 = vld1_lane_u8((const uint8_t *)buff, data1, 0);           /* result = *buff << 8; */
        uint16x4_t first = vshl_n_u16(vreinterpret_u16_u8(data1), 8);
        len--;
        buff++;
        result = vaddw_u16(result, first);
    }
    count = len >> 1;                                   /* nr of 16-bit words.. */
    if (count) {
        if (2 & (unsigned long)buff) {
            uint16x4_t data2 = vdup_n_u16(0);
            data2 = vld1_lane_u16((const uint16_t *)buff, data2, 0);     /* result += *(unsigned short *)buff; */
            count--;
            len -= 2;
            buff += 2;
            result = vaddw_u16(result, data2);
        }
        count >>= 1;                                    /* nr of 32-bit words.. */
        if (count) {
            if (4 & (unsigned long)buff) {
                uint32x2_t data4 = vdup_n_u32(0);
                data4 = vld1_lane_u32((const uint32_t *)buff, data4, 0); /* result += *(unsigned int *)buff; */
                count--;
                len -= 4;
                buff += 4;
                result = vaddw_u16(result, vreinterpret_u16_u32(data4)); /* add as two u16 lanes */
            }
            count >>= 1;                                /* nr of 64-bit words.. */
            if (count) {
                if (8 & (unsigned long)buff) {
                    uint64x1_t data8 = vld1_u64((const uint64_t *)buff);
                    count--;
                    len -= 8;
                    buff += 8;
                    result = vaddw_u16(result, vreinterpret_u16_u64(data8)); /* add as four u16 lanes */
                }
                count >>= 1;                            /* nr of 128-bit words.. */
                if (count) {
                    do {
                        dataq = vreinterpretq_u16_u64(vld1q_u64((const uint64_t *)buff)); /* VLD1.64 {d0, d1}, [r0] */
                        count--;
                        buff += 16;
                        sum = vpaddlq_u16(dataq);       /* pairwise widen-add: 8 x u16 -> 4 x u32 */
                        vst1q_u16(disp, dataq);         /* debug store of the loaded vector; VST1.16 {d0, d1}, [r0] */
                        result = vaddq_u32(sum, result);
                    } while (count);
                }
                if (len & 8) {
                    uint64x1_t data8 = vld1_u64((const uint64_t *)buff);
                    buff += 8;
                    result = vaddw_u16(result, vreinterpret_u16_u64(data8));
                }
            }
            if (len & 4) {
                uint32x2_t data4 = vdup_n_u32(0);
                data4 = vld1_lane_u32((const uint32_t *)buff, data4, 0); /* result += *(unsigned int *)buff; */
                buff += 4;
                result = vaddw_u16(result, vreinterpret_u16_u32(data4));
            }
        }
        if (len & 2) {
            uint16x4_t data2 = vdup_n_u16(0);
            data2 = vld1_lane_u16((const uint16_t *)buff, data2, 0);     /* result += *(unsigned short *)buff; */
            buff += 2;
            result = vaddw_u16(result, data2);
        }
    }
    if (len & 1) {
        uint8x8_t data1 = vdup_n_u8(0);
        data1 = vld1_lane_u8((const uint8_t *)buff, data1, 0);           /* result += *buff; */
        result = vaddw_u16(result, vget_low_u16(vmovl_u8(data1)));       /* widen u8 -> u16 before the add */
    }
    result16 = from128to16(result);                     /* fold 4 x u32 -> u16 (helper sketched below) */
    if (odd)
        result16 = ((result16 >> 8) & 0xff) | ((result16 & 0xff) << 8);
out:
    return result16;
}
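from128to16() is not shown above; it folds the four 32-bit accumulator lanes down to a 16-bit value with end-around carry. A minimal sketch of such a fold (assuming a standard end-around-carry reduction; the exact code of my helper is omitted here):

static uint16_t from128to16(uint32x4_t sum)
{
    /* Reduce 4 x u32 to a single 64-bit scalar so no carry is lost. */
    uint64x2_t t = vpaddlq_u32(sum);
    uint64_t s = vgetq_lane_u64(t, 0) + vgetq_lane_u64(t, 1);
    /* Fold the carries back into the low 16 bits. */
    while (s >> 16)
        s = (s & 0xffff) + (s >> 16);
    return (uint16_t)s;
}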