How can I accurately benchmark unaligned access speed on x86_64?

Posted 2020-01-24 09:21

In an answer, I stated that unaligned access has had almost the same speed as aligned access for a long time now (on x86/x86_64). I didn't have any numbers to back up that statement, so I've created a benchmark for it.

Do you see any flaws in this benchmark? Can you improve on it (I mean, increase the GB/sec it measures, so it reflects the truth better)?

#include <sys/time.h>
#include <stdio.h>

template <int N>
__attribute__((noinline))
void loop32(const char *v) {
    for (int i=0; i<N; i+=160) {
        __asm__ ("mov     (%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x04(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x08(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x0c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x10(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x14(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x18(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x1c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x20(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x24(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x28(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x2c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x30(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x34(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x38(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x3c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x40(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x44(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x48(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x4c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x50(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x54(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x58(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x5c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x60(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x64(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x68(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x6c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x70(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x74(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x78(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x7c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x80(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x84(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x88(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x8c(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x90(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x94(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x98(%0), %%eax" : : "r"(v) :"eax");
        __asm__ ("mov 0x9c(%0), %%eax" : : "r"(v) :"eax");
        v += 160;
    }
}

template <int N>
__attribute__((noinline))
void loop64(const char *v) {
    for (int i=0; i<N; i+=160) {
        __asm__ ("mov     (%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x08(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x10(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x18(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x20(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x28(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x30(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x38(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x40(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x48(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x50(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x58(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x60(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x68(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x70(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x78(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x80(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x88(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x90(%0), %%rax" : : "r"(v) :"rax");
        __asm__ ("mov 0x98(%0), %%rax" : : "r"(v) :"rax");
        v += 160;
    }
}

template <int N>
__attribute__((noinline))
void loop128a(const char *v) {
    for (int i=0; i<N; i+=160) {
        __asm__ ("movaps     (%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x10(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x20(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x30(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x40(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x50(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x60(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x70(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x80(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movaps 0x90(%0), %%xmm0" : : "r"(v) :"xmm0");
        v += 160;
    }
}

template <int N>
__attribute__((noinline))
void loop128u(const char *v) {
    for (int i=0; i<N; i+=160) {
        __asm__ ("movups     (%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x10(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x20(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x30(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x40(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x50(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x60(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x70(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x80(%0), %%xmm0" : : "r"(v) :"xmm0");
        __asm__ ("movups 0x90(%0), %%xmm0" : : "r"(v) :"xmm0");
        v += 160;
    }
}

long long int t() {
    struct timeval tv;
    gettimeofday(&tv, 0);
    return (long long int)tv.tv_sec*1000000 + tv.tv_usec;
}

int main() {
    const int ITER = 10;
    const int N = 1600000000;

    char *data = reinterpret_cast<char *>(((reinterpret_cast<unsigned long long>(new char[N+32])+15)&~15));
    for (int i=0; i<N+16; i++) data[i] = 0;

    {
        long long int t0 = t();
        for (int i=0; i<ITER*100000; i++) {
            loop32<N/100000>(data);
        }
        long long int t1 = t();
        for (int i=0; i<ITER*100000; i++) {
            loop32<N/100000>(data+1);
        }
        long long int t2 = t();
        for (int i=0; i<ITER; i++) {
            loop32<N>(data);
        }
        long long int t3 = t();
        for (int i=0; i<ITER; i++) {
            loop32<N>(data+1);
        }
        long long int t4 = t();

        printf(" 32-bit, cache: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3f%%\n", (double)N*ITER/(t1-t0)/1000, (double)N*ITER/(t2-t1)/1000, 100.0*(t2-t1)/(t1-t0)-100.0f);
        printf(" 32-bit,   mem: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3f%%\n", (double)N*ITER/(t3-t2)/1000, (double)N*ITER/(t4-t3)/1000, 100.0*(t4-t3)/(t3-t2)-100.0f);
    }
    {
        long long int t0 = t();
        for (int i=0; i<ITER*100000; i++) {
            loop64<N/100000>(data);
        }
        long long int t1 = t();
        for (int i=0; i<ITER*100000; i++) {
            loop64<N/100000>(data+1);
        }
        long long int t2 = t();
        for (int i=0; i<ITER; i++) {
            loop64<N>(data);
        }
        long long int t3 = t();
        for (int i=0; i<ITER; i++) {
            loop64<N>(data+1);
        }
        long long int t4 = t();

        printf(" 64-bit, cache: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3f%%\n", (double)N*ITER/(t1-t0)/1000, (double)N*ITER/(t2-t1)/1000, 100.0*(t2-t1)/(t1-t0)-100.0f);
        printf(" 64-bit,   mem: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3f%%\n", (double)N*ITER/(t3-t2)/1000, (double)N*ITER/(t4-t3)/1000, 100.0*(t4-t3)/(t3-t2)-100.0f);
    }
    {
        long long int t0 = t();
        for (int i=0; i<ITER*100000; i++) {
            loop128a<N/100000>(data);
        }
        long long int t1 = t();
        for (int i=0; i<ITER*100000; i++) {
            loop128u<N/100000>(data+1);
        }
        long long int t2 = t();
        for (int i=0; i<ITER; i++) {
            loop128a<N>(data);
        }
        long long int t3 = t();
        for (int i=0; i<ITER; i++) {
            loop128u<N>(data+1);
        }
        long long int t4 = t();

        printf("128-bit, cache: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3f%%\n", (double)N*ITER/(t1-t0)/1000, (double)N*ITER/(t2-t1)/1000, 100.0*(t2-t1)/(t1-t0)-100.0f);
        printf("128-bit,   mem: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3f%%\n", (double)N*ITER/(t3-t2)/1000, (double)N*ITER/(t4-t3)/1000, 100.0*(t4-t3)/(t3-t2)-100.0f);
    }
}

3 Answers

Answer by Explosion°爆炸 · 2020-01-24 09:48

Timing method. I probably would have set it up so the test was selected by a command-line arg, so I could time it with perf stat ./unaligned-test, and get perf counter results instead of just wall-clock times for each test. That way, I wouldn't have to care about turbo / power-saving, since I could measure in core clock cycles. (Not the same thing as gettimeofday / rdtsc reference cycles unless you disable turbo and other frequency-variation.)
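A minimal sketch of that setup, reusing loop32/loop64 from the question (the test names and this replacement main() are made up for illustration):

#include <cstring>
#include <cstdio>

// Sketch: select one test via argv[1] so each variant can be measured separately,
// e.g.  perf stat -e task-clock,cycles,instructions ./unaligned-test u64
int main(int argc, char **argv) {
    const int ITER = 10;
    const int N = 1600000000;
    char *data = new char[N + 64];          // new[] is typically 16B-aligned; align explicitly if you need more
    std::memset(data, 0, N + 64);           // touch the pages first so faults aren't measured
    if (argc < 2) { std::puts("usage: unaligned-test a32|u32|a64|u64"); return 1; }
    for (int i = 0; i < ITER; i++) {
        if      (!std::strcmp(argv[1], "a32")) loop32<N>(data);
        else if (!std::strcmp(argv[1], "u32")) loop32<N>(data + 1);
        else if (!std::strcmp(argv[1], "a64")) loop64<N>(data);
        else if (!std::strcmp(argv[1], "u64")) loop64<N>(data + 1);
    }
    return 0;
}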


You're only testing throughput, not latency, because none of the loads are dependent.
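For contrast, a latency test needs each load's address to depend on the previous load. A minimal pointer-chasing sketch (the buffer setup, i.e. storing a pointer to itself at the chosen misaligned offset, is assumed to happen elsewhere):

#include <cstddef>

// Sketch: dependent loads, so the loop runs at one load-use latency per iteration
// instead of at load throughput. 'p' must point at a location holding its own
// address (possibly misaligned, or straddling a cache-line or page boundary).
__attribute__((noinline))
const void *chase(const void *p, long iters) {
    for (long i = 0; i < iters; i++)
        p = *static_cast<const void *const *>(p);   // next address comes from this load
    return p;                                       // keep the dependency chain live
}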

Your cache numbers will be worse than your memory numbers, but you might not realize that it's because your cache numbers may be limited by a bottleneck on the number of split-load registers that handle loads/stores crossing a cache-line boundary. For a sequential read, the outer levels of cache still only ever see a sequence of requests for whole cache lines; it's only the execution units getting data from L1D that have to care about alignment. To test misalignment for the non-cached case, you could do scattered loads, so cache-line splits would need to bring two cache lines into L1.

Cache lines are 64B wide[1], so you're always testing a mix of cache-line splits and within-a-cache-line accesses. Testing always-split loads would bottleneck harder on the split-load microarchitectural resources. (Actually, depending on your CPU, the cache-fetch width might be narrower than the line size. Recent Intel CPUs can fetch any unaligned chunk from inside a cache line, but that's because they have special hardware to make that fast. Other CPUs may only be at their fastest when fetching within a naturally-aligned 16B chunk or something. @BeeOnRope says that AMD CPUs may care about 16B and 32B boundaries.)
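A sketch in the style of the question's loops where every 64-bit load spans two cache lines (this assumes v is 64-byte aligned, which the question's 16-byte alignment doesn't guarantee):

// Sketch: with v 64B-aligned, each 8-byte load at offset 60 within a line covers
// bytes 60..67 of that line, i.e. every load is a cache-line split.
template <int N>
__attribute__((noinline))
void loop64_allsplit(const char *v) {
    for (int i = 0; i < N; i += 64 * 4) {
        __asm__ ("mov 0x3c(%0), %%rax" : : "r"(v) : "rax");
        __asm__ ("mov 0x7c(%0), %%rax" : : "r"(v) : "rax");
        __asm__ ("mov 0xbc(%0), %%rax" : : "r"(v) : "rax");
        __asm__ ("mov 0xfc(%0), %%rax" : : "r"(v) : "rax");
        v += 64 * 4;
    }
}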

You're not testing store->load forwarding at all. For existing tests, and a nice way to visualize results for different alignments, see this stuffedcow.net blog post: Store-to-Load Forwarding and Memory Disambiguation in x86 Processors.

Passing data through memory is an important use-case, and misalignment + cache-line splits can interfere with store-forwarding on some CPUs. To properly test this, make sure you test different misalignments, not just 1:15 (vector) or 1:3 (integer). (You currently only test a +1 offset relative to 16B-alignment).
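One way to sweep misalignments in the question's load benchmark, as a sketch (base64 is a hypothetical 64-byte-aligned pointer into the same buffer; testing store-forwarding would additionally need a store before each reload):

// Sketch: time loop64 at every offset 0..63 from a 64B-aligned base, so line
// splits, near-splits and the evenly-split cases all show up, not just +1.
for (int off = 0; off < 64; off++) {
    long long int t0 = t();
    for (int i = 0; i < ITER; i++) loop64<N>(base64 + off);
    long long int t1 = t();
    printf("offset %2d: %8.4f GB/sec\n", off, (double)N * ITER / (t1 - t0) / 1000);
}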

I forget if it's just for store-forwarding or for regular loads, but there may be less penalty when a load is split evenly across a cache-line boundary (an 8:8 vector split, and maybe also 4:4 or 2:2 integer splits). You should test this. (I might be thinking of P4 lddqu or Core 2 movdqu.)

Intel's optimization manual has big tables of misalignment vs. store-forwarding from a wide store to narrow reloads that are fully contained in it. On some CPUs, this works in more cases when the wide store was naturally-aligned, even if it doesn't cross any cache-line boundaries. (Maybe on SnB/IvB, since they use a banked L1 cache with 16B banks, and splits across those can affect store forwarding. I didn't re-check the manual, but if you really want to test this experimentally, that's something you should be looking for.)


Which reminds me, misaligned loads are more likely to provoke cache-bank conflicts on SnB/IvB (because one load can touch two banks). But you won't see this loading from a single stream, because accessing the same bank in the same line twice in one cycle is fine. It's only accessing the same bank in different lines that can't happen in the same cycle. (e.g. when two memory accesses are a multiple of 128B apart.)

You don't make any attempt to test 4k page-splits. They're slower than regular cache-line splits because they also need two TLB checks. (Skylake improved them from a ~100 cycle penalty to a ~5 cycle penalty beyond the normal load-use latency, though.)
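A sketch of how to provoke a 4k split (aligned_alloc, the helper name and the offset choice here are mine, not from the question's code; requires C++17):

#include <cstdlib>

// Sketch: an 8-byte load starting 4 bytes before a page boundary touches two
// pages, so it needs two TLB lookups on top of being a cache-line split.
const char *make_4k_split_pointer() {
    char *page_base = static_cast<char *>(std::aligned_alloc(4096, 2 * 4096));
    page_base[0] = 0;
    page_base[4096] = 0;                      // fault both pages in first
    return page_base + 4096 - 4;              // an 8B load here spans the page boundary
}
// Feed the returned pointer to a loop with a 4096-byte stride, or to a
// dependent-load latency loop, to measure the page-split cost.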

You fail to test movups on aligned addresses, so you wouldn't detect that movups is slower than movaps on Core2 and earlier even when the memory is aligned at runtime. (I think unaligned mov loads up to 8 bytes were fine even in Core2, as long as they didn't cross a cache-line boundary. IDK how old a CPU you'd have to look at to find a problem with non-vector loads within a cache line. It would be a 32-bit only CPU, but you could still test 8B loads with MMX or SSE, or even x87. P5 Pentium and later guarantee that aligned 8B loads/stores are atomic, but P6 and newer guarantee that cached 8B loads/stores are atomic as long as no cache-line boundary is crossed. Unlike AMD where 8B boundaries matter for atomicity guarantees even in cacheable memory. Why is integer assignment on a naturally aligned variable atomic on x86?)
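To cover the first point above (movups on runtime-aligned data), a small addition to the question's main() could look like this sketch, reusing loop128a, loop128u, t(), ITER and N from the question:

// Sketch: time movups on the *aligned* pointer too, so movaps vs. movups on
// aligned data can be compared directly (mainly interesting on Core2 and earlier).
long long int ta0 = t();
for (int i = 0; i < ITER; i++) loop128a<N>(data);      // movaps, aligned
long long int ta1 = t();
for (int i = 0; i < ITER; i++) loop128u<N>(data);      // movups, address still aligned
long long int ta2 = t();
printf("128-bit aligned: movaps %8.4f GB/sec, movups %8.4f GB/sec\n",
       (double)N * ITER / (ta1 - ta0) / 1000, (double)N * ITER / (ta2 - ta1) / 1000);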

Go look at Agner Fog's stuff to learn more about how unaligned loads can be slower, and cook up tests to exercise those cases. Actually, Agner may not be the best resource for that, since his microarch guide mostly focuses on getting uops through the pipeline. Just a brief mention of the cost of cache-line splits, nothing in-depth about throughput vs. latency.

See also: Cacheline splits, take two, from Dark Shikari's blog (x264 lead developer), talking about unaligned load strategies on Core2: it was worth it to check for alignment and use a different strategy for the block.


Footnotes:

  1. 64B cache lines is a safe assumption these days. Pentium 3 and earlier had 32B lines. P4 had 64B lines but they were often transferred in 128B-aligned pairs. I thought I remembered reading that P4 actually had 128B lines in L2 or L3, but maybe that was just a distortion of 64B lines transferred in pairs. 7-CPU definitely says 64B lines in both levels of cache for a P4 130nm.

See also uarch-bench results for Skylake. Apparently someone has already written a tester that checks every possible misalignment relative to a cache-line boundary.


My testing on Skylake desktop (i7-6700k):

Addressing mode affects load-use latency, exactly as Intel documents in their optimization manual. I tested with integer mov rax, [rax+...], and with movzx/sx (in that case using the loaded value as an index, since it's too narrow to be a pointer).

;;;  Linux x86-64 NASM/YASM source.  Assemble into a static binary
;; public domain, originally written by peter@cordes.ca.
;; Share and enjoy.  If it breaks, you get to keep both pieces.

;;; This kind of grew while I was testing and thinking of things to test
;;; I left in some of the comments, but took out most of them and summarized the results outside this code block
;;; When I thought of something new to test, I'd edit, save, and up-arrow my assemble-and-run shell command
;;; Then edit the result into a comment in the source.

section .bss

ALIGN   2 * 1<<20   ; 2MB = 4096*512.  Uses hugepages in .bss but not in .data.  I checked in /proc/<pid>/smaps
buf:    resb 16 * 1<<20

section .text
global _start
_start:
    mov     esi, 128

;   mov             edx, 64*123 + 8
;   mov             edx, 64*123 + 0
;   mov             edx, 64*64 + 0
    xor             edx,edx
   ;; RAX points into buf, 16B into the last 4k page of a 2M hugepage

    mov             eax, buf + (2<<20)*0 + 4096*511 + 64*0 + 16
    mov             ecx, 25000000

%define ADDR(x)  x                     ; SKL: 4c
;%define ADDR(x)  x + rdx              ; SKL: 5c
;%define ADDR(x)  128+60 + x + rdx*2   ; SKL: 11c cache-line split
;%define ADDR(x)  x-8                 ; SKL: 5c
;%define ADDR(x)  x-7                 ; SKL: 12c for 4k-split (even if it's in the middle of a hugepage)
; ... many more things and a block of other result-recording comments taken out

%define dst rax



        mov             [ADDR(rax)], dst
align 32
.loop:
        mov             dst, [ADDR(rax)]
        mov             dst, [ADDR(rax)]
        mov             dst, [ADDR(rax)]
        mov             dst, [ADDR(rax)]
    dec         ecx
    jnz .loop

        xor edi,edi
        mov eax,231
    syscall

Then run with

asm-link load-use-latency.asm && disas load-use-latency && 
    perf stat -etask-clock,cycles,L1-dcache-loads,instructions,branches -r4 ./load-use-latency

+ yasm -felf64 -Worphan-labels -gdwarf2 load-use-latency.asm
+ ld -o load-use-latency load-use-latency.o
 (disassembly output so my terminal history has the asm with the perf results)

 Performance counter stats for './load-use-latency' (4 runs):

     91.422838      task-clock:u (msec)       #    0.990 CPUs utilized            ( +-  0.09% )
   400,105,802      cycles:u                  #    4.376 GHz                      ( +-  0.00% )
   100,000,013      L1-dcache-loads:u         # 1093.819 M/sec                    ( +-  0.00% )
   150,000,039      instructions:u            #    0.37  insn per cycle           ( +-  0.00% )
    25,000,031      branches:u                #  273.455 M/sec                    ( +-  0.00% )

   0.092365514 seconds time elapsed                                          ( +-  0.52% )

In this case, I was testing mov rax, [rax], naturally-aligned, so cycles = 4*L1-dcache-loads. 4c latency. I didn't disable turbo or anything like that. Since nothing is going off the core, core clock cycles is the best way to measure.

  • [base + 0..2047]: 4c load-use latency, 11c cache-line split, 11c 4k-page split (even when inside the same hugepage). See Is there a penalty when base+offset is in a different page than the base? for more details: if base+disp turns out to be in a different page than base, the load uop has to be replayed.
  • any other addressing mode: 5c latency, 11c cache-line split, 12c 4k-split (even inside a hugepage). This includes [rax - 16]. It's not disp8 vs. disp32 that makes the difference.

So: hugepages don't help avoid page-split penalties (at least not when both pages are hot in the TLB). A cache-line split makes addressing mode irrelevant, but "fast" addressing modes have 1c lower latency for normal and page-split loads.

4k-split handling is fantastically better than before, see @harold's numbers where Haswell has ~32c latency for a 4k-split. (And older CPUs may be even worse than that. I thought pre-SKL it was supposed to be ~100 cycle penalty.)

Throughput (regardless of addressing mode), measured by using a destination other than rax so the loads are independent:

  • no split: 0.5c.
  • CL-split: 1c.
  • 4k-split: ~3.8 to 3.9c (much better than pre-Skylake CPUs)

Same throughput/latency for movzx/movsx (including WORD splits), as expected because they're handled in the load port (unlike some AMD CPUs, where there's also an ALU uop).

Cache-line split loads get replayed from the RS (Reservation Station). Counters for uops_dispatched_port.port_2 + port_3 show 2x the number of mov rdi, [rdi] loads, in another test using basically the same loop. (This was a dependent-load case, not throughput-limited.) You can't detect a split load until after AGU.

Presumably when a load uop finds out that it needs data from a 2nd line, it looks for a split register (the buffer that Intel CPUs use to handle split loads), and puts the needed part of the data from the first line into that split reg. And also signals back to the RS that it needs to be replayed. (This is guesswork.)

See also Weird performance effects from nearby dependent stores in a pointer-chasing loop on IvyBridge. Adding an extra load speeds it up? for more about uop replays. (But note that's for uops dependent on a load, not the load uop itself. I think a cache miss load sets up everything to use the data when it does arrive, without having to be replayed itself. The issue is that the scheduler aggressively schedules uops consuming the data to dispatch in the cycle when load data might arrive from L2 cache, instead of waiting one extra cycle to see if it did or not. In that Q&A, the dependent uops are also mostly loads.)

So I think even if neither cache line is present, the split-load replay should happen within a few cycles, so demand-load requests for both sides of the split can be in flight at once.


SKL has two hardware page-walk units, which is probably related to the massive improvement in 4k-split performance. Even when there are no TLB misses, presumably older CPUs had to account for the fact that there might be.

It's interesting that the 4k-split throughput is non-integer. I think my measurements had enough precision and repeatability to say this. Remember this is with every load being a 4k-split, and no other work going on (except for being inside a small dec/jnz loop). If you ever have this in real code, you're doing something really wrong.

I don't have any solid guesses at why it might be non-integer, but clearly there's a lot that has to happen microarchitecturally for a 4k-split. It's still a cache-line split, and it has to check the TLB twice.

Answer by 冷血范 · 2020-01-24 10:00

I'm putting my slightly improved benchmark here. It still measures throughput only (and only at unaligned offset 1). Based on the other answers, I've added measurements of 64-byte and 4096-byte splits.

For 4k splits, there's a huge difference! But if the access doesn't cross a 64-byte boundary, there's no speed loss at all (at least on the two processors I've tested).

Looking at these numbers (and the numbers in the other answers), my conclusion is that unaligned access is fast on average (for both throughput and latency), but there are cases where it can be much slower. That doesn't mean unaligned access should be discouraged, though.

The raw numbers produced by my benchmark should be taken with a grain of salt (it's highly likely that properly written asm would outperform it), but the results mostly agree with harold's answer for Haswell (see the difference column).

Haswell:

Full:
 32-bit, cache: aligned:  33.2901 GB/sec unaligned:  29.5063 GB/sec, difference: 1.128x
 32-bit,   mem: aligned:  12.1597 GB/sec unaligned:  12.0659 GB/sec, difference: 1.008x
 64-bit, cache: aligned:  66.0368 GB/sec unaligned:  52.8914 GB/sec, difference: 1.249x
 64-bit,   mem: aligned:  16.1317 GB/sec unaligned:  16.0568 GB/sec, difference: 1.005x
128-bit, cache: aligned: 129.8730 GB/sec unaligned:  87.9791 GB/sec, difference: 1.476x
128-bit,   mem: aligned:  16.8150 GB/sec unaligned:  16.8151 GB/sec, difference: 1.000x

JustBoundary64:
 32-bit, cache: aligned:  32.5555 GB/sec unaligned:  16.0175 GB/sec, difference: 2.032x
 32-bit,   mem: aligned:   1.0044 GB/sec unaligned:   1.0001 GB/sec, difference: 1.004x
 64-bit, cache: aligned:  65.2707 GB/sec unaligned:  32.0431 GB/sec, difference: 2.037x
 64-bit,   mem: aligned:   2.0093 GB/sec unaligned:   2.0007 GB/sec, difference: 1.004x
128-bit, cache: aligned: 130.6789 GB/sec unaligned:  64.0851 GB/sec, difference: 2.039x
128-bit,   mem: aligned:   4.0180 GB/sec unaligned:   3.9994 GB/sec, difference: 1.005x

WithoutBoundary64:
 32-bit, cache: aligned:  33.2911 GB/sec unaligned:  33.2916 GB/sec, difference: 1.000x
 32-bit,   mem: aligned:  11.6156 GB/sec unaligned:  11.6223 GB/sec, difference: 0.999x
 64-bit, cache: aligned:  65.9117 GB/sec unaligned:  65.9548 GB/sec, difference: 0.999x
 64-bit,   mem: aligned:  14.3200 GB/sec unaligned:  14.3027 GB/sec, difference: 1.001x
128-bit, cache: aligned: 128.2605 GB/sec unaligned: 128.3342 GB/sec, difference: 0.999x
128-bit,   mem: aligned:  12.6352 GB/sec unaligned:  12.6218 GB/sec, difference: 1.001x

JustBoundary4096:
 32-bit, cache: aligned:  33.5500 GB/sec unaligned:   0.5415 GB/sec, difference: 61.953x
 32-bit,   mem: aligned:   0.4527 GB/sec unaligned:   0.0431 GB/sec, difference: 10.515x
 64-bit, cache: aligned:  67.1141 GB/sec unaligned:   1.0836 GB/sec, difference: 61.937x
 64-bit,   mem: aligned:   0.9112 GB/sec unaligned:   0.0861 GB/sec, difference: 10.582x
128-bit, cache: aligned: 134.2000 GB/sec unaligned:   2.1668 GB/sec, difference: 61.936x
128-bit,   mem: aligned:   1.8165 GB/sec unaligned:   0.1700 GB/sec, difference: 10.687x

Sandy Bridge (processor from 2011)

Full:
 32-bit, cache: aligned:  30.0302 GB/sec unaligned:  26.2587 GB/sec, difference: 1.144x
 32-bit,   mem: aligned:  11.0317 GB/sec unaligned:  10.9358 GB/sec, difference: 1.009x
 64-bit, cache: aligned:  59.2220 GB/sec unaligned:  41.5515 GB/sec, difference: 1.425x
 64-bit,   mem: aligned:  14.5985 GB/sec unaligned:  14.3760 GB/sec, difference: 1.015x
128-bit, cache: aligned: 115.7643 GB/sec unaligned:  45.0905 GB/sec, difference: 2.567x
128-bit,   mem: aligned:  14.8561 GB/sec unaligned:  14.8220 GB/sec, difference: 1.002x

JustBoundary64:
 32-bit, cache: aligned:  15.2127 GB/sec unaligned:   3.1037 GB/sec, difference: 4.902x
 32-bit,   mem: aligned:   0.9870 GB/sec unaligned:   0.6110 GB/sec, difference: 1.615x
 64-bit, cache: aligned:  30.2074 GB/sec unaligned:   6.2258 GB/sec, difference: 4.852x
 64-bit,   mem: aligned:   1.9739 GB/sec unaligned:   1.2194 GB/sec, difference: 1.619x
128-bit, cache: aligned:  60.7265 GB/sec unaligned:  12.4007 GB/sec, difference: 4.897x
128-bit,   mem: aligned:   3.9443 GB/sec unaligned:   2.4460 GB/sec, difference: 1.613x

WithoutBoundary64:
 32-bit, cache: aligned:  30.0348 GB/sec unaligned:  29.9801 GB/sec, difference: 1.002x
 32-bit,   mem: aligned:  10.7067 GB/sec unaligned:  10.6755 GB/sec, difference: 1.003x
 64-bit, cache: aligned:  59.1895 GB/sec unaligned:  59.1925 GB/sec, difference: 1.000x
 64-bit,   mem: aligned:  12.9404 GB/sec unaligned:  12.9307 GB/sec, difference: 1.001x
128-bit, cache: aligned: 116.4629 GB/sec unaligned: 116.0778 GB/sec, difference: 1.003x
128-bit,   mem: aligned:  11.2963 GB/sec unaligned:  11.3533 GB/sec, difference: 0.995x

JustBoundary4096:
 32-bit, cache: aligned:  30.2457 GB/sec unaligned:   0.5626 GB/sec, difference: 53.760x
 32-bit,   mem: aligned:   0.4055 GB/sec unaligned:   0.0275 GB/sec, difference: 14.726x
 64-bit, cache: aligned:  60.6175 GB/sec unaligned:   1.1257 GB/sec, difference: 53.851x
 64-bit,   mem: aligned:   0.8150 GB/sec unaligned:   0.0551 GB/sec, difference: 14.798x
128-bit, cache: aligned: 121.2121 GB/sec unaligned:   2.2455 GB/sec, difference: 53.979x
128-bit,   mem: aligned:   1.6255 GB/sec unaligned:   0.1103 GB/sec, difference: 14.744x

Here's the code:

#include <sys/time.h>
#include <stdio.h>

__attribute__((always_inline))
void load32(const char *v) {
    __asm__ ("mov     %0, %%eax" : : "m"(*v) :"eax");
}

__attribute__((always_inline))
void load64(const char *v) {
    __asm__ ("mov     %0, %%rax" : : "m"(*v) :"rax");
}

__attribute__((always_inline))
void load128a(const char *v) {
    __asm__ ("movaps     %0, %%xmm0" : : "m"(*v) :"xmm0");
}

__attribute__((always_inline))
void load128u(const char *v) {
    __asm__ ("movups     %0, %%xmm0" : : "m"(*v) :"xmm0");
}

struct Full {
    template <int S>
    static float factor() {
        return 1.0f;
    }
    template <void (*LOAD)(const char *), int S, int N>
    static void loop(const char *v) {
        for (int i=0; i<N; i+=S*16) {
            LOAD(v+S* 0);
            LOAD(v+S* 1);
            LOAD(v+S* 2);
            LOAD(v+S* 3);
            LOAD(v+S* 4);
            LOAD(v+S* 5);
            LOAD(v+S* 6);
            LOAD(v+S* 7);
            LOAD(v+S* 8);
            LOAD(v+S* 9);
            LOAD(v+S*10);
            LOAD(v+S*11);
            LOAD(v+S*12);
            LOAD(v+S*13);
            LOAD(v+S*14);
            LOAD(v+S*15);
            v += S*16;
        }
    }
};

struct JustBoundary64 {
    template <int S>
    static float factor() {
        return S/64.0f;
    }
    template <void (*LOAD)(const char *), int S, int N>
    static void loop(const char *v) {
        static_assert(N%(64*16)==0);
        for (int i=0; i<N; i+=64*16) {
            LOAD(v+64* 1-S);
            LOAD(v+64* 2-S);
            LOAD(v+64* 3-S);
            LOAD(v+64* 4-S);
            LOAD(v+64* 5-S);
            LOAD(v+64* 6-S);
            LOAD(v+64* 7-S);
            LOAD(v+64* 8-S);
            LOAD(v+64* 9-S);
            LOAD(v+64*10-S);
            LOAD(v+64*11-S);
            LOAD(v+64*12-S);
            LOAD(v+64*13-S);
            LOAD(v+64*14-S);
            LOAD(v+64*15-S);
            LOAD(v+64*16-S);
            v += 64*16;
        }
    }
};

struct WithoutBoundary64 {
    template <int S>
    static float factor() {
        return (64-S)/64.0f;
    }
    template <void (*LOAD)(const char *), int S, int N>
    static void loop(const char *v) {
        for (int i=0; i<N; i+=S*16) {
            if ((S* 1)&0x3f) LOAD(v+S* 0);
            if ((S* 2)&0x3f) LOAD(v+S* 1);
            if ((S* 3)&0x3f) LOAD(v+S* 2);
            if ((S* 4)&0x3f) LOAD(v+S* 3);
            if ((S* 5)&0x3f) LOAD(v+S* 4);
            if ((S* 6)&0x3f) LOAD(v+S* 5);
            if ((S* 7)&0x3f) LOAD(v+S* 6);
            if ((S* 8)&0x3f) LOAD(v+S* 7);
            if ((S* 9)&0x3f) LOAD(v+S* 8);
            if ((S*10)&0x3f) LOAD(v+S* 9);
            if ((S*11)&0x3f) LOAD(v+S*10);
            if ((S*12)&0x3f) LOAD(v+S*11);
            if ((S*13)&0x3f) LOAD(v+S*12);
            if ((S*14)&0x3f) LOAD(v+S*13);
            if ((S*15)&0x3f) LOAD(v+S*14);
            if ((S*16)&0x3f) LOAD(v+S*15);
            v += S*16;
        }
    }
};

struct JustBoundary4096 {
    template <int S>
    static float factor() {
        return S/4096.0f;
    }
    template <void (*LOAD)(const char *), int S, int N>
    static void loop(const char *v) {
        static_assert(N%(4096*4)==0);
        for (int i=0; i<N; i+=4096*4) {
            LOAD(v+4096*1-S);
            LOAD(v+4096*2-S);
            LOAD(v+4096*3-S);
            LOAD(v+4096*4-S);
            v += 4096*4;
        }
    }
};


long long int t() {
    struct timeval tv;
    gettimeofday(&tv, 0);
    return (long long int)tv.tv_sec*1000000 + tv.tv_usec;
}

template <typename TYPE, void (*LOADa)(const char *), void (*LOADu)(const char *), int S, int N>
void bench(const char *data, int iter, const char *name) {
    long long int t0 = t();
    for (int i=0; i<iter*100000; i++) {
        TYPE::template loop<LOADa, S, N/100000>(data);
    }
    long long int t1 = t();
    for (int i=0; i<iter*100000; i++) {
        TYPE::template loop<LOADu, S, N/100000>(data+1);
    }
    long long int t2 = t();
    for (int i=0; i<iter; i++) {
        TYPE::template loop<LOADa, S, N>(data);
    }
    long long int t3 = t();
    for (int i=0; i<iter; i++) {
        TYPE::template loop<LOADu, S, N>(data+1);
    }
    long long int t4 = t();

    printf("%s-bit, cache: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3fx\n", name, (double)N*iter/(t1-t0)/1000*TYPE::template factor<S>(), (double)N*iter/(t2-t1)/1000*TYPE::template factor<S>(), (float)(t2-t1)/(t1-t0));
    printf("%s-bit,   mem: aligned: %8.4f GB/sec unaligned: %8.4f GB/sec, difference: %0.3fx\n", name, (double)N*iter/(t3-t2)/1000*TYPE::template factor<S>(), (double)N*iter/(t4-t3)/1000*TYPE::template factor<S>(), (float)(t4-t3)/(t3-t2));
}

int main() {
    const int ITER = 10;
    const int N = 1638400000;

    char *data = reinterpret_cast<char *>(((reinterpret_cast<unsigned long long>(new char[N+8192])+4095)&~4095));
    for (int i=0; i<N+8192; i++) data[i] = 0;

    printf("Full:\n");
    bench<Full, load32, load32, 4, N>(data, ITER, " 32");
    bench<Full, load64, load64, 8, N>(data, ITER, " 64");
    bench<Full, load128a, load128u, 16, N>(data, ITER, "128");

    printf("\nJustBoundary64:\n");
    bench<JustBoundary64, load32, load32, 4, N>(data, ITER, " 32");
    bench<JustBoundary64, load64, load64, 8, N>(data, ITER, " 64");
    bench<JustBoundary64, load128a, load128u, 16, N>(data, ITER, "128");

    printf("\nWithoutBoundary64:\n");
    bench<WithoutBoundary64, load32, load32, 4, N>(data, ITER, " 32");
    bench<WithoutBoundary64, load64, load64, 8, N>(data, ITER, " 64");
    bench<WithoutBoundary64, load128a, load128u, 16, N>(data, ITER, "128");

    printf("\nJustBoundary4096:\n");
    bench<JustBoundary4096, load32, load32, 4, N>(data, ITER*10, " 32");
    bench<JustBoundary4096, load64, load64, 8, N>(data, ITER*10, " 64");
    bench<JustBoundary4096, load128a, load128u, 16, N>(data, ITER*10, "128");
}
Answer by 放我归山 · 2020-01-24 10:10

Testing 64-bit loads at various offsets (code below), my raw results on Haswell are:

aligned L: 4.01115 T: 0.500003
ofs1 L: 4.00919 T: 0.500003
ofs2 L: 4.01494 T: 0.500003
ofs3 L: 4.01403 T: 0.500003
ofs7 L: 4.01073 T: 0.500003
ofs15 L: 4.01937 T: 0.500003
ofs31 L: 4.02107 T: 0.500002
ofs60 L: 9.01482 T: 1
ofs62 L: 9.03644 T: 1
ofs4092 L: 32.3014 T: 31.1967

Apply rounding as you see fit; most of them should obviously be rounded down, but the .3 and .2 (from the page-boundary crossing) are perhaps too significant to be noise. This only tested loads with simple addresses, and only "pure loads", no forwarding.

I conclude that alignment within a cache line is not relevant for scalar loads, only crossing cache line boundaries and (especially, and for obvious reasons) crossing page boundaries matters. There seems to be no difference between crossing a cache line boundary exactly in the middle or somewhere else in this case.

AMD occasionally has some funny effects with 16-byte boundaries but I cannot test that.

And here are raw(!) xmm vector results which include the effects of pextrq, so subtract 2 cycles of latency:

aligned L: 8.05247 T: 0.500003
ofs1 L: 8.03223 T: 0.500003
ofs2 L: 8.02899 T: 0.500003
ofs3 L: 8.05598 T: 0.500003
ofs7 L: 8.03579 T: 0.500002
ofs15 L: 8.02787 T: 0.500003
ofs31 L: 8.05002 T: 0.500003
ofs58 L: 13.0404 T: 1
ofs60 L: 13.0825 T: 1
ofs62 L: 13.0935 T: 1
ofs4092 L: 36.345 T: 31.2357

The testing code was

global test_unaligned_l
proc_frame test_unaligned_l
    alloc_stack 8
[endprolog]
    mov r9, rcx
    rdtscp
    mov r8d, eax

    mov ecx, -10000000
    mov rdx, r9
.loop:
    mov rdx, [rdx]
    mov rdx, [rdx]
    add ecx, 1
    jnc .loop

    rdtscp
    sub eax, r8d

    add rsp, 8
    ret
endproc_frame

global test_unaligned_tp
proc_frame test_unaligned_tp
    alloc_stack 8
[endprolog]
    mov r9, rcx
    rdtscp
    mov r8d, eax

    mov ecx, -10000000
    mov rdx, r9
.loop:
    mov rax, [rdx]
    mov rax, [rdx]
    add ecx, 1
    jnc .loop

    rdtscp
    sub eax, r8d

    add rsp, 8
    ret
endproc_frame

For vectors largely similar but with pextrq in the latency test.

With some data prepared at various offsets, for example:

align 64
%rep 31
db 0
%endrep
unaligned31: dq unaligned31
align 4096
%rep 60
db 0
%endrep
unaligned60: dq unaligned60
align 4096
%rep 4092
db 0
%endrep
unaligned4092: dq unaligned4092

To focus a bit more on the new title, I'll describe what this is trying to do and why.

First off, there is a latency test. Loading a million things into eax from some pointer that isn't in eax (as the code in the question does) tests throughput, which is only half of the picture. For scalar loads that is trivial; for vector loads I used pairs of:

movdqu xmm0, [rdx]
pextrq rdx, xmm0, 0

The latency of pextrq is 2; that's why the latency figures for the vector loads are all 2 too high, as noted.

In order to make it easy to do this latency test, the data is a self-referential pointer. That's a fairly atypical scenario, but it shouldn't affect the timing characteristics of the loads.
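A C++ sketch of that data setup, equivalent in spirit to the NASM dq lines further up (the helper name and the memcpy-based store are mine; requires C++17 for std::aligned_alloc):

#include <cstdlib>
#include <cstring>

// Sketch: store a slot's own address at a chosen (mis)alignment, so a
// pointer-chasing latency loop keeps loading from the same place.
char *make_self_pointer(std::size_t misalign) {
    char *page = static_cast<char *>(std::aligned_alloc(4096, 2 * 4096));
    char *slot = page + misalign;              // e.g. 31, 60 or 4092, as in the data above
    std::memcpy(slot, &slot, sizeof slot);     // *slot = slot, works for any alignment
    return slot;
}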

The throughput test has two loads per loop instead of one, to avoid being bottlenecked by the loop overhead. More loads could be used, but that isn't necessary on Haswell (or anything I can think of, though in theory a µarch with lower branch throughput or higher load throughput could exist).

I'm not super careful about fencing in the TSC read or compensating for its overhead (or other overhead). I also didn't disable Turbo; I just let it run at turbo frequency and divided by the ratio between the TSC rate and the turbo frequency, which could affect timings a bit. All of these effects are tiny compared to a benchmark on the order of 1E7, and the results can be rounded anyway.

All times were best-of-30. Things such as average and variance are pointless on these micro-benchmarks, since the ground truth is not a random process with parameters we want to estimate, but some fixed integer[1] (or an integer multiple of a fraction, for throughput). Almost all noise is positive (except for the relatively theoretical case of instructions from the benchmark "leaking" in front of the first TSC read, which could even be avoided if necessary), so taking the minimum is appropriate.

Note 1: except when crossing a 4k boundary, apparently; something strange is happening there.
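A sketch of that best-of-N / take-the-minimum approach (run_once is a placeholder for one timed run of the benchmark, returning elapsed cycles):

#include <algorithm>
#include <cstdint>

// Sketch: repeat the measurement and keep the minimum, on the reasoning above
// that essentially all measurement noise adds time rather than removing it.
template <typename F>
std::uint64_t best_of(int runs, F run_once) {
    std::uint64_t best = UINT64_MAX;
    for (int i = 0; i < runs; i++)
        best = std::min<std::uint64_t>(best, run_once());
    return best;
}
// e.g. std::uint64_t cycles = best_of(30, run_one_latency_test);  // hypothetical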
