Are you sure you meant that incrementVector2 is the faster one?
As dreamcast pointed out, incrementVector1 has better temporal locality, so it should be faster.
And, from benchmarking, 1 is about 50x faster than 2 for an array size of 1000. That is somewhat surprising, because the array is small enough to become [and stay] cache hot.
Also, the disassembled code (at -O3) for incrementVector1 is much smaller than for incrementVector2: incrementVector1 is 105 bytes, incrementVector2 is 203 bytes.
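(Those figures line up with the symbol address ranges in the disassembly below: incrementVector2 runs from 0x4011c0 up to 0x40128b, i.e. 0xcb = 203 bytes, and incrementVector1 runs from 0x401290 up to 0x4012f9, i.e. 0x69 = 105 bytes, not counting the trailing alignment nops.)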
That is a bit surprising: I expected some difference, but not one this large [see below].
Here is the program output:
0.000002238 incrementVector1
0.000019535 incrementVector2
8.729x slower
0.000000280 incrementVector1
0.000022454 incrementVector2
80.193x slower
0.000000452 incrementVector1
0.000019617 incrementVector2
43.400x slower
0.000000377 incrementVector1
0.000020632 incrementVector2
54.727x slower
0.000000361 incrementVector1
0.000022612 incrementVector2
62.637x slower
Here is the program I used:
#include <stdio.h>
#include <time.h>
typedef long long tsc_t;
typedef int INT4;
tsc_t
tscget(void)
{
struct timespec ts;
tsc_t tsc;
clock_gettime(CLOCK_MONOTONIC,&ts);
tsc = ts.tv_sec;
tsc *= 1000000000;
tsc += ts.tv_nsec;
return tsc;
}
double
tscsec(tsc_t tsc)
{
double sec;
sec = tsc;
sec /= 1e9;
return sec;
}
typedef void (*incfnc_p)(INT4 *v, int n);
void
incrementVector2(INT4 *v, int n)
{
for (int k = 0; k < 100; ++k) {
for (int i = 0; i < n; ++i) {
v[i] = v[i] + 1;
}
}
}
void
incrementVector1(INT4 *v, int n)
{
for (int i = 0; i < n; ++i) {
for (int k = 0; k < 100; ++k) {
v[i] = v[i] + 1;
}
}
}
INT4 v[1000] = { 0 };
#define DOFNC(_fnc) \
dofnc(_fnc,#_fnc)
tsc_t
dofnc(incfnc_p fnc,const char *sym)
{
tsc_t tscbeg;
tsc_t tscend;
tscbeg = tscget();
fnc(v,sizeof(v) / sizeof(v[0]));
tscend = tscget();
tscend -= tscbeg;
printf("%.9f %s\n",tscsec(tscend),sym);
return tscend;
}
void
dotest(void)
{
tsc_t tsc1 = DOFNC(incrementVector1);
tsc_t tsc2 = DOFNC(incrementVector2);
double ratio;
const char *tag;
if (tsc1 > tsc2) {
tag = "faster";
ratio = tsc1;
ratio /= tsc2;
}
else {
tag = "slower";
ratio = tsc2;
ratio /= tsc1;
}
printf("%.3fx %s\n",ratio,tag);
}
int
main(void)
{
for (int testno = 1; testno <= 5; ++testno) {
printf("\n");
dotest();
}
return 0;
}
Here is the disassembly:
00000000004011c0 <incrementVector2>:
4011c0: 85 f6 test %esi,%esi
4011c2: 0f 8e be 00 00 00 jle 401286 <L06>
4011c8: 89 f2 mov %esi,%edx
4011ca: 41 89 f3 mov %esi,%r11d
4011cd: 66 0f 6f 0d 8b 0e 00 movdqa 0xe8b(%rip),%xmm1 # 402060 <__dso_handle+0x58>
4011d4: 00
4011d5: 49 89 f8 mov %rdi,%r8
4011d8: c1 ea 02 shr $0x2,%edx
4011db: 44 8d 56 ff lea -0x1(%rsi),%r10d
4011df: 41 83 e3 fc and $0xfffffffc,%r11d
4011e3: 41 b9 01 00 00 00 mov $0x1,%r9d
4011e9: 48 c1 e2 04 shl $0x4,%rdx
4011ed: 48 01 fa add %rdi,%rdx
4011f0:L00 41 83 fa 02 cmp $0x2,%r10d
4011f4: 0f 86 8d 00 00 00 jbe 401287 <L07>
4011fa: 48 89 f8 mov %rdi,%rax
4011fd:L01 f3 0f 6f 00 movdqu (%rax),%xmm0
401201: 48 83 c0 10 add $0x10,%rax
401205: 66 0f fe c1 paddd %xmm1,%xmm0
401209: 0f 11 40 f0 movups %xmm0,-0x10(%rax)
40120d: 48 39 d0 cmp %rdx,%rax
401210: 75 eb jne 4011fd <L01>
401212: 44 89 d8 mov %r11d,%eax
401215: 44 39 de cmp %r11d,%esi
401218: 74 22 je 40123c <L03>
40121a:L02 48 63 c8 movslq %eax,%rcx
40121d: 83 04 8f 02 addl $0x2,(%rdi,%rcx,4)
401221: 8d 48 01 lea 0x1(%rax),%ecx
401224: 39 ce cmp %ecx,%esi
401226: 7e 14 jle 40123c <L03>
401228: 48 63 c9 movslq %ecx,%rcx
40122b: 83 c0 02 add $0x2,%eax
40122e: 83 04 8f 02 addl $0x2,(%rdi,%rcx,4)
401232: 39 c6 cmp %eax,%esi
401234: 7e 06 jle 40123c <L03>
401236: 48 98 cltq
401238: 83 04 87 02 addl $0x2,(%rdi,%rax,4)
40123c:L03 41 8d 41 01 lea 0x1(%r9),%eax
401240: 41 83 c1 02 add $0x2,%r9d
401244: 41 83 f9 63 cmp $0x63,%r9d
401248: 75 a6 jne 4011f0 <L00>
40124a: be 65 00 00 00 mov $0x65,%esi
40124f: 4a 8d 7c 97 04 lea 0x4(%rdi,%r10,4),%rdi
401254: 29 c6 sub %eax,%esi
401256: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
40125d: 00 00 00
401260:L04 41 8b 10 mov (%r8),%edx
401263: 8d 42 01 lea 0x1(%rdx),%eax
401266: 01 f2 add %esi,%edx
401268: 0f 1f 84 00 00 00 00 nopl 0x0(%rax,%rax,1)
40126f: 00
401270:L05 89 c1 mov %eax,%ecx
401272: 83 c0 01 add $0x1,%eax
401275: 39 d0 cmp %edx,%eax
401277: 75 f7 jne 401270 <L05>
401279: 41 89 08 mov %ecx,(%r8)
40127c: 49 83 c0 04 add $0x4,%r8
401280: 49 39 f8 cmp %rdi,%r8
401283: 75 db jne 401260 <L04>
401285: c3 retq
401286:L06 c3 retq
401287:L07 31 c0 xor %eax,%eax
401289: eb 8f jmp 40121a <L02>
40128b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401290 <incrementVector1>:
401290: 85 f6 test %esi,%esi
401292: 7e 5f jle 4012f3 <L02>
401294: 8d 46 ff lea -0x1(%rsi),%eax
401297: 83 f8 02 cmp $0x2,%eax
40129a: 76 59 jbe 4012f5 <L04>
40129c: 89 f2 mov %esi,%edx
40129e: 66 0f 6f 0d ca 0d 00 movdqa 0xdca(%rip),%xmm1 # 402070 <__dso_handle+0x68>
4012a5: 00
4012a6: 48 89 f8 mov %rdi,%rax
4012a9: c1 ea 02 shr $0x2,%edx
4012ac: 48 c1 e2 04 shl $0x4,%rdx
4012b0: 48 01 fa add %rdi,%rdx
4012b3:L00 f3 0f 6f 00 movdqu (%rax),%xmm0
4012b7: 48 83 c0 10 add $0x10,%rax
4012bb: 66 0f fe c1 paddd %xmm1,%xmm0
4012bf: 0f 11 40 f0 movups %xmm0,-0x10(%rax)
4012c3: 48 39 d0 cmp %rdx,%rax
4012c6: 75 eb jne 4012b3 <L00>
4012c8: 89 f0 mov %esi,%eax
4012ca: 83 e0 fc and $0xfffffffc,%eax
4012cd: 39 f0 cmp %esi,%eax
4012cf: 74 23 je 4012f4 <L03>
4012d1:L01 48 63 d0 movslq %eax,%rdx
4012d4: 83 04 97 64 addl $0x64,(%rdi,%rdx,4)
4012d8: 8d 50 01 lea 0x1(%rax),%edx
4012db: 39 f2 cmp %esi,%edx
4012dd: 7d 14 jge 4012f3 <L02>
4012df: 48 63 d2 movslq %edx,%rdx
4012e2: 83 c0 02 add $0x2,%eax
4012e5: 83 04 97 64 addl $0x64,(%rdi,%rdx,4)
4012e9: 39 c6 cmp %eax,%esi
4012eb: 7e 06 jle 4012f3 <L02>
4012ed: 48 98 cltq
4012ef: 83 04 87 64 addl $0x64,(%rdi,%rax,4)
4012f3:L02 c3 retq
4012f4:L03 c3 retq
4012f5:L04 31 c0 xor %eax,%eax
4012f7: eb d8 jmp 4012d1 <L01>
4012f9: 0f 1f 80 00 00 00 00 nopl 0x0(%rax)
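The disassembly explains most of the gap. For incrementVector1, gcc appears to collapse the inner k loop completely: each element gets bumped by 100 in a single (vectorized) pass over the array. Note the addl $100,(%rdi,%rdx,4) in the scalar tail; .LC2 is presumably four copies of 100 for the paddd. For incrementVector2, the vectorized body adds 2 per element per pass and the outer loop runs until %r9d reaches 99, so it still makes on the order of 50 passes over the array, which is in the same ballpark as the measured ratios above. Roughly, incrementVector1 ends up behaving like the sketch below (my reconstruction of the effective transformation, not actual compiler output; the function name is made up, and INT4 is the typedef from the program above):
void
incrementVector1_effective(INT4 *v, int n)
{
	/* the 100-iteration k loop is folded into a single constant add,
	   so the whole function is one pass over v (vectorized with paddd) */
	for (int i = 0; i < n; ++i)
		v[i] += 100;
}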
Here is the -S -fverbose-asm output:
.globl incrementVector2
.type incrementVector2, @function
incrementVector2:
.LFB13:
.cfi_startproc
testl %esi, %esi # n
jle .L19 #,
movl %esi, %edx # n, bnd.1
movl %esi, %r11d # n, niters_vector_mult_vf.2
movdqa .LC1(%rip), %xmm1 #, tmp152
movq %rdi, %r8 # v, ivtmp.33
shrl $2, %edx #,
leal -1(%rsi), %r10d #,
andl $-4, %r11d #, niters_vector_mult_vf.2
# orig.c:38: for (int i = 0; i < n; ++i) {
movl $1, %r9d #, ivtmp.47
salq $4, %rdx #, tmp150
addq %rdi, %rdx # v, _144
.L10:
cmpl $2, %r10d #, _72
jbe .L14 #,
# orig.c:36: {
movq %rdi, %rax # v, ivtmp.36
.L8:
# orig.c:39: v[i] = v[i] + 1;
movdqu (%rax), %xmm0 # MEM[base: _138, offset: 0B], vect__42.13
addq $16, %rax #, ivtmp.36
paddd %xmm1, %xmm0 # tmp152, vect__42.13
# orig.c:39: v[i] = v[i] + 1;
movups %xmm0, -16(%rax) # vect__42.13, MEM[base: _138, offset: 0B]
cmpq %rdx, %rax # _144, ivtmp.36
jne .L8 #,
# orig.c:38: for (int i = 0; i < n; ++i) {
movl %r11d, %eax # niters_vector_mult_vf.2, i
cmpl %r11d, %esi # niters_vector_mult_vf.2, n
je .L9 #,
.L13:
# orig.c:39: v[i] = v[i] + 1;
movslq %eax, %rcx # i, i
# orig.c:39: v[i] = v[i] + 1;
addl $2, (%rdi,%rcx,4) #, *_40
# orig.c:38: for (int i = 0; i < n; ++i) {
leal 1(%rax), %ecx #, i
# orig.c:38: for (int i = 0; i < n; ++i) {
cmpl %ecx, %esi # i, n
jle .L9 #,
# orig.c:39: v[i] = v[i] + 1;
movslq %ecx, %rcx # i, i
# orig.c:38: for (int i = 0; i < n; ++i) {
addl $2, %eax #, i
# orig.c:39: v[i] = v[i] + 1;
addl $2, (%rdi,%rcx,4) #, *_107
# orig.c:38: for (int i = 0; i < n; ++i) {
cmpl %eax, %esi # i, n
jle .L9 #,
# orig.c:39: v[i] = v[i] + 1;
cltq
# orig.c:39: v[i] = v[i] + 1;
addl $2, (%rdi,%rax,4) #, *_58
.L9:
leal 1(%r9), %eax #, _147
addl $2, %r9d #, ivtmp.47
cmpl $99, %r9d #, ivtmp.47
jne .L10 #,
movl $101, %esi #, tmp147
leaq 4(%rdi,%r10,4), %rdi #, _134
subl %eax, %esi # _147, tmp146
.p2align 4,,10
.p2align 3
.L12:
movl (%r8), %edx # MEM[base: _126, offset: 0B], _2
leal 1(%rdx), %eax #, ivtmp.21
addl %esi, %edx # tmp146, _122
.p2align 4,,10
.p2align 3
.L11:
movl %eax, %ecx # ivtmp.21, _25
addl $1, %eax #, ivtmp.21
# orig.c:38: for (int i = 0; i < n; ++i) {
cmpl %edx, %eax # _122, ivtmp.21
jne .L11 #,
movl %ecx, (%r8) # _25, MEM[base: _126, offset: 0B]
addq $4, %r8 #, ivtmp.33
# orig.c:37: for (int k = 0; k < 100; ++k) {
cmpq %rdi, %r8 # _134, ivtmp.33
jne .L12 #,
ret
.L19:
ret
.L14:
# orig.c:38: for (int i = 0; i < n; ++i) {
xorl %eax, %eax # i
jmp .L13 #
.cfi_endproc
.LFE13:
.size incrementVector2, .-incrementVector2
.p2align 4,,15
.globl incrementVector1
.type incrementVector1, @function
incrementVector1:
.LFB14:
.cfi_startproc
# orig.c:47: for (int i = 0; i < n; ++i) {
testl %esi, %esi # n
jle .L20 #,
leal -1(%rsi), %eax #, tmp118
cmpl $2, %eax #, tmp118
jbe .L27 #,
movl %esi, %edx # n, bnd.51
movdqa .LC2(%rip), %xmm1 #, tmp131
movq %rdi, %rax # v, ivtmp.62
shrl $2, %edx #,
salq $4, %rdx #, tmp121
addq %rdi, %rdx # v, _58
.L23:
movdqu (%rax), %xmm0 # MEM[base: _53, offset: 0B], vect__8.57
addq $16, %rax #, ivtmp.62
paddd %xmm1, %xmm0 # tmp131, vect__8.57
movups %xmm0, -16(%rax) # vect__8.57, MEM[base: _53, offset: 0B]
cmpq %rdx, %rax # _58, ivtmp.62
jne .L23 #,
movl %esi, %eax # n, tmp.53
andl $-4, %eax #, tmp.53
cmpl %esi, %eax # n, tmp.53
je .L29 #,
.L22:
# orig.c:49: v[i] = v[i] + 1;
movslq %eax, %rdx # tmp.53, tmp.53
addl $100, (%rdi,%rdx,4) #, *_3
# orig.c:47: for (int i = 0; i < n; ++i) {
leal 1(%rax), %edx #, i
# orig.c:47: for (int i = 0; i < n; ++i) {
cmpl %esi, %edx # n, i
jge .L20 #,
# orig.c:49: v[i] = v[i] + 1;
movslq %edx, %rdx # i, i
# orig.c:47: for (int i = 0; i < n; ++i) {
addl $2, %eax #, i
addl $100, (%rdi,%rdx,4) #, *_47
# orig.c:47: for (int i = 0; i < n; ++i) {
cmpl %eax, %esi # i, n
jle .L20 #,
# orig.c:49: v[i] = v[i] + 1;
cltq
addl $100, (%rdi,%rax,4) #, *_25
.L20:
# orig.c:52: }
ret
.L29:
ret
.L27:
# orig.c:47: for (int i = 0; i < n; ++i) {
xorl %eax, %eax # tmp.53
jmp .L22 #
.cfi_endproc
.LFE14:
.size incrementVector1, .-incrementVector1
.section .rodata.str1.1,"aMS",@progbits,1
.LC3: