-rw-r--r--   README.x86-64       13
-rw-r--r--   configure.in         9
-rw-r--r--   cpu_mode_x86_64.c   27
-rw-r--r--   dotprod.c           16
-rw-r--r--   encode_rs_8.c        8
-rw-r--r--   fec.h               14
-rw-r--r--   makefile.in         39
-rw-r--r--   peakval.c           11
-rw-r--r--   sse2bfly27-64.s    210
-rw-r--r--   sse2bfly29-64.s    254
-rw-r--r--   sumsq.c             10
-rw-r--r--   viterbi27.c         27
-rw-r--r--   viterbi29.c         26
-rw-r--r--   viterbi39.c         26
-rw-r--r--   viterbi615.c        26
15 files changed, 696 insertions, 20 deletions
diff --git a/README.x86-64 b/README.x86-64
new file mode 100644
index 0000000..bb4450c
--- /dev/null
+++ b/README.x86-64
@@ -0,0 +1,13 @@
+This library has been modified to compile natively on x86-64.
+
+An attempt has been made to adapt the assembly code, but it is unfinished:
+shared libraries on x86-64 must be built as position-independent code
+(PIC), and the ported assembly does not yet meet that requirement.
+
+This build therefore uses only the portable C implementation, which is
+certainly slower than the SSE2 assembly that could ideally be used.
+
+In short, we trade performance for the ability to compile on x86-64.
+
+Feb 2012
+Matthias P. Braendli, HB9EGM
diff --git a/configure.in b/configure.in
index 4e4110b..03d78c4 100644
--- a/configure.in
+++ b/configure.in
@@ -36,8 +36,15 @@ fi
 AC_CANONICAL_SYSTEM
 
 case $target_cpu in
+x86_64)
+	ARCH_OPTION="-msse2"
+	MLIBS="dotprod_port.o \
+	       peakval_port.o \
+	       sumsq.o sumsq_port.o \
+	       cpu_mode_x86_64.o"
+	;;
 i386|i486|i586|i686)
-	ARCH_OPTION="-march=$target_cpu"
+	ARCH_OPTION="-march=$target_cpu"
 	MLIBS="viterbi27_mmx.o mmxbfly27.o viterbi27_sse.o ssebfly27.o viterbi27_sse2.o sse2bfly27.o \
 	viterbi29_mmx.o mmxbfly29.o viterbi29_sse.o ssebfly29.o viterbi29_sse2.o sse2bfly29.o \
 	viterbi39_sse2.o viterbi39_sse.o viterbi39_mmx.o \
diff --git a/cpu_mode_x86_64.c b/cpu_mode_x86_64.c
new file mode 100644
index 0000000..758096a
--- /dev/null
+++ b/cpu_mode_x86_64.c
@@ -0,0 +1,27 @@
+/* Determine CPU support for SIMD
+ * Copyright 2004 Phil Karn, KA9Q
+ *
+ * Modified in 2012 by Matthias P. Braendli, HB9EGM
+ */
+#include <stdio.h>
+#include "fec.h"
+
+/* Various SIMD instruction set names */
+char *Cpu_modes[] = {"Unknown","Portable C","x86 Multi Media Extensions (MMX)",
+		     "x86 Streaming SIMD Extensions (SSE)",
+		     "x86 Streaming SIMD Extensions 2 (SSE2)",
+		     "PowerPC G4/G5 Altivec/Velocity Engine"};
+
+enum cpu_mode Cpu_mode;
+
+void find_cpu_mode(void){
+
+  int f; /* unused; leftover from the i386 cpuid probe */
+  if(Cpu_mode != UNKNOWN)
+    return;
+
+  /* According to the Wikipedia entry on x86-64, all x86-64 processors have SSE2. */
+  /* The same assumption is also made in other source files! */
+  Cpu_mode = SSE2;
+  fprintf(stderr,"CPU: x86-64, using portable C implementation\n");
+}
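With Cpu_mode pinned to SSE2, every dispatcher patched below routes the SSE2 case to a portable _port back end on x86-64. A minimal usage sketch of that dispatch, assuming only the fec.h declarations shown in this patch (the demo program itself is hypothetical):

#include <stdio.h>
#include "fec.h"

/* Hypothetical demo: run-time selection of a dot-product implementation.
 * find_cpu_mode() sets the global Cpu_mode once; on x86-64 it is pinned
 * to SSE2, so initdp()/dotprod()/freedp() below all land in the portable
 * *_port back ends as wired up by this patch. */
int main(void){
  signed short coeffs[8] = {1,2,3,4,5,6,7,8};
  signed short data[8]   = {8,7,6,5,4,3,2,1};
  void *dp;

  find_cpu_mode();               /* logs the detected mode to stderr */
  dp = initdp(coeffs,8);         /* dispatches on Cpu_mode internally */
  printf("dotprod = %ld\n",dotprod(dp,data));
  freedp(dp);
  return 0;
}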
diff --git a/dotprod.c b/dotprod.c
--- a/dotprod.c
+++ b/dotprod.c
@@ -41,6 +41,11 @@ void *initdp(signed short coeffs[],int len){
     return initdp_sse2(coeffs,len);
 #endif
 
+#ifdef __x86_64__
+  case SSE2:
+    return initdp_port(coeffs,len);
+#endif
+
 #ifdef __VEC__
   case ALTIVEC:
     return initdp_av(coeffs,len);
@@ -61,6 +66,12 @@ void freedp(void *p){
   case SSE2:
     return freedp_sse2(p);
 #endif
+
+#ifdef __x86_64__
+  case SSE2:
+    return freedp_port(p);
+#endif
+
 #ifdef __VEC__
   case ALTIVEC:
     return freedp_av(p);
@@ -84,6 +95,11 @@ long dotprod(void *p,signed short a[]){
     return dotprod_sse2(p,a);
 #endif
 
+#ifdef __x86_64__
+  case SSE2:
+    return dotprod_port(p,a);
+#endif
+
 #ifdef __VEC__
   case ALTIVEC:
     return dotprod_av(p,a);
diff --git a/encode_rs_8.c b/encode_rs_8.c
index 5aaecca..d21294c 100644
--- a/encode_rs_8.c
+++ b/encode_rs_8.c
@@ -34,6 +34,8 @@ void encode_rs_8(data_t *data, data_t *parity,int pad){
     } else { /* No SIMD at all */
       cpu_mode = PORT;
     }
+#elif __x86_64__
+    cpu_mode = SSE2;
 #elif __VEC__
     /* Ask the OS if we have Altivec support */
     int selectors[2] = { CTL_HW, HW_VECTORUNIT };
@@ -54,11 +56,17 @@ void encode_rs_8(data_t *data, data_t *parity,int pad){
     encode_rs_8_av(data,parity,pad);
     return;
 #endif
+
 #if __i386__
   case MMX:
   case SSE:
   case SSE2:
 #endif
+
+#ifdef __x86_64__
+  case SSE2:
+#endif
+
   default:
     encode_rs_8_c(data,parity,pad);
     return;
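encode_rs_8() is the fixed-field CCSDS RS(255,223) codec; the new #elif makes x86-64 declare SSE2 and then fall through the switch into encode_rs_8_c(). A hypothetical call, assuming the usual libfec contract of 223-pad data bytes in and 32 parity bytes out:

#include <string.h>
#include "fec.h"

/* Hypothetical demo of the fixed-field CCSDS encoder: fill a full
 * 223-byte data block and get 32 parity bytes back.  pad = 0 means
 * an unshortened RS(255,223) codeword. */
int main(void){
  unsigned char data[223], parity[32];

  memset(data, 0xA5, sizeof(data));  /* arbitrary payload pattern */
  encode_rs_8(data, parity, 0);      /* dispatches on cpu_mode internally */
  return 0;
}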
diff --git a/fec.h b/fec.h
--- a/fec.h
+++ b/fec.h
@@ -214,7 +214,6 @@ int init_viterbi615_sse2(void *p,int starting_state);
 int chainback_viterbi615_sse2(void *p,unsigned char *data,unsigned int nbits,unsigned int endstate);
 void delete_viterbi615_sse2(void *p);
 int update_viterbi615_blk_sse2(void *p,unsigned char *syms,int nbits);
-
 #endif
 
 void *create_viterbi615_port(int len);
@@ -262,9 +261,9 @@ extern enum cpu_mode {UNKNOWN=0,PORT,MMX,SSE,SSE2,ALTIVEC} Cpu_mode;
 void find_cpu_mode(void); /* Call this once at startup to set Cpu_mode */
 
 /* Determine parity of argument: 1 = odd, 0 = even */
-#ifdef __i386__
+#if defined(__i386__) || defined(__x86_64__)
 static inline int parityb(unsigned char x){
-  __asm__ __volatile__ ("test %1,%1;setpo %0" : "=g"(x) : "r" (x));
+  __asm__ __volatile__ ("test %1,%1;setpo %0" : "=q"(x) : "q" (x));
   return x;
 }
 #else
@@ -317,6 +316,12 @@ void freedp_sse2(void *dp);
 long dotprod_sse2(void *dp,signed short a[]);
 #endif
 
+#ifdef __x86_64__
+void *initdp_sse2(signed short coeffs[],int len);
+void freedp_sse2(void *dp);
+long dotprod_sse2(void *dp,signed short a[]);
+#endif
+
 #ifdef __VEC__
 void *initdp_av(signed short coeffs[],int len);
 void freedp_av(void *dp);
@@ -332,6 +337,9 @@ unsigned long long sumsq_mmx(signed short *in,int cnt);
 unsigned long long sumsq_sse(signed short *in,int cnt);
 unsigned long long sumsq_sse2(signed short *in,int cnt);
 #endif
+#ifdef __x86_64__
+unsigned long long sumsq_sse2(signed short *in,int cnt);
+#endif
 #ifdef __VEC__
 unsigned long long sumsq_av(signed short *in,int cnt);
 #endif
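The parityb() change is twofold: the #if now also covers x86-64, and the constraints tighten from "=g"/"r" to "q"/"q" so gcc can only pick byte-addressable registers for the setpo instruction (with "=g", gcc could select an operand with no byte form on i386). For reference, a portable C equivalent of what the asm computes; this is an illustration, not the library's actual #else fallback:

#include <stdio.h>

/* Illustrative sketch: XOR-fold the byte so bit 0 holds its parity
 * (1 = odd number of set bits, 0 = even), the same result the inline
 * asm obtains from setpo after `test %1,%1` sets the parity flag. */
static inline int parityb_portable(unsigned char x){
  x ^= x >> 4;
  x ^= x >> 2;
  x ^= x >> 1;
  return x & 1;
}

int main(void){
  printf("%d %d\n", parityb_portable(0x7), parityb_portable(0x3)); /* 1 0 */
  return 0;
}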
diff --git a/makefile.in b/makefile.in
index 53fdfcb..1a958f3 100644
--- a/makefile.in
+++ b/makefile.in
@@ -18,11 +18,12 @@ LIBS=@MLIBS@ fec.o sim.o viterbi27.o viterbi27_port.o viterbi29.o viterbi29_port
 	peakval.o peakval_port.o \
 	sumsq.o sumsq_port.o
 
-CFLAGS=@CFLAGS@ -I. -Wall @ARCH_OPTION@
+CFLAGS=@CFLAGS@ -I. -fPIC -Wall @ARCH_OPTION@
 
 SHARED_LIB=@SH_LIB@
 
 all: libfec.a $(SHARED_LIB)
+
 test: vtest27 vtest29 vtest39 vtest615 rstest dtest sumsq_test peaktest
 	@echo "Correctness tests:"
@@ -51,44 +52,44 @@ install: all
 	install -m 644 -p simd-viterbi.3 rs.3 dsp.3 @mandir@/man3
 
 peaktest: peaktest.o libfec.a
-	gcc -g -o $@ $^
+	gcc $(CFLAGS) -g -o $@ $^
 
 sumsq_test: sumsq_test.o libfec.a
-	gcc -g -o $@ $^
+	gcc $(CFLAGS) -g -o $@ $^
 
 dtest: dtest.o libfec.a
-	gcc -g -o $@ $^ -lm
+	gcc $(CFLAGS) -g -o $@ $^ -lm
 
 vtest27: vtest27.o libfec.a
-	gcc -g -o $@ $^ -lm
+	gcc $(CFLAGS) -g -o $@ $^ -lm
 
 vtest29: vtest29.o libfec.a
-	gcc -g -o $@ $^ -lm
+	gcc $(CFLAGS) -g -o $@ $^ -lm
 
 vtest39: vtest39.o libfec.a
-	gcc -g -o $@ $^ -lm
+	gcc $(CFLAGS) -g -o $@ $^ -lm
 
 vtest615: vtest615.o libfec.a
-	gcc -g -o $@ $^ -lm
+	gcc $(CFLAGS) -g -o $@ $^ -lm
 
 rstest: rstest.o libfec.a
-	gcc -g -o $@ $^
+	gcc $(CFLAGS) -g -o $@ $^
 
 rs_speedtest: rs_speedtest.o libfec.a
-	gcc -g -o $@ $^
+	gcc $(CFLAGS) -g -o $@ $^
 
 # for some reason, the test programs without args segfault on the PPC with -O2 optimization. Dunno why - compiler bug?
 vtest27.o: vtest27.c fec.h
-	gcc -g -c $<
+	gcc $(CFLAGS) -g -c $<
 
 vtest29.o: vtest29.c fec.h
-	gcc -g -c $<
+	gcc $(CFLAGS) -g -c $<
 
 vtest39.o: vtest39.c fec.h
-	gcc -g -c $<
+	gcc $(CFLAGS) -g -c $<
 
 vtest615.o: vtest615.c fec.h
-	gcc -g -c $<
+	gcc $(CFLAGS) -g -c $<
 
 libfec.a: $(LIBS)
 	ar rv $@ $^
@@ -100,7 +101,7 @@ libfec.dylib: $(LIBS)
 
 # for Linux et al
 libfec.so: $(LIBS)
-	gcc -shared -Xlinker -soname=$@ -o $@ -Wl,-whole-archive $^ -Wl,-no-whole-archive -lc
+	gcc -fPIC -shared -Xlinker -soname=$@ -o $@ -Wl,-whole-archive $^ -Wl,-no-whole-archive -lc
 
 dotprod.o: dotprod.c fec.h
 
@@ -146,7 +147,7 @@ ccsds_tab.c: gen_ccsds
 	./gen_ccsds > ccsds_tab.c
 
 gen_ccsds: gen_ccsds.o init_rs_char.o
-	gcc -o $@ $^
+	gcc $(CFLAGS) -o $@ $^
 
 gen_ccsds.o: gen_ccsds.c
 	gcc $(CFLAGS) -c -o $@ $<
@@ -230,8 +231,14 @@ viterbi615_sse2.o: viterbi615_sse2.c fec.h
 
 cpu_mode_x86.o: cpu_mode_x86.c fec.h
 
+cpu_mode_x86_64.o: cpu_mode_x86_64.c fec.h
+
 cpu_mode_ppc.o: cpu_mode_ppc.c fec.h
 
+#%.o: %.s
+#	$(AS) $< -o $@
+
+
 clean:
 	rm -f *.o $(SHARED_LIB) *.a rs_speedtest peaktest sumsq_test dtest vtest27 vtest29 vtest39 vtest615 rstest ccsds_tab.c ccsds_tal.c gen_ccsds gen_ccsds_tal core
diff --git a/peakval.c b/peakval.c
--- a/peakval.c
+++ b/peakval.c
@@ -12,6 +12,10 @@ int peakval_sse(signed short *b,int cnt);
 int peakval_sse2(signed short *b,int cnt);
 #endif
 
+#ifdef __x86_64__
+int peakval_sse2(signed short *b,int cnt);
+#endif
+
 #ifdef __VEC__
 int peakval_av(signed short *b,int cnt);
 #endif
@@ -31,6 +35,13 @@ int peakval(signed short *b,int cnt){
   case SSE2:
     return peakval_sse2(b,cnt);
 #endif
+
+#ifdef __x86_64__
+  case SSE2:
+    return peakval_port(b,cnt);
+    //return peakval_sse2(b,cnt);
+#endif
+
 #ifdef __VEC__
   case ALTIVEC:
     return peakval_av(b,cnt);
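peakval() reports the largest absolute sample value in a buffer, which callers use to check input scaling. Because the ported SSE2 assembly is not usable yet, the SSE2 case returns the portable result, with the assembly call left commented out. A sketch of the assumed contract (the real peakval_port may differ in detail):

/* Illustrative sketch of the peakval() contract: scan for the
 * maximum absolute sample value.  Promotion to int makes -(-32768)
 * safe. */
int peakval_sketch(signed short *b, int cnt){
  int peak = 0;
  int i;

  for(i = 0; i < cnt; i++){
    int v = b[i] < 0 ? -b[i] : b[i];  /* absolute value */
    if(v > peak)
      peak = v;
  }
  return peak;
}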
diff --git a/sse2bfly27-64.s b/sse2bfly27-64.s
new file mode 100644
index 0000000..b23c6a9
--- /dev/null
+++ b/sse2bfly27-64.s
@@ -0,0 +1,210 @@
+/* Intel SIMD (SSE2) implementations of Viterbi ACS butterflies
+   for 64-state (k=7) convolutional code
+   Copyright 2003 Phil Karn, KA9Q
+   This code may be used under the terms of the GNU Lesser General Public License (LGPL)
+
+   Modifications for x86_64, 2012 Matthias P. Braendli, HB9EGM:
+   - changed registers to x86-64 equivalents
+   - changed instructions accordingly
+   - %rip indirect addressing needed for position independent code,
+     which is required because x86-64 needs dynamic libs to be PIC
+
+   void update_viterbi27_blk_sse2(struct v27 *vp,unsigned char syms[],int nbits) ;
+*/
+        # SSE2 (128-bit integer SIMD) version
+        # All x86-64 CPUs include SSE2
+
+        # These are offsets into struct v27, defined in viterbi27_av.c
+        .set DP,128
+        .set OLDMETRICS,132
+        .set NEWMETRICS,136
+        .text
+        .global update_viterbi27_blk_sse2,Branchtab27_sse2
+        .type update_viterbi27_blk_sse2,@function
+        .align 16
+
+update_viterbi27_blk_sse2:
+        pushq %rbp
+        movq %rsp,%rbp
+        /* Calling conventions differ between i386 and x86-64: on x86-64 the
+           arguments arrive in %rdi, %rsi and %rdx rather than on the stack,
+           and %rsi/%rdi are no longer callee-saved.  The code below still
+           fetches the arguments i386-style from the stack; this is one of
+           the unresolved issues of this port (see README.x86-64). */
+        pushq %rsi
+        pushq %rdi
+        pushq %rdx
+        pushq %rbx
+
+        movq 8(%rbp),%rdx       # rdx = vp
+        testq %rdx,%rdx
+        jnz 0f
+        movq $-1,%rax
+        jmp err
+0:      movq OLDMETRICS(%rdx),%rsi      # rsi -> old metrics
+        movq NEWMETRICS(%rdx),%rdi      # rdi -> new metrics
+        movq DP(%rdx),%rdx      # rdx -> decisions
+
+1:      movq 16(%rbp),%rax      # rax = nbits
+        decq %rax
+        jl 2f                   # passed zero, we're done
+        movq %rax,16(%rbp)
+
+        xorq %rax,%rax
+        movq 12(%rbp),%rbx      # rbx = syms
+        movb (%rbx),%al
+        movd %rax,%xmm6         # xmm6[0] = first symbol
+        movb 1(%rbx),%al
+        movd %rax,%xmm5         # xmm5[0] = second symbol
+        addq $2,%rbx
+        movq %rbx,12(%rbp)
+
+        punpcklbw %xmm6,%xmm6   # xmm6[1] = xmm6[0]
+        punpcklbw %xmm5,%xmm5
+        pshuflw $0,%xmm6,%xmm6  # broadcast low word to the low 4 words
+        pshuflw $0,%xmm5,%xmm5
+        punpcklqdq %xmm6,%xmm6  # propagate to all 16 bytes
+        punpcklqdq %xmm5,%xmm5
+        # xmm6 now contains first symbol in each byte, xmm5 the second
+
+        movdqa thirtyones(%rip),%xmm7
+
+        # each invocation of this macro does 16 butterflies in parallel
+        .MACRO butterfly GROUP
+        # compute branch metrics
+        movdqa (Branchtab27_sse2+(16*\GROUP))(%rip),%xmm4
+        movdqa (Branchtab27_sse2+32+(16*\GROUP))(%rip),%xmm3
+        pxor %xmm6,%xmm4
+        pxor %xmm5,%xmm3
+
+        # compute 5-bit branch metric in xmm4 by adding the individual
+        # symbol metrics.  This is okay for this code because the
+        # worst-case metric spread (at high Eb/No) is only 120, well
+        # within the range of our unsigned 8-bit path metrics, and even
+        # within the range of signed 8-bit path metrics
+        pavgb %xmm3,%xmm4
+        psrlw $3,%xmm4
+
+        pand %xmm7,%xmm4
+
+        movdqa (16*\GROUP)(%rsi),%xmm0          # Incoming path metric, high bit = 0
+        movdqa ((16*\GROUP)+32)(%rsi),%xmm3     # Incoming path metric, high bit = 1
+        movdqa %xmm0,%xmm2
+        movdqa %xmm3,%xmm1
+        paddusb %xmm4,%xmm0     # note use of saturating arithmetic
+        paddusb %xmm4,%xmm3     # this shouldn't be necessary, but why not?
+
+        # negate branch metrics
+        pxor %xmm7,%xmm4
+        paddusb %xmm4,%xmm1
+        paddusb %xmm4,%xmm2
+
+        # Find survivors, leave in xmm0,2
+        pminub %xmm1,%xmm0
+        pminub %xmm3,%xmm2
+        # get decisions, leave in xmm1,3
+        pcmpeqb %xmm0,%xmm1
+        pcmpeqb %xmm2,%xmm3
+
+        # interleave and store new path metrics in xmm0,2
+        movdqa %xmm0,%xmm4
+        punpckhbw %xmm2,%xmm0   # interleave second 16 new metrics
+        punpcklbw %xmm2,%xmm4   # interleave first 16 new metrics
+        movdqa %xmm0,(32*\GROUP+16)(%rdi)
+        movdqa %xmm4,(32*\GROUP)(%rdi)
+
+        # interleave decisions & store
+        movdqa %xmm1,%xmm4
+        punpckhbw %xmm3,%xmm1
+        punpcklbw %xmm3,%xmm4
+        # work around bug in gas due to Intel doc error
+        .byte 0x66,0x0f,0xd7,0xd9      # pmovmskb %xmm1,%ebx
+        shlq $16,%rbx
+        .byte 0x66,0x0f,0xd7,0xc4      # pmovmskb %xmm4,%eax
+        orq %rax,%rbx
+        movq %rbx,(4*\GROUP)(%rdx)
+        .endm
+        # invoke macro 2 times for a total of 32 butterflies
+        butterfly GROUP=0
+        butterfly GROUP=1
+
+        addq $8,%rdx    # bump decision pointer
+
+        # See if we have to normalize. This requires an explanation. We don't want
+        # our path metrics to exceed 255 on the *next* iteration. Since the
+        # largest branch metric is 30, that means we don't want any to exceed 225
+        # on *this* iteration. Rather than look at them all, we just pick an arbitrary one
+        # (the first) and see if it exceeds 225-120=105, where 120 is the experimentally-
+        # determined worst-case metric spread for this code and branch metrics in the range 0-30.
+
+        # This is extremely conservative, and empirical testing at a variety of Eb/Nos might
+        # show that a higher threshold could be used without affecting BER performance
+        movq (%rdi),%rax        # extract first output metric
+        andq $255,%rax
+        cmp $105,%rax
+        jle done        # No, no need to normalize
+
+        # Normalize by finding smallest metric and subtracting it
+        # from all metrics. We can't just pick an arbitrary small constant because
+        # the minimum metric might be zero!
+        movdqa (%rdi),%xmm0
+        movdqa %xmm0,%xmm4
+        movdqa 16(%rdi),%xmm1
+        pminub %xmm1,%xmm4
+        movdqa 32(%rdi),%xmm2
+        pminub %xmm2,%xmm4
+        movdqa 48(%rdi),%xmm3
+        pminub %xmm3,%xmm4
+
+        # crunch down to single lowest metric
+        movdqa %xmm4,%xmm5
+        psrldq $8,%xmm5         # the count to psrldq is bytes, not bits!
+        pminub %xmm5,%xmm4
+        movdqa %xmm4,%xmm5
+        psrlq $32,%xmm5
+        pminub %xmm5,%xmm4
+        movdqa %xmm4,%xmm5
+        psrlq $16,%xmm5
+        pminub %xmm5,%xmm4
+        movdqa %xmm4,%xmm5
+        psrlq $8,%xmm5
+        pminub %xmm5,%xmm4      # now in lowest byte of %xmm4
+
+        punpcklbw %xmm4,%xmm4   # lowest 2 bytes
+        pshuflw $0,%xmm4,%xmm4  # lowest 8 bytes
+        punpcklqdq %xmm4,%xmm4  # all 16 bytes
+
+        # xmm4 now contains lowest metric in all 16 bytes
+        # subtract it from every output metric
+        psubusb %xmm4,%xmm0
+        psubusb %xmm4,%xmm1
+        psubusb %xmm4,%xmm2
+        psubusb %xmm4,%xmm3
+        movdqa %xmm0,(%rdi)
+        movdqa %xmm1,16(%rdi)
+        movdqa %xmm2,32(%rdi)
+        movdqa %xmm3,48(%rdi)
+
+done:
+        # swap metrics
+        movq %rsi,%rax
+        movq %rdi,%rsi
+        movq %rax,%rdi
+        jmp 1b
+
+2:      movq 8(%rbp),%rbx       # rbx = vp
+        # stash metric pointers
+        movq %rsi,OLDMETRICS(%rbx)
+        movq %rdi,NEWMETRICS(%rbx)
+        movq %rdx,DP(%rbx)      # stash incremented value of vp->dp
+        xorq %rax,%rax
+err:    popq %rbx
+        popq %rdx
+        popq %rdi
+        popq %rsi
+        popq %rbp
+        ret
+
+        .data
+        .align 16
+
+thirtyones:
+        .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
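For readers not fluent in SSE2: each butterfly macro invocation performs 16 add-compare-select (ACS) steps at once on unsigned 8-bit path metrics. A scalar C model of one butterfly and of the normalization pass, with illustrative names; this sketches the technique, not the shipped _port implementation:

#include <stdint.h>

/* Illustrative scalar model of one ACS butterfly as the SSE2 code does
 * it 16 lanes at a time: two predecessor states reach two successor
 * states with complementary branch metrics, the smaller candidate
 * survives (pminub), and the comparison result (pcmpeqb) is the
 * decision bit kept for traceback. */
static inline uint8_t sat_add8(uint8_t a, uint8_t b){
  unsigned s = a + b;
  return s > 255 ? 255 : (uint8_t)s;     /* paddusb-style saturation */
}

void acs_butterfly(uint8_t old0, uint8_t old1, uint8_t bm,
                   uint8_t *new0, uint8_t *new1,
                   int *dec0, int *dec1){
  /* bm is masked to 0..31; the complementary branch uses the inverted
   * metric 31-bm, i.e. bm ^ 31, just as the asm pxors with thirtyones */
  uint8_t m00 = sat_add8(old0, bm),      m10 = sat_add8(old1, bm ^ 31);
  uint8_t m01 = sat_add8(old0, bm ^ 31), m11 = sat_add8(old1, bm);

  *dec0 = m10 < m00;           /* 1: the path from old1 survives */
  *new0 = *dec0 ? m10 : m00;
  *dec1 = m11 < m01;
  *new1 = *dec1 ? m11 : m01;
}

/* Normalization as done after each symbol pair: subtract the smallest
 * metric from all of them so the 8-bit range is never exceeded. */
void normalize_metrics(uint8_t metrics[], int n){
  int i;
  uint8_t min = metrics[0];

  for(i = 1; i < n; i++)
    if(metrics[i] < min)
      min = metrics[i];
  for(i = 0; i < n; i++)
    metrics[i] -= min;         /* min <= every metric, so no wraparound */
}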
diff --git a/sse2bfly29-64.s b/sse2bfly29-64.s
new file mode 100644
index 0000000..22bd8a1
--- /dev/null
+++ b/sse2bfly29-64.s
@@ -0,0 +1,254 @@
+/* Intel SIMD SSE2 implementation of Viterbi ACS butterflies
+   for 256-state (k=9) convolutional code
+   Copyright 2004 Phil Karn, KA9Q
+   This code may be used under the terms of the GNU Lesser General Public License (LGPL)
+
+   Modifications for x86_64, 2012 Matthias P. Braendli, HB9EGM
+   - changed registers to x86-64 equivalents
+   - changed instructions accordingly
+   - %rip indirect addressing needed for position independent code,
+     which is required because x86-64 needs dynamic libs to be PIC.
+     That still doesn't work
+
+   void update_viterbi29_blk_sse2(struct v29 *vp,unsigned char *syms,int nbits) ;
+*/
+        # SSE2 (128-bit integer SIMD) version
+        # All x86-64 CPUs include SSE2
+
+        # These are offsets into struct v29, defined in viterbi29_av.c
+        .set DP,512
+        .set OLDMETRICS,516
+        .set NEWMETRICS,520
+
+        .text
+        .global update_viterbi29_blk_sse2,Branchtab29_sse2
+        .type update_viterbi29_blk_sse2,@function
+        .align 16
+
+update_viterbi29_blk_sse2:
+        pushq %rbp
+        movq %rsp,%rbp
+        /* Calling conventions differ between i386 and x86-64: on x86-64 the
+           arguments arrive in %rdi, %rsi and %rdx rather than on the stack,
+           and %rsi/%rdi are no longer callee-saved.  The code below still
+           fetches the arguments i386-style from the stack; this is one of
+           the unresolved issues of this port (see README.x86-64). */
+        pushq %rsi
+        pushq %rdi
+        pushq %rdx
+        pushq %rbx
+
+        movq 8(%rbp),%rdx       # rdx = vp
+        testq %rdx,%rdx
+        jnz 0f
+        movq $-1,%rax
+        jmp err
+0:      movq OLDMETRICS(%rdx),%rsi      # rsi -> old metrics
+        movq NEWMETRICS(%rdx),%rdi      # rdi -> new metrics
+        movq DP(%rdx),%rdx      # rdx -> decisions
+
+1:      movq 16(%rbp),%rax      # rax = nbits
+        decq %rax
+        jl 2f                   # passed zero, we're done
+        movq %rax,16(%rbp)
+
+        xorq %rax,%rax
+        movq 12(%rbp),%rbx      # rbx = syms
+        movb (%rbx),%al
+        movd %rax,%xmm6         # xmm6[0] = first symbol
+        movb 1(%rbx),%al
+        movd %rax,%xmm5         # xmm5[0] = second symbol
+        addq $2,%rbx
+        movq %rbx,12(%rbp)
+
+        punpcklbw %xmm6,%xmm6   # xmm6[1] = xmm6[0]
+        punpcklbw %xmm5,%xmm5
+        pshuflw $0,%xmm6,%xmm6  # broadcast low word to the low 4 words
+        pshuflw $0,%xmm5,%xmm5
+        punpcklqdq %xmm6,%xmm6  # propagate to all 16 bytes
+        punpcklqdq %xmm5,%xmm5
+        # xmm6 now contains first symbol in each byte, xmm5 the second
+
+        movdqa thirtyones(%rip),%xmm7
+
+        # each invocation of this macro does 16 butterflies in parallel
+        .MACRO butterfly GROUP
+        # compute branch metrics
+        movdqa Branchtab29_sse2+(16*\GROUP)(%rip),%xmm4
+        movdqa Branchtab29_sse2+128+(16*\GROUP)(%rip),%xmm3
+        pxor %xmm6,%xmm4
+        pxor %xmm5,%xmm3
+        pavgb %xmm3,%xmm4
+        psrlw $3,%xmm4
+
+        pand %xmm7,%xmm4        # xmm4 contains branch metrics
+
+        movdqa (16*\GROUP)(%rsi),%xmm0          # Incoming path metric, high bit = 0
+        movdqa ((16*\GROUP)+128)(%rsi),%xmm3    # Incoming path metric, high bit = 1
+        movdqa %xmm0,%xmm2
+        movdqa %xmm3,%xmm1
+        paddusb %xmm4,%xmm0
+        paddusb %xmm4,%xmm3
+
+        # invert branch metrics
+        pxor %xmm7,%xmm4
+
+        paddusb %xmm4,%xmm1
+        paddusb %xmm4,%xmm2
+
+        # Find survivors, leave in xmm0,2
+        pminub %xmm1,%xmm0
+        pminub %xmm3,%xmm2
+        # get decisions, leave in xmm1,3
+        pcmpeqb %xmm0,%xmm1
+        pcmpeqb %xmm2,%xmm3
+
+        # interleave and store new path metrics in xmm0,2
+        movdqa %xmm0,%xmm4
+        punpckhbw %xmm2,%xmm0   # interleave second 16 new metrics
+        punpcklbw %xmm2,%xmm4   # interleave first 16 new metrics
+        movdqa %xmm0,(32*\GROUP+16)(%rdi)
+        movdqa %xmm4,(32*\GROUP)(%rdi)
+
+        # interleave decisions & store
+        movdqa %xmm1,%xmm4
+        punpckhbw %xmm3,%xmm1
+        punpcklbw %xmm3,%xmm4
+        # work around bug in gas due to Intel doc error
+        .byte 0x66,0x0f,0xd7,0xd9      # pmovmskb %xmm1,%ebx
+        shlq $16,%rbx
+        .byte 0x66,0x0f,0xd7,0xc4      # pmovmskb %xmm4,%eax
+        orq %rax,%rbx
+        movq %rbx,(4*\GROUP)(%rdx)
+        .endm
+        # invoke macro 8 times for a total of 128 butterflies
+        butterfly GROUP=0
+        butterfly GROUP=1
+        butterfly GROUP=2
+        butterfly GROUP=3
+        butterfly GROUP=4
+        butterfly GROUP=5
+        butterfly GROUP=6
+        butterfly GROUP=7
+
+        addq $32,%rdx   # bump decision pointer
+
+        # see if we have to normalize
+        movq (%rdi),%rax        # extract first output metric
+        andq $255,%rax
+        cmp $50,%rax    # is it greater than 50?
+        movq $0,%rax
+        jle done        # No, no need to normalize
+
+        # Normalize by finding smallest metric and subtracting it
+        # from all metrics
+        movdqa (%rdi),%xmm0
+        pminub 16(%rdi),%xmm0
+        pminub 32(%rdi),%xmm0
+        pminub 48(%rdi),%xmm0
+        pminub 64(%rdi),%xmm0
+        pminub 80(%rdi),%xmm0
+        pminub 96(%rdi),%xmm0
+        pminub 112(%rdi),%xmm0
+        pminub 128(%rdi),%xmm0
+        pminub 144(%rdi),%xmm0
+        pminub 160(%rdi),%xmm0
+        pminub 176(%rdi),%xmm0
+        pminub 192(%rdi),%xmm0
+        pminub 208(%rdi),%xmm0
+        pminub 224(%rdi),%xmm0
+        pminub 240(%rdi),%xmm0
+
+        # crunch down to single lowest metric
+        movdqa %xmm0,%xmm1
+        psrldq $8,%xmm0         # the count to psrldq is bytes, not bits!
+        pminub %xmm1,%xmm0
+        movdqa %xmm0,%xmm1
+        psrlq $32,%xmm0
+        pminub %xmm1,%xmm0
+        movdqa %xmm0,%xmm1
+        psrlq $16,%xmm0
+        pminub %xmm1,%xmm0
+        movdqa %xmm0,%xmm1
+        psrlq $8,%xmm0
+        pminub %xmm1,%xmm0
+
+        punpcklbw %xmm0,%xmm0   # lowest 2 bytes
+        pshuflw $0,%xmm0,%xmm0  # lowest 8 bytes
+        punpcklqdq %xmm0,%xmm0  # all 16 bytes
+
+        # xmm0 now contains lowest metric in all 16 bytes
+        # subtract it from every output metric
+        movdqa (%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,(%rdi)
+        movdqa 16(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,16(%rdi)
+        movdqa 32(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,32(%rdi)
+        movdqa 48(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,48(%rdi)
+        movdqa 64(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,64(%rdi)
+        movdqa 80(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,80(%rdi)
+        movdqa 96(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,96(%rdi)
+        movdqa 112(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,112(%rdi)
+        movdqa 128(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,128(%rdi)
+        movdqa 144(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,144(%rdi)
+        movdqa 160(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,160(%rdi)
+        movdqa 176(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,176(%rdi)
+        movdqa 192(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,192(%rdi)
+        movdqa 208(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,208(%rdi)
+        movdqa 224(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,224(%rdi)
+        movdqa 240(%rdi),%xmm1
+        psubusb %xmm0,%xmm1
+        movdqa %xmm1,240(%rdi)
+
+done:
+        # swap metrics
+        movq %rsi,%rax
+        movq %rdi,%rsi
+        movq %rax,%rdi
+        jmp 1b
+
+2:      movq 8(%rbp),%rbx       # rbx = vp
+        # stash metric pointers
+        movq %rsi,OLDMETRICS(%rbx)
+        movq %rdi,NEWMETRICS(%rbx)
+        movq %rdx,DP(%rbx)      # stash incremented value of vp->dp
+        xorq %rax,%rax
+err:    popq %rbx
+        popq %rdx
+        popq %rdi
+        popq %rsi
+        popq %rbp
+        ret
+
+        .data
+        .align 16
+thirtyones:
+        .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
diff --git a/sumsq.c b/sumsq.c
--- a/sumsq.c
+++ b/sumsq.c
@@ -15,6 +15,10 @@ unsigned long long sumsq_sse(signed short *,int);
 unsigned long long sumsq_sse2(signed short *,int);
 #endif
 
+#ifdef __x86_64__
+unsigned long long sumsq_sse2(signed short *,int);
+#endif
+
 #ifdef __VEC__
 unsigned long long sumsq_av(signed short *,int);
 #endif
@@ -32,6 +36,12 @@ unsigned long long sumsq(signed short *in,int cnt){
     return sumsq_sse2(in,cnt);
 #endif
 
+#ifdef __x86_64__
+  case SSE2:
+    return sumsq_port(in,cnt);
+    //return sumsq_sse2(in,cnt);
+#endif
+
 #ifdef __VEC__
   case ALTIVEC:
     return sumsq_av(in,cnt);
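sumsq() accumulates the sum of squared samples in 64 bits so large buffers cannot overflow. As with peakval(), the x86-64 case returns the portable result, with the assembly call left commented out. A sketch of the assumed contract (the real sumsq_port may differ in detail):

/* Illustrative sketch of the sumsq() contract: sum of x[i]^2 over the
 * buffer, accumulated in an unsigned 64-bit integer. */
unsigned long long sumsq_sketch(signed short *in, int cnt){
  unsigned long long sum = 0;
  int i;

  for(i = 0; i < cnt; i++)
    sum += (long long)in[i] * in[i];   /* widen before multiplying */
  return sum;
}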
diff --git a/viterbi27.c b/viterbi27.c
index 554da92..316fee4 100644
--- a/viterbi27.c
+++ b/viterbi27.c
@@ -26,6 +26,10 @@ void *create_viterbi27(int len){
   case SSE2:
     return create_viterbi27_sse2(len);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return create_viterbi27_port(len);
+#endif
   }
 }
 
@@ -51,6 +55,11 @@ void set_viterbi27_polynomial(int polys[2]){
     set_viterbi27_polynomial_sse2(polys);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    set_viterbi27_polynomial_port(polys);
+    break;
+#endif
   }
 }
 
@@ -72,6 +81,10 @@ int init_viterbi27(void *p,int starting_state){
   case SSE2:
     return init_viterbi27_sse2(p,starting_state);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return init_viterbi27_port(p,starting_state);
+#endif
   }
 }
 
@@ -98,6 +111,10 @@ int chainback_viterbi27(
   case SSE2:
     return chainback_viterbi27_sse2(p,data,nbits,endstate);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return chainback_viterbi27_port(p,data,nbits,endstate);
+#endif
   }
 }
 
@@ -124,6 +141,11 @@ void delete_viterbi27(void *p){
     delete_viterbi27_sse2(p);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    delete_viterbi27_port(p);
+    break;
+#endif
   }
 }
 
@@ -156,6 +178,11 @@ int update_viterbi27_blk(void *p,unsigned char syms[],int nbits){
     update_viterbi27_blk_sse2(p,syms,nbits);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    update_viterbi27_blk_port(p,syms,nbits);
+    break;
+#endif
   }
   return 0;
 }
diff --git a/viterbi29.c b/viterbi29.c
index 80cbb33..f51e356 100644
--- a/viterbi29.c
+++ b/viterbi29.c
@@ -26,6 +26,10 @@ void *create_viterbi29(int len){
   case SSE2:
     return create_viterbi29_sse2(len);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return create_viterbi29_port(len);
+#endif
   }
 }
 
@@ -51,6 +55,11 @@ void set_viterbi29_polynomial(int polys[2]){
     set_viterbi29_polynomial_sse2(polys);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    set_viterbi29_polynomial_port(polys);
+    break;
+#endif
   }
 }
 
@@ -72,6 +81,10 @@ int init_viterbi29(void *p,int starting_state){
   case SSE2:
     return init_viterbi29_sse2(p,starting_state);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return init_viterbi29_port(p,starting_state);
+#endif
   }
 }
 
@@ -98,6 +111,10 @@ int chainback_viterbi29(
   case SSE2:
     return chainback_viterbi29_sse2(p,data,nbits,endstate);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return chainback_viterbi29_port(p,data,nbits,endstate);
+#endif
   }
 }
 
@@ -124,6 +141,11 @@ void delete_viterbi29(void *p){
     delete_viterbi29_sse2(p);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    delete_viterbi29_port(p);
+    break;
+#endif
   }
 }
 
@@ -148,5 +170,9 @@ int update_viterbi29_blk(void *p,unsigned char syms[],int nbits){
   case SSE2:
     return update_viterbi29_blk_sse2(p,syms,nbits);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return update_viterbi29_blk_port(p,syms,nbits);
+#endif
   }
 }
diff --git a/viterbi39.c b/viterbi39.c
index ac28c2c..d2e65f4 100644
--- a/viterbi39.c
+++ b/viterbi39.c
@@ -26,6 +26,10 @@ void *create_viterbi39(int len){
   case SSE2:
     return create_viterbi39_sse2(len);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return create_viterbi39_port(len);
+#endif
   }
 }
 
@@ -51,6 +55,11 @@ void set_viterbi39_polynomial(int polys[3]){
     set_viterbi39_polynomial_sse2(polys);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    set_viterbi39_polynomial_port(polys);
+    break;
+#endif
   }
 }
 
@@ -73,6 +82,10 @@ int init_viterbi39(void *p,int starting_state){
   case SSE2:
     return init_viterbi39_sse2(p,starting_state);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return init_viterbi39_port(p,starting_state);
+#endif
   }
 }
 
@@ -99,6 +112,10 @@ int chainback_viterbi39(
   case SSE2:
     return chainback_viterbi39_sse2(p,data,nbits,endstate);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return chainback_viterbi39_port(p,data,nbits,endstate);
+#endif
   }
 }
 
@@ -125,6 +142,11 @@ void delete_viterbi39(void *p){
     delete_viterbi39_sse2(p);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    delete_viterbi39_port(p);
+    break;
+#endif
   }
 }
 
@@ -149,5 +171,9 @@ int update_viterbi39_blk(void *p,unsigned char syms[],int nbits){
   case SSE2:
     return update_viterbi39_blk_sse2(p,syms,nbits);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return update_viterbi39_blk_port(p,syms,nbits);
+#endif
   }
 }
diff --git a/viterbi615.c b/viterbi615.c
index 6dda51f..ec2fb3c 100644
--- a/viterbi615.c
+++ b/viterbi615.c
@@ -27,6 +27,10 @@ void *create_viterbi615(int len){
   case SSE2:
     return create_viterbi615_sse2(len);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return create_viterbi615_port(len);
+#endif
   }
 }
 
@@ -53,6 +57,11 @@ void set_viterbi615_polynomial(int polys[6]){
     set_viterbi615_polynomial_sse2(polys);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    set_viterbi615_polynomial_port(polys);
+    break;
+#endif
   }
 }
 
@@ -74,6 +83,10 @@ int init_viterbi615(void *p,int starting_state){
   case SSE2:
     return init_viterbi615_sse2(p,starting_state);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return init_viterbi615_port(p,starting_state);
+#endif
   }
 }
 
@@ -100,6 +113,10 @@ int chainback_viterbi615(
   case SSE2:
     return chainback_viterbi615_sse2(p,data,nbits,endstate);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return chainback_viterbi615_port(p,data,nbits,endstate);
+#endif
   }
 }
 
@@ -126,6 +143,11 @@ void delete_viterbi615(void *p){
     delete_viterbi615_sse2(p);
     break;
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    delete_viterbi615_port(p);
+    break;
+#endif
   }
 }
 
@@ -150,6 +172,10 @@ int update_viterbi615_blk(void *p,unsigned char syms[],int nbits){
   case SSE2:
     return update_viterbi615_blk_sse2(p,syms,nbits);
 #endif
+#ifdef __x86_64__
+  case SSE2:
+    return update_viterbi615_blk_port(p,syms,nbits);
+#endif
   }
 }
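All four Viterbi wrappers follow the same pattern: on x86-64 the SSE2 case is rerouted to the _port implementation, so existing callers keep working unchanged. A hypothetical end-to-end sequence for the k=7 decoder, using only functions whose signatures appear in this patch (frame size illustrative; 128 is an erased soft symbol in libfec's offset-binary convention):

#include <string.h>
#include "fec.h"

/* Hypothetical demo: decode one 2048-bit frame with the rate-1/2 k=7
 * code.  Two soft symbols per bit, plus K-1 = 6 tail bits to flush
 * the encoder back to state 0. */
int main(void){
  int nbits = 2048;
  unsigned char syms[2*(2048+6)];   /* soft symbols, 0..255 offset binary */
  unsigned char data[2048/8];       /* decoded bits, packed MSB-first */
  void *vp;

  memset(syms, 128, sizeof(syms));  /* dummy input: all erasures */
  if((vp = create_viterbi27(nbits)) == NULL)
    return 1;
  init_viterbi27(vp, 0);                    /* start in state 0 */
  update_viterbi27_blk(vp, syms, nbits+6);  /* ACS over data + tail */
  chainback_viterbi27(vp, data, nbits, 0);  /* trace back from state 0 */
  delete_viterbi27(vp);
  return 0;
}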