diff options
author | Nils Wallménius <nils@rockbox.org> | 2013-05-20 22:25:57 +0200 |
---|---|---|
committer | Nils Wallménius <nils@rockbox.org> | 2013-08-31 08:30:51 +0200 |
commit | 580b307fd791c0997a8831bc800bba87797bfb7e (patch) | |
tree | 807846056f06fd944a750ce41217a877910ebd59 /lib/rbcodec/codecs/libopus/celt/fixed_generic.h | |
parent | 74761b70acd96cecc0d35450dd56a98ad9ee7d3d (diff) | |
download | rockbox-580b307fd791c0997a8831bc800bba87797bfb7e.tar.gz rockbox-580b307fd791c0997a8831bc800bba87797bfb7e.zip |
Sync opus codec to upstream git
Sync opus codec to upstream commit
02fed471a4568852d6618e041c4f2af0d7730ee2 (August 30 2013)
This brings in a lot of optimizations but also makes the diff
between our codec and the upstream much smaller as most of our
optimizations have been upstreamed or superseded.
Speedups across the board for CELT mode files:
64kbps 128kbps
H300 9.82MHz 15.48MHz
c200 4.86MHz 9.63MHz
fuze v1 10.32MHz 15.92MHz
For the silk mode test file (16kbps) arm targets get a speedup
of about 2MHz while the H300 is 7.8MHz slower, likely because it's
now using the pseudostack more rather than the real stack which
is in iram. Patches to get around that are upcoming.
Change-Id: Ifecf963e461c51ac42e09dac1e91bc4bc3b12fa3
Diffstat (limited to 'lib/rbcodec/codecs/libopus/celt/fixed_generic.h')
-rw-r--r-- | lib/rbcodec/codecs/libopus/celt/fixed_generic.h | 62 |
1 files changed, 7 insertions, 55 deletions
diff --git a/lib/rbcodec/codecs/libopus/celt/fixed_generic.h b/lib/rbcodec/codecs/libopus/celt/fixed_generic.h index 28a1598d3e..0e77976e83 100644 --- a/lib/rbcodec/codecs/libopus/celt/fixed_generic.h +++ b/lib/rbcodec/codecs/libopus/celt/fixed_generic.h | |||
@@ -42,64 +42,12 @@ | |||
42 | /** 16x32 multiplication, followed by a 16-bit shift right (round-to-nearest). Results fits in 32 bits */ | 42 | /** 16x32 multiplication, followed by a 16-bit shift right (round-to-nearest). Results fits in 32 bits */ |
43 | #define MULT16_32_P16(a,b) ADD32(MULT16_16((a),SHR((b),16)), PSHR(MULT16_16((a),((b)&0x0000ffff)),16)) | 43 | #define MULT16_32_P16(a,b) ADD32(MULT16_16((a),SHR((b),16)), PSHR(MULT16_16((a),((b)&0x0000ffff)),16)) |
44 | 44 | ||
45 | #if defined(CPU_COLDFIRE) | ||
46 | static inline int32_t MULT16_32_Q15(int32_t a, int32_t b) | ||
47 | { | ||
48 | int32_t r; | ||
49 | asm volatile ("mac.l %[a], %[b], %%acc0;" | ||
50 | "movclr.l %%acc0, %[r];" | ||
51 | : [r] "=r" (r) | ||
52 | : [a] "r" (a<<16), [b] "r" (b) | ||
53 | : "cc"); | ||
54 | return r; | ||
55 | } | ||
56 | |||
57 | #elif defined(CPU_ARM) | ||
58 | static inline int32_t MULT16_32_Q15(int32_t a, int32_t b) | ||
59 | { | ||
60 | int32_t lo, hi; | ||
61 | asm volatile("smull %[lo], %[hi], %[b], %[a] \n\t" | ||
62 | "mov %[lo], %[lo], lsr #15 \n\t" | ||
63 | "orr %[hi], %[lo], %[hi], lsl #17 \n\t" | ||
64 | : [lo] "=&r" (lo), [hi] "=&r" (hi) | ||
65 | : [a] "r" (a), [b] "r" (b) ); | ||
66 | return(hi); | ||
67 | } | ||
68 | |||
69 | #else | ||
70 | /** 16x32 multiplication, followed by a 15-bit shift right. Results fits in 32 bits */ | 45 | /** 16x32 multiplication, followed by a 15-bit shift right. Results fits in 32 bits */ |
71 | #define MULT16_32_Q15(a,b) ADD32(SHL(MULT16_16((a),SHR((b),16)),1), SHR(MULT16_16SU((a),((b)&0x0000ffff)),15)) | 46 | #define MULT16_32_Q15(a,b) ADD32(SHL(MULT16_16((a),SHR((b),16)),1), SHR(MULT16_16SU((a),((b)&0x0000ffff)),15)) |
72 | #endif | ||
73 | 47 | ||
74 | #if defined(CPU_COLDFIRE) | ||
75 | static inline int32_t MULT32_32_Q31(int32_t a, int32_t b) | ||
76 | { | ||
77 | int32_t r; | ||
78 | asm volatile ("mac.l %[a], %[b], %%acc0;" | ||
79 | "movclr.l %%acc0, %[r];" | ||
80 | : [r] "=r" (r) | ||
81 | : [a] "r" (a), [b] "r" (b) | ||
82 | : "cc"); | ||
83 | return r; | ||
84 | } | ||
85 | |||
86 | #elif defined(CPU_ARM) | ||
87 | static inline int32_t MULT32_32_Q31(int32_t a, int32_t b) | ||
88 | { | ||
89 | int32_t lo, hi; | ||
90 | asm volatile("smull %[lo], %[hi], %[a], %[b] \n\t" | ||
91 | "mov %[lo], %[lo], lsr #31 \n\t" | ||
92 | "orr %[hi], %[lo], %[hi], lsl #1 \n\t" | ||
93 | : [lo] "=&r" (lo), [hi] "=&r" (hi) | ||
94 | : [a] "r" (a), [b] "r" (b) ); | ||
95 | return(hi); | ||
96 | } | ||
97 | |||
98 | #else | ||
99 | /** 32x32 multiplication, followed by a 31-bit shift right. Results fits in 32 bits */ | 48 | /** 32x32 multiplication, followed by a 31-bit shift right. Results fits in 32 bits */ |
100 | //#define MULT32_32_Q31(a,b) ADD32(ADD32(SHL(MULT16_16(SHR((a),16),SHR((b),16)),1), SHR(MULT16_16SU(SHR((a),16),((b)&0x0000ffff)),15)), SHR(MULT16_16SU(SHR((b),16),((a)&0x0000ffff)),15)) | 49 | #define MULT32_32_Q31(a,b) ADD32(ADD32(SHL(MULT16_16(SHR((a),16),SHR((b),16)),1), SHR(MULT16_16SU(SHR((a),16),((b)&0x0000ffff)),15)), SHR(MULT16_16SU(SHR((b),16),((a)&0x0000ffff)),15)) |
101 | #define MULT32_32_Q31(a,b) (opus_val32)((((int64_t)(a)) * ((int64_t)(b)))>>31) | 50 | |
102 | #endif | ||
103 | /** Compile-time conversion of float constant to 16-bit value */ | 51 | /** Compile-time conversion of float constant to 16-bit value */ |
104 | #define QCONST16(x,bits) ((opus_val16)(.5+(x)*(((opus_val32)1)<<(bits)))) | 52 | #define QCONST16(x,bits) ((opus_val16)(.5+(x)*(((opus_val32)1)<<(bits)))) |
105 | 53 | ||
@@ -136,6 +84,8 @@ static inline int32_t MULT32_32_Q31(int32_t a, int32_t b) | |||
136 | #define PSHR(a,shift) (SHR((a)+((EXTEND32(1)<<((shift))>>1)),shift)) | 84 | #define PSHR(a,shift) (SHR((a)+((EXTEND32(1)<<((shift))>>1)),shift)) |
137 | #define SATURATE(x,a) (((x)>(a) ? (a) : (x)<-(a) ? -(a) : (x))) | 85 | #define SATURATE(x,a) (((x)>(a) ? (a) : (x)<-(a) ? -(a) : (x))) |
138 | 86 | ||
87 | #define SATURATE16(x) (EXTRACT16((x)>32767 ? 32767 : (x)<-32768 ? -32768 : (x))) | ||
88 | |||
139 | /** Shift by a and round-to-neareast 32-bit value. Result is a 16-bit value */ | 89 | /** Shift by a and round-to-neareast 32-bit value. Result is a 16-bit value */ |
140 | #define ROUND16(x,a) (EXTRACT16(PSHR32((x),(a)))) | 90 | #define ROUND16(x,a) (EXTRACT16(PSHR32((x),(a)))) |
141 | /** Divide by two */ | 91 | /** Divide by two */ |
@@ -160,7 +110,9 @@ static inline int32_t MULT32_32_Q31(int32_t a, int32_t b) | |||
160 | 110 | ||
161 | /** 16x16 multiply-add where the result fits in 32 bits */ | 111 | /** 16x16 multiply-add where the result fits in 32 bits */ |
162 | #define MAC16_16(c,a,b) (ADD32((c),MULT16_16((a),(b)))) | 112 | #define MAC16_16(c,a,b) (ADD32((c),MULT16_16((a),(b)))) |
163 | /** 16x32 multiply-add, followed by a 15-bit shift right. Results fits in 32 bits */ | 113 | /** 16x32 multiply, followed by a 15-bit shift right and 32-bit add. |
114 | b must fit in 31 bits. | ||
115 | Result fits in 32 bits. */ | ||
164 | #define MAC16_32_Q15(c,a,b) ADD32(c,ADD32(MULT16_16((a),SHR((b),15)), SHR(MULT16_16((a),((b)&0x00007fff)),15))) | 116 | #define MAC16_32_Q15(c,a,b) ADD32(c,ADD32(MULT16_16((a),SHR((b),15)), SHR(MULT16_16((a),((b)&0x00007fff)),15))) |
165 | 117 | ||
166 | #define MULT16_16_Q11_32(a,b) (SHR(MULT16_16((a),(b)),11)) | 118 | #define MULT16_16_Q11_32(a,b) (SHR(MULT16_16((a),(b)),11)) |