diff options
Diffstat (limited to 'firmware/asm/sh/memcpy.S')
-rw-r--r-- | firmware/asm/sh/memcpy.S | 219 |
1 file changed, 219 insertions, 0 deletions
diff --git a/firmware/asm/sh/memcpy.S b/firmware/asm/sh/memcpy.S new file mode 100644 index 0000000000..e23a579b05 --- /dev/null +++ b/firmware/asm/sh/memcpy.S | |||
@@ -0,0 +1,219 @@ | |||
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2004-2005 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"

    /* .icode: dedicated code section (presumably mapped to fast internal
     * RAM by the linker script - confirm against the target's .lds) */
    .section    .icode,"ax",@progbits

    .align      2
    .global     _memcpy
    .global     ___memcpy_fwd_entry
    .type       _memcpy,@function

/* Copies <length> bytes of data in memory from <source> to <dest>
 * This version is optimized for speed
 *
 * arguments:
 *  r4 - destination address
 *  r5 - source address
 *  r6 - length
 *
 * return value:
 *  r0 - destination address (like ANSI version)
 *
 * register usage:
 *  r0 - data / scratch
 *  r1 - 2nd data / scratch
 *  r2 - scratch
 *  r3 - first long bound / adjusted end address (only if >= 11 bytes)
 *  r4 - current dest address
 *  r5 - current source address
 *  r6 - source end address
 *  r7 - stored dest start address
 *
 * The instruction order is devised in a way to utilize the pipelining
 * of the SH1 to the max. The routine also tries to utilize fast page mode.
 *
 * NOTE(review): SH delayed branches are used throughout - the instruction
 * following each bra/jmp/rts executes BEFORE the branch takes effect
 * (marked "(delay slot)" below). Statement order is load-bearing here;
 * do not reorder instructions.
 */

_memcpy:
    mov     r4,r7       /* store dest for returning */
___memcpy_fwd_entry:
    add     #-8,r4      /* offset for early increment (max. 2 longs) */
    mov     #11,r0
    cmp/hs  r0,r6       /* at least 11 bytes to copy? (ensures 2 aligned longs) */
    add     r5,r6       /* r6 = source_end */
    bf      .start_b2   /* no: jump directly to byte loop */

    mov     #3,r0
    neg     r5,r3
    and     r0,r3       /* r3 = (4 - align_offset) % 4 */
    tst     r3,r3       /* already aligned? */
    bt      .end_b1     /* yes: skip leading byte loop */

    add     r5,r3       /* r3 = first source long bound */

    /* leading byte loop: copies 0..3 bytes */
.loop_b1:
    mov.b   @r5+,r0     /* load byte & increment source addr */
    add     #1,r4       /* increment dest addr */
    mov.b   r0,@(7,r4)  /* store byte (+7 compensates the -8 dest offset) */
    cmp/hi  r5,r3       /* runs r5 up to first long bound */
    bt      .loop_b1
    /* now r5 is always at a long boundary */
    /* -> memory reading is done in longs for all dest alignments */

    /* selector for main copy loop */
.end_b1:
    mov     #3,r1
    and     r4,r1       /* r1 = dest alignment offset */
    mova    .jmptab,r0
    mov.b   @(r0,r1),r1 /* select appropriate main loop */
    add     r0,r1
    mov     r6,r3       /* move end address to r3 */
    jmp     @r1         /* and jump to it */
    add     #-7,r3      /* (delay slot) adjust end addr for main loops doing
                         * 2 longs/pass */

    /** main loops, copying 2 longs per pass to profit from fast page mode **/

    /* long aligned destination (fastest) */
    .align  2
.loop_do0:
    mov.l   @r5+,r1     /* load first long & increment source addr */
    add     #16,r4      /* increment dest addr & account for decrementing stores */
    mov.l   @r5+,r0     /* load second long & increment source addr */
    cmp/hi  r5,r3       /* runs r5 up to last or second last long bound */
    mov.l   r0,@-r4     /* store second long */
    mov.l   r1,@-r4     /* store first long; NOT ALIGNED - no speed loss here! */
    bt      .loop_do0

    add     #4,r3       /* readjust end address */
    cmp/hi  r5,r3       /* one long left? */
    bf      .start_b2   /* no, jump to trailing byte loop */

    mov.l   @r5+,r0     /* load last long & increment source addr */
    add     #4,r4       /* increment dest addr */
    bra     .start_b2   /* jump to trailing byte loop */
    mov.l   r0,@(4,r4)  /* (delay slot) store last long */

    /* word aligned destination (long + 2) */
    .align  2
.loop_do2:
    mov.l   @r5+,r1     /* load first long & increment source addr */
    add     #16,r4      /* increment dest addr */
    mov.l   @r5+,r0     /* load second long & increment source addr */
    cmp/hi  r5,r3       /* runs r5 up to last or second last long bound */
    mov.w   r0,@-r4     /* store low word of second long */
    xtrct   r1,r0       /* extract low word of first long & high word of second long */
    mov.l   r0,@-r4     /* and store as long */
    swap.w  r1,r0       /* get high word of first long */
    mov.w   r0,@-r4     /* and store it */
    bt      .loop_do2

    add     #4,r3       /* readjust end address */
    cmp/hi  r5,r3       /* one long left? */
    bf      .start_b2   /* no, jump to trailing byte loop */

    mov.l   @r5+,r0     /* load last long & increment source addr */
    add     #4,r4       /* increment dest addr */
    mov.w   r0,@(6,r4)  /* store low word */
    shlr16  r0          /* get high word */
    bra     .start_b2   /* jump to trailing byte loop */
    mov.w   r0,@(4,r4)  /* (delay slot) and store it */

    /* jumptable for loop selector */
    .align  2
.jmptab:
    .byte   .loop_do0 - .jmptab  /* placed in the middle because the SH1 */
    .byte   .loop_do1 - .jmptab  /* loads bytes sign-extended. Otherwise */
    .byte   .loop_do2 - .jmptab  /* the last loop would be out of reach */
    .byte   .loop_do3 - .jmptab  /* of the offset range. */

    /* byte aligned destination (long + 1) */
    .align  2
.loop_do1:
    mov.l   @r5+,r1     /* load first long & increment source addr */
    add     #16,r4      /* increment dest addr */
    mov.l   @r5+,r0     /* load second long & increment source addr */
    cmp/hi  r5,r3       /* runs r5 up to last or second last long bound */
    mov.b   r0,@-r4     /* store low byte of second long */
    shlr8   r0          /* get upper 3 bytes */
    mov     r1,r2       /* copy first long */
    shll16  r2          /* move low byte of first long all the way up, .. */
    shll8   r2
    or      r2,r0       /* ..combine with the 3 bytes of second long.. */
    mov.l   r0,@-r4     /* ..and store as long */
    shlr8   r1          /* get middle 2 bytes */
    mov.w   r1,@-r4     /* store as word */
    shlr16  r1          /* get upper byte */
    mov.b   r1,@-r4     /* and store */
    bt      .loop_do1

    add     #4,r3       /* readjust end address */
.last_do13:             /* shared tail for the (long + 1) and (long + 3) loops */
    cmp/hi  r5,r3       /* one long left? */
    bf      .start_b2   /* no, jump to trailing byte loop */

    mov.l   @r5+,r0     /* load last long & increment source addr */
    add     #12,r4      /* increment dest addr */
    mov.b   r0,@-r4     /* store low byte */
    shlr8   r0          /* get middle 2 bytes */
    mov.w   r0,@-r4     /* store as word */
    shlr16  r0          /* get upper byte */
    mov.b   r0,@-r4     /* and store */
    bra     .start_b2   /* jump to trailing byte loop */
    add     #-4,r4      /* (delay slot) readjust destination */

    /* byte aligned destination (long + 3) */
    .align  2
.loop_do3:
    mov.l   @r5+,r1     /* load first long & increment source addr */
    add     #16,r4      /* increment dest addr */
    mov.l   @r5+,r0     /* load second long & increment source addr */
    mov     r1,r2       /* copy first long */
    mov.b   r0,@-r4     /* store low byte of second long */
    shlr8   r0          /* get middle 2 bytes */
    mov.w   r0,@-r4     /* store as word */
    shlr16  r0          /* get upper byte */
    shll8   r2          /* move lower 3 bytes of first long one up.. */
    or      r2,r0       /* ..combine with the 1 byte of second long.. */
    mov.l   r0,@-r4     /* ..and store as long */
    shlr16  r1          /* get upper byte of first long.. */
    shlr8   r1
    cmp/hi  r5,r3       /* runs r5 up to last or second last long bound */
    mov.b   r1,@-r4     /* ..and store */
    bt      .loop_do3

    bra     .last_do13  /* handle last longword: reuse routine for (long + 1) */
    add     #4,r3       /* (delay slot) readjust end address */

    /* trailing byte loop: copies 0..3 bytes (or all for < 11 in total) */
    .align  2
.loop_b2:
    mov.b   @r5+,r0     /* load byte & increment source addr */
    add     #1,r4       /* increment dest addr */
    mov.b   r0,@(7,r4)  /* store byte (+7 compensates the -8 dest offset) */
.start_b2:
    cmp/hi  r5,r6       /* runs r5 up to end address */
    bt      .loop_b2

    rts
    mov     r7,r0       /* (delay slot) return dest start address */
.end:
    .size   _memcpy,.end-_memcpy