summaryrefslogtreecommitdiff
path: root/firmware/target/sh/memmove-sh.S
diff options
context:
space:
mode:
Diffstat (limited to 'firmware/target/sh/memmove-sh.S')
-rwxr-xr-xfirmware/target/sh/memmove-sh.S220
1 files changed, 220 insertions, 0 deletions
diff --git a/firmware/target/sh/memmove-sh.S b/firmware/target/sh/memmove-sh.S
new file mode 100755
index 0000000000..9ae9ae5fa2
--- /dev/null
+++ b/firmware/target/sh/memmove-sh.S
@@ -0,0 +1,220 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2006 by Jens Arnold
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"

    .section    .icode,"ax",@progbits

    .align      2
    .global     _memmove
    .type       _memmove,@function

/* Moves <length> bytes of data in memory from <source> to <dest>
 * Regions may overlap.
 * This version is optimized for speed, and needs the corresponding memcpy
 * implementation for the forward copy branch.
 *
 * arguments:
 *  r4 - destination address
 *  r5 - source address
 *  r6 - length
 *
 * return value:
 *  r0 - destination address (like ANSI version)
 *
 * register usage:
 *  r0 - data / scratch
 *  r1 - 2nd data / scratch
 *  r2 - scratch
 *  r3 - last long bound / adjusted start address (only if >= 11 bytes)
 *  r4 - current dest address
 *  r5 - source start address
 *  r6 - current source address
 *
 * The instruction order is devised in a way to utilize the pipelining
 * of the SH1 to the max. The routine also tries to utilize fast page mode.
 *
 * NOTE(review): on SH1, jmp/bra/rts each execute the following instruction
 * in their delay slot; bt/bf have no delay slot. The instruction ordering
 * below relies on this throughout.
 */

_memmove:
    cmp/hi  r4,r5           /* source > destination? */
    bf      .backward       /* no: overlap-safe backward copy */
    mov.l   .memcpy_fwd,r0  /* forward copy is delegated to memcpy's core */
    jmp     @r0
    mov     r4,r7           /* (delay slot) store dest for returning;
                             * presumably ___memcpy_fwd_entry returns r7 as
                             * the result — confirm against memcpy-sh.S */

    .align  2
.memcpy_fwd:
    .long   ___memcpy_fwd_entry

.backward:
    add     r6,r4           /* r4 = destination end */
    mov     #11,r0
    cmp/hs  r0,r6           /* at least 11 bytes to copy? (ensures 2 aligned longs) */
    add     #-8,r5          /* adjust for late decrement (max. 2 longs):
                             * r5/r6 are biased by -8, hence the @(7,..) and
                             * @(12,..) offsets in the loops below */
    add     r5,r6           /* r6 = source end - 8 */
    bf      .start_b2r      /* no: jump directly to byte loop */

    mov     #-4,r3          /* r3 = 0xfffffffc */
    and     r6,r3           /* r3 = last source long bound */
    cmp/hi  r3,r6           /* already aligned? */
    bf      .end_b1r        /* yes: skip leading byte loop */

    /* leading byte loop: copy down to the last long-aligned source addr */
.loop_b1r:
    mov.b   @(7,r6),r0      /* load byte (@(7,r6) = biased src - 1) */
    add     #-1,r6          /* decrement source addr */
    mov.b   r0,@-r4         /* store byte */
    cmp/hi  r3,r6           /* runs r6 down to last long bound */
    bt      .loop_b1r

.end_b1r:
    mov     #3,r1
    and     r4,r1           /* r1 = dest alignment offset (0..3) */
    mova    .jmptab_r,r0
    mov.b   @(r0,r1),r1     /* select appropriate main loop.. */
    add     r0,r1
    mov     r5,r3           /* copy start address to r3 */
    jmp     @r1             /* ..and jump to it */
    add     #7,r3           /* (delay slot) adjust end addr for main loops
                             * doing 2 longs/pass */

    /** main loops, copying 2 longs per pass to profit from fast page mode **/

    /* long aligned destination (fastest) */
    .align  2
.loop_do0r:
    mov.l   @r6,r1          /* load first long */
    add     #-8,r6          /* decrement source addr */
    mov.l   @(12,r6),r0     /* load second long */
    cmp/hi  r3,r6           /* runs r6 down to first or second long bound */
    mov.l   r0,@-r4         /* store second long */
    mov.l   r1,@-r4         /* store first long; NOT ALIGNED - no speed loss here! */
    bt      .loop_do0r

    add     #-4,r3          /* readjust end address */
    cmp/hi  r3,r6           /* first long left? */
    bf      .start_b2r      /* no, jump to trailing byte loop */

    mov.l   @(4,r6),r0      /* load first long */
    add     #-4,r6          /* decrement source addr */
    bra     .start_b2r      /* jump to trailing byte loop */
    mov.l   r0,@-r4         /* (delay slot) store first long */

    /* word aligned destination (long + 2) */
    .align  2
.loop_do2r:
    mov.l   @r6,r1          /* load first long */
    add     #-8,r6          /* decrement source addr */
    mov.l   @(12,r6),r0     /* load second long */
    cmp/hi  r3,r6           /* runs r6 down to first or second long bound */
    mov.w   r0,@-r4         /* store low word of second long */
    xtrct   r1,r0           /* extract low word of first long & high word of second long */
    mov.l   r0,@-r4         /* and store as long */
    shlr16  r1              /* get high word of first long */
    mov.w   r1,@-r4         /* and store it */
    bt      .loop_do2r

    add     #-4,r3          /* readjust end address */
    cmp/hi  r3,r6           /* first long left? */
    bf      .start_b2r      /* no, jump to trailing byte loop */

    mov.l   @(4,r6),r0      /* load first long */
    add     #-4,r6          /* decrement source addr */
    mov.w   r0,@-r4         /* store low word */
    shlr16  r0              /* get high word */
    bra     .start_b2r      /* jump to trailing byte loop */
    mov.w   r0,@-r4         /* (delay slot) and store it */

    /* jumptable for loop selector, indexed by dest alignment (0..3) */
    .align  2
.jmptab_r:
    .byte   .loop_do0r - .jmptab_r  /* placed in the middle because the SH1 */
    .byte   .loop_do1r - .jmptab_r  /* loads bytes sign-extended. Otherwise */
    .byte   .loop_do2r - .jmptab_r  /* the last loop would be out of reach */
    .byte   .loop_do3r - .jmptab_r  /* of the offset range. */

    /* byte aligned destination (long + 1) */
    .align  2
.loop_do1r:
    mov.l   @r6,r1          /* load first long */
    add     #-8,r6          /* decrement source addr */
    mov.l   @(12,r6),r0     /* load second long */
    cmp/hi  r3,r6           /* runs r6 down to first or second long bound */
    mov.b   r0,@-r4         /* store low byte of second long */
    shlr8   r0              /* get upper 3 bytes */
    mov     r1,r2           /* copy first long */
    shll16  r2              /* move low byte of first long all the way up, .. */
    shll8   r2
    or      r2,r0           /* ..combine with the 3 bytes of second long.. */
    mov.l   r0,@-r4         /* ..and store as long */
    shlr8   r1              /* get middle 2 bytes */
    mov.w   r1,@-r4         /* store as word */
    shlr16  r1              /* get upper byte */
    mov.b   r1,@-r4         /* and store */
    bt      .loop_do1r

    add     #-4,r3          /* readjust end address */
.last_do13r:                /* shared tail for (long + 1) and (long + 3) */
    cmp/hi  r3,r6           /* first long left? */
    bf      .start_b2r      /* no, jump to trailing byte loop */

    nop                     /* alignment */
    mov.l   @(4,r6),r0      /* load first long */
    add     #-4,r6          /* decrement source addr */
    mov.b   r0,@-r4         /* store low byte */
    shlr8   r0              /* get middle 2 bytes */
    mov.w   r0,@-r4         /* store as word */
    shlr16  r0              /* get upper byte */
    bra     .start_b2r      /* jump to trailing byte loop */
    mov.b   r0,@-r4         /* (delay slot) and store */

    /* byte aligned destination (long + 3) */
    .align  2
.loop_do3r:
    mov.l   @r6,r1          /* load first long */
    add     #-8,r6          /* decrement source addr */
    mov.l   @(12,r6),r0     /* load second long */
    mov     r1,r2           /* copy first long */
    mov.b   r0,@-r4         /* store low byte of second long */
    shlr8   r0              /* get middle 2 bytes */
    mov.w   r0,@-r4         /* store as word */
    shlr16  r0              /* get upper byte */
    shll8   r2              /* move lower 3 bytes of first long one up.. */
    or      r2,r0           /* ..combine with the 1 byte of second long.. */
    mov.l   r0,@-r4         /* ..and store as long */
    shlr16  r1              /* get upper byte of first long */
    shlr8   r1
    cmp/hi  r3,r6           /* runs r6 down to first or second long bound */
    mov.b   r1,@-r4         /* ..and store */
    bt      .loop_do3r

    bra     .last_do13r     /* handle first longword: reuse routine for (long + 1) */
    add     #-4,r3          /* (delay slot) readjust end address */

    /* trailing byte loop: copies 0..3 bytes (or all for < 11 in total) */
    .align  2
.loop_b2r:
    mov.b   @(7,r6),r0      /* load byte (@(7,r6) = biased src - 1) */
    add     #-1,r6          /* decrement source addr */
    mov.b   r0,@-r4         /* store byte */
.start_b2r:
    cmp/hi  r5,r6           /* runs r6 down to start address */
    bt      .loop_b2r

    rts
    mov     r4,r0           /* (delay slot) return dest start address */
.end:
    .size   _memmove,.end-_memmove