Diffstat (limited to 'firmware/asm/sh')
-rw-r--r--  firmware/asm/sh/memcpy.S   219
-rw-r--r--  firmware/asm/sh/memmove.S  222
-rw-r--r--  firmware/asm/sh/memset.S   109
-rw-r--r--  firmware/asm/sh/strlen.S    96
4 files changed, 646 insertions, 0 deletions
diff --git a/firmware/asm/sh/memcpy.S b/firmware/asm/sh/memcpy.S
new file mode 100644
index 0000000000..e23a579b05
--- /dev/null
+++ b/firmware/asm/sh/memcpy.S
@@ -0,0 +1,219 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2004-2005 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"

    .section    .icode,"ax",@progbits

    .align      2
    .global     _memcpy
    .global     ___memcpy_fwd_entry
    .type       _memcpy,@function

/* Copies <length> bytes of data in memory from <source> to <dest>
 * This version is optimized for speed
 *
 * arguments:
 *   r4 - destination address
 *   r5 - source address
 *   r6 - length
 *
 * return value:
 *   r0 - destination address (like ANSI version)
 *
 * register usage:
 *   r0 - data / scratch
 *   r1 - 2nd data / scratch
 *   r2 - scratch
 *   r3 - first long bound / adjusted end address (only if >= 11 bytes)
 *   r4 - current dest address
 *   r5 - current source address
 *   r6 - source end address
 *   r7 - stored dest start address
 *
 * The instruction order is devised in a way to utilize the pipelining
 * of the SH1 to the max. The routine also tries to utilize fast page mode.
 */

_memcpy:
    mov     r4,r7        /* store dest for returning */
___memcpy_fwd_entry:
    add     #-8,r4       /* offset for early increment (max. 2 longs) */
    mov     #11,r0
    cmp/hs  r0,r6        /* at least 11 bytes to copy? (ensures 2 aligned longs) */
    add     r5,r6        /* r6 = source_end */
    bf      .start_b2    /* no: jump directly to byte loop */

    mov     #3,r0
    neg     r5,r3
    and     r0,r3        /* r3 = (4 - align_offset) % 4 */
    tst     r3,r3        /* already aligned? */
    bt      .end_b1      /* yes: skip leading byte loop */

    add     r5,r3        /* r3 = first source long bound */

    /* leading byte loop: copies 0..3 bytes */
.loop_b1:
    mov.b   @r5+,r0      /* load byte & increment source addr */
    add     #1,r4        /* increment dest addr */
    mov.b   r0,@(7,r4)   /* store byte */
    cmp/hi  r5,r3        /* runs r5 up to first long bound */
    bt      .loop_b1
    /* now r5 is always at a long boundary */
    /* -> memory reading is done in longs for all dest alignments */

    /* selector for main copy loop */
.end_b1:
    mov     #3,r1
    and     r4,r1        /* r1 = dest alignment offset */
    mova    .jmptab,r0
    mov.b   @(r0,r1),r1  /* select appropriate main loop */
    add     r0,r1
    mov     r6,r3        /* move end address to r3 */
    jmp     @r1          /* and jump to it */
    add     #-7,r3       /* adjust end addr for main loops doing 2 longs/pass */

    /** main loops, copying 2 longs per pass to profit from fast page mode **/

    /* long aligned destination (fastest) */
    .align  2
.loop_do0:
    mov.l   @r5+,r1      /* load first long & increment source addr */
    add     #16,r4       /* increment dest addr & account for decrementing stores */
    mov.l   @r5+,r0      /* load second long & increment source addr */
    cmp/hi  r5,r3        /* runs r5 up to last or second last long bound */
    mov.l   r0,@-r4      /* store second long */
    mov.l   r1,@-r4      /* store first long; NOT ALIGNED - no speed loss here! */
    bt      .loop_do0

    add     #4,r3        /* readjust end address */
    cmp/hi  r5,r3        /* one long left? */
    bf      .start_b2    /* no, jump to trailing byte loop */

    mov.l   @r5+,r0      /* load last long & increment source addr */
    add     #4,r4        /* increment dest addr */
    bra     .start_b2    /* jump to trailing byte loop */
    mov.l   r0,@(4,r4)   /* store last long */

    /* word aligned destination (long + 2) */
    .align  2
.loop_do2:
    mov.l   @r5+,r1      /* load first long & increment source addr */
    add     #16,r4       /* increment dest addr */
    mov.l   @r5+,r0      /* load second long & increment source addr */
    cmp/hi  r5,r3        /* runs r5 up to last or second last long bound */
    mov.w   r0,@-r4      /* store low word of second long */
    xtrct   r1,r0        /* extract low word of first long & high word of second long */
    mov.l   r0,@-r4      /* and store as long */
    swap.w  r1,r0        /* get high word of first long */
    mov.w   r0,@-r4      /* and store it */
    bt      .loop_do2

    add     #4,r3        /* readjust end address */
    cmp/hi  r5,r3        /* one long left? */
    bf      .start_b2    /* no, jump to trailing byte loop */

    mov.l   @r5+,r0      /* load last long & increment source addr */
    add     #4,r4        /* increment dest addr */
    mov.w   r0,@(6,r4)   /* store low word */
    shlr16  r0           /* get high word */
    bra     .start_b2    /* jump to trailing byte loop */
    mov.w   r0,@(4,r4)   /* and store it */

    /* jumptable for loop selector */
    .align  2
.jmptab:
    .byte   .loop_do0 - .jmptab  /* placed in the middle because the SH1 */
    .byte   .loop_do1 - .jmptab  /* loads bytes sign-extended. Otherwise */
    .byte   .loop_do2 - .jmptab  /* the last loop would be out of reach */
    .byte   .loop_do3 - .jmptab  /* of the offset range. */

    /* byte aligned destination (long + 1) */
    .align  2
.loop_do1:
    mov.l   @r5+,r1      /* load first long & increment source addr */
    add     #16,r4       /* increment dest addr */
    mov.l   @r5+,r0      /* load second long & increment source addr */
    cmp/hi  r5,r3        /* runs r5 up to last or second last long bound */
    mov.b   r0,@-r4      /* store low byte of second long */
    shlr8   r0           /* get upper 3 bytes */
    mov     r1,r2        /* copy first long */
    shll16  r2           /* move low byte of first long all the way up, .. */
    shll8   r2
    or      r2,r0        /* ..combine with the 3 bytes of second long.. */
    mov.l   r0,@-r4      /* ..and store as long */
    shlr8   r1           /* get middle 2 bytes */
    mov.w   r1,@-r4      /* store as word */
    shlr16  r1           /* get upper byte */
    mov.b   r1,@-r4      /* and store */
    bt      .loop_do1

    add     #4,r3        /* readjust end address */
.last_do13:
    cmp/hi  r5,r3        /* one long left? */
    bf      .start_b2    /* no, jump to trailing byte loop */

    mov.l   @r5+,r0      /* load last long & increment source addr */
    add     #12,r4       /* increment dest addr */
    mov.b   r0,@-r4      /* store low byte */
    shlr8   r0           /* get middle 2 bytes */
    mov.w   r0,@-r4      /* store as word */
    shlr16  r0           /* get upper byte */
    mov.b   r0,@-r4      /* and store */
    bra     .start_b2    /* jump to trailing byte loop */
    add     #-4,r4       /* readjust destination */

    /* byte aligned destination (long + 3) */
    .align  2
.loop_do3:
    mov.l   @r5+,r1      /* load first long & increment source addr */
    add     #16,r4       /* increment dest addr */
    mov.l   @r5+,r0      /* load second long & increment source addr */
    mov     r1,r2        /* copy first long */
    mov.b   r0,@-r4      /* store low byte of second long */
    shlr8   r0           /* get middle 2 bytes */
    mov.w   r0,@-r4      /* store as word */
    shlr16  r0           /* get upper byte */
    shll8   r2           /* move lower 3 bytes of first long one up.. */
    or      r2,r0        /* ..combine with the 1 byte of second long.. */
    mov.l   r0,@-r4      /* ..and store as long */
    shlr16  r1           /* get upper byte of first long.. */
    shlr8   r1
    cmp/hi  r5,r3        /* runs r5 up to last or second last long bound */
    mov.b   r1,@-r4      /* ..and store */
    bt      .loop_do3

    bra     .last_do13   /* handle last longword: reuse routine for (long + 1) */
    add     #4,r3        /* readjust end address */

    /* trailing byte loop: copies 0..3 bytes (or all for < 11 in total) */
    .align  2
.loop_b2:
    mov.b   @r5+,r0      /* load byte & increment source addr */
    add     #1,r4        /* increment dest addr */
    mov.b   r0,@(7,r4)   /* store byte */
.start_b2:
    cmp/hi  r5,r6        /* runs r5 up to end address */
    bt      .loop_b2

    rts
    mov     r7,r0        /* return dest start address */
.end:
    .size   _memcpy,.end-_memcpy
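
For reference, the strategy described in the header comment above (a leading byte loop to align the source, a main loop moving two longwords per pass with a variant chosen by destination alignment, and a trailing byte loop for the remainder) corresponds roughly to the C sketch below. This is only an illustration, not the source the assembly was written from: the function name is made up, and only the long-aligned destination case is spelled out where the real code has three additional shift-and-combine loops.

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: mirrors the structure of the SH1 _memcpy
 * (leading byte loop, long-wise main loop, trailing byte loop),
 * but only handles the case where the destination is long-aligned
 * once the source is.  The real routine has shifted main loops for
 * the three misaligned destination cases as well. */
void *memcpy_sketch(void *dst, const void *src, size_t len)
{
    unsigned char *d = dst;
    const unsigned char *s = src;

    if (len >= 11) {                        /* same threshold as the asm */
        /* leading byte loop: advance s to a longword boundary */
        while ((uintptr_t)s & 3) {
            *d++ = *s++;
            len--;
        }
        if (((uintptr_t)d & 3) == 0) {      /* the ".loop_do0" case */
            while (len >= 8) {              /* two longs per pass */
                uint32_t a = ((const uint32_t *)s)[0];
                uint32_t b = ((const uint32_t *)s)[1];
                ((uint32_t *)d)[0] = a;
                ((uint32_t *)d)[1] = b;
                s += 8; d += 8; len -= 8;
            }
            if (len >= 4) {                 /* at most one long is left */
                *(uint32_t *)d = *(const uint32_t *)s;
                s += 4; d += 4; len -= 4;
            }
        }
        /* misaligned destinations use shift/combine loops in the asm */
    }
    while (len--)                           /* trailing byte loop */
        *d++ = *s++;
    return dst;
}

The 11-byte threshold is the same one the assembly tests with cmp/hs: after at most 3 leading bytes it still leaves at least two aligned longwords, so the unrolled main loop always runs at least once.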
diff --git a/firmware/asm/sh/memmove.S b/firmware/asm/sh/memmove.S
new file mode 100644
index 0000000000..d5a7160043
--- /dev/null
+++ b/firmware/asm/sh/memmove.S
@@ -0,0 +1,222 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2006 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"

    .section    .icode,"ax",@progbits

    .align      2
    .global     _memmove
    .type       _memmove,@function

/* Moves <length> bytes of data in memory from <source> to <dest>
 * Regions may overlap.
 * This version is optimized for speed, and needs the corresponding memcpy
 * implementation for the forward copy branch.
 *
 * arguments:
 *   r4 - destination address
 *   r5 - source address
 *   r6 - length
 *
 * return value:
 *   r0 - destination address (like ANSI version)
 *
 * register usage:
 *   r0 - data / scratch
 *   r1 - 2nd data / scratch
 *   r2 - scratch
 *   r3 - last long bound / adjusted start address (only if >= 11 bytes)
 *   r4 - current dest address
 *   r5 - source start address
 *   r6 - current source address
 *
 * The instruction order is devised in a way to utilize the pipelining
 * of the SH1 to the max. The routine also tries to utilize fast page mode.
 */

_memmove:
    cmp/hi  r4,r5        /* source > destination? */
    bf      .backward    /* no: backward copy */
    mov.l   .memcpy_fwd,r0
    jmp     @r0
    mov     r4,r7        /* store dest for returning */

    .align  2
.memcpy_fwd:
    .long   ___memcpy_fwd_entry

.backward:
    add     r6,r4        /* r4 = destination end */
    mov     #11,r0
    cmp/hs  r0,r6        /* at least 11 bytes to copy? (ensures 2 aligned longs) */
    add     #-8,r5       /* adjust for late decrement (max. 2 longs) */
    add     r5,r6        /* r6 = source end - 8 */
    bf      .start_b2r   /* no: jump directly to byte loop */

    mov     #-4,r3       /* r3 = 0xfffffffc */
    and     r6,r3        /* r3 = last source long bound */
    cmp/hi  r3,r6        /* already aligned? */
    bf      .end_b1r     /* yes: skip leading byte loop */

.loop_b1r:
    mov.b   @(7,r6),r0   /* load byte */
    add     #-1,r6       /* decrement source addr */
    mov.b   r0,@-r4      /* store byte */
    cmp/hi  r3,r6        /* runs r6 down to last long bound */
    bt      .loop_b1r

.end_b1r:
    mov     #3,r1
    and     r4,r1        /* r1 = dest alignment offset */
    mova    .jmptab_r,r0
    mov.b   @(r0,r1),r1  /* select appropriate main loop.. */
    add     r0,r1
    mov     r5,r3        /* copy start address to r3 */
    jmp     @r1          /* ..and jump to it */
    add     #7,r3        /* adjust start addr for main loops doing 2 longs/pass */

    /** main loops, copying 2 longs per pass to profit from fast page mode **/

    /* long aligned destination (fastest) */
    .align  2
.loop_do0r:
    mov.l   @r6,r1       /* load first long */
    add     #-8,r6       /* decrement source addr */
    mov.l   @(12,r6),r0  /* load second long */
    cmp/hi  r3,r6        /* runs r6 down to first or second long bound */
    mov.l   r0,@-r4      /* store second long */
    mov.l   r1,@-r4      /* store first long; NOT ALIGNED - no speed loss here! */
    bt      .loop_do0r

    add     #-4,r3       /* readjust start address */
    cmp/hi  r3,r6        /* first long left? */
    bf      .start_b2r   /* no, jump to trailing byte loop */

    mov.l   @(4,r6),r0   /* load first long */
    add     #-4,r6       /* decrement source addr */
    bra     .start_b2r   /* jump to trailing byte loop */
    mov.l   r0,@-r4      /* store first long */

    /* word aligned destination (long + 2) */
    .align  2
.loop_do2r:
    mov.l   @r6,r1       /* load first long */
    add     #-8,r6       /* decrement source addr */
    mov.l   @(12,r6),r0  /* load second long */
    cmp/hi  r3,r6        /* runs r6 down to first or second long bound */
    mov.w   r0,@-r4      /* store low word of second long */
    xtrct   r1,r0        /* extract low word of first long & high word of second long */
    mov.l   r0,@-r4      /* and store as long */
    shlr16  r1           /* get high word of first long */
    mov.w   r1,@-r4      /* and store it */
    bt      .loop_do2r

    add     #-4,r3       /* readjust start address */
    cmp/hi  r3,r6        /* first long left? */
    bf      .start_b2r   /* no, jump to trailing byte loop */

    mov.l   @(4,r6),r0   /* load first long */
    add     #-4,r6       /* decrement source addr */
    mov.w   r0,@-r4      /* store low word */
    shlr16  r0           /* get high word */
    bra     .start_b2r   /* jump to trailing byte loop */
    mov.w   r0,@-r4      /* and store it */

    /* jumptable for loop selector */
    .align  2
.jmptab_r:
    .byte   .loop_do0r - .jmptab_r  /* placed in the middle because the SH1 */
    .byte   .loop_do1r - .jmptab_r  /* loads bytes sign-extended. Otherwise */
    .byte   .loop_do2r - .jmptab_r  /* the last loop would be out of reach */
    .byte   .loop_do3r - .jmptab_r  /* of the offset range. */

    /* byte aligned destination (long + 1) */
    .align  2
.loop_do1r:
    mov.l   @r6,r1       /* load first long */
    add     #-8,r6       /* decrement source addr */
    mov.l   @(12,r6),r0  /* load second long */
    cmp/hi  r3,r6        /* runs r6 down to first or second long bound */
    mov.b   r0,@-r4      /* store low byte of second long */
    shlr8   r0           /* get upper 3 bytes */
    mov     r1,r2        /* copy first long */
    shll16  r2           /* move low byte of first long all the way up, .. */
    shll8   r2
    or      r2,r0        /* ..combine with the 3 bytes of second long.. */
    mov.l   r0,@-r4      /* ..and store as long */
    shlr8   r1           /* get middle 2 bytes */
    mov.w   r1,@-r4      /* store as word */
    shlr16  r1           /* get upper byte */
    mov.b   r1,@-r4      /* and store */
    bt      .loop_do1r

    add     #-4,r3       /* readjust start address */
.last_do13r:
    cmp/hi  r3,r6        /* first long left? */
    bf      .start_b2r   /* no, jump to trailing byte loop */

    nop                  /* alignment */
    mov.l   @(4,r6),r0   /* load first long */
    add     #-4,r6       /* decrement source addr */
    mov.b   r0,@-r4      /* store low byte */
    shlr8   r0           /* get middle 2 bytes */
    mov.w   r0,@-r4      /* store as word */
    shlr16  r0           /* get upper byte */
    bra     .start_b2r   /* jump to trailing byte loop */
    mov.b   r0,@-r4      /* and store */

    /* byte aligned destination (long + 3) */
    .align  2
.loop_do3r:
    mov.l   @r6,r1       /* load first long */
    add     #-8,r6       /* decrement source addr */
    mov.l   @(12,r6),r0  /* load second long */
    mov     r1,r2        /* copy first long */
    mov.b   r0,@-r4      /* store low byte of second long */
    shlr8   r0           /* get middle 2 bytes */
    mov.w   r0,@-r4      /* store as word */
    shlr16  r0           /* get upper byte */
    shll8   r2           /* move lower 3 bytes of first long one up.. */
    or      r2,r0        /* ..combine with the 1 byte of second long.. */
    mov.l   r0,@-r4      /* ..and store as long */
    shlr16  r1           /* get upper byte of first long.. */
    shlr8   r1
    cmp/hi  r3,r6        /* runs r6 down to first or second long bound */
    mov.b   r1,@-r4      /* ..and store */
    bt      .loop_do3r

    bra     .last_do13r  /* handle first longword: reuse routine for (long + 1) */
    add     #-4,r3       /* readjust start address */

    /* trailing byte loop: copies 0..3 bytes (or all for < 11 in total) */
    .align  2
.loop_b2r:
    mov.b   @(7,r6),r0   /* load byte */
    add     #-1,r6       /* decrement source addr */
    mov.b   r0,@-r4      /* store byte */
.start_b2r:
    cmp/hi  r5,r6        /* runs r6 down to start address */
    bt      .loop_b2r

    rts
    mov     r4,r0        /* return dest start address */
.end:
    .size   _memmove,.end-_memmove
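
The only logic here that memcpy does not already have is the dispatch at the top: when the source lies above the destination an overlapping forward copy is safe, so the routine tail-jumps into ___memcpy_fwd_entry; otherwise it copies backwards from the end of both regions, mirroring memcpy's structure with decrementing addresses. A minimal C sketch of that dispatch follows, with plain byte loops standing in for the optimized long-wise loops and a made-up function name.

#include <stddef.h>

/* Illustration of the forward/backward decision only; the real
 * routine shares memcpy's optimized forward path and uses long-wise
 * backward loops instead of these byte loops. */
void *memmove_sketch(void *dst, const void *src, size_t len)
{
    unsigned char *d = dst;
    const unsigned char *s = src;

    if (s > (const unsigned char *)d) {  /* source above dest: forward is safe */
        while (len--)
            *d++ = *s++;
    } else {                             /* overlap possible: copy from the end */
        d += len;
        s += len;
        while (len--)
            *--d = *--s;
    }
    return dst;
}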
diff --git a/firmware/asm/sh/memset.S b/firmware/asm/sh/memset.S
new file mode 100644
index 0000000000..8cae1ea112
--- /dev/null
+++ b/firmware/asm/sh/memset.S
@@ -0,0 +1,109 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2004 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"

    .section    .icode,"ax",@progbits

    .align      2
    .global     _memset
    .type       _memset,@function

/* Fills a memory region with specified byte value
 * This version is optimized for speed
 *
 * arguments:
 *   r4 - start address
 *   r5 - data
 *   r6 - length
 *
 * return value:
 *   r0 - start address (like ANSI version)
 *
 * register usage:
 *   r0 - temporary
 *   r1 - start address + 11 for main loop
 *   r4 - start address
 *   r5 - data (spread to all 4 bytes when using long stores)
 *   r6 - current address (runs down from end to start)
 *
 * The instruction order below is devised in a way to utilize the pipelining
 * of the SH1 to the max. The routine fills memory from end to start in
 * order to utilize the auto-decrementing store instructions.
 */

_memset:
    neg     r4,r0
    and     #3,r0        /* r0 = (4 - align_offset) % 4 */
    add     #4,r0
    cmp/hs  r0,r6        /* at least one aligned longword to fill? */
    add     r4,r6        /* r6 = end_address */
    bf      .no_longs    /* no, jump directly to byte loop */

    extu.b  r5,r5        /* start: spread data to all 4 bytes */
    swap.b  r5,r0
    or      r0,r5        /* data now in 2 lower bytes of r5 */
    swap.w  r5,r0
    or      r0,r5        /* data now in all 4 bytes of r5 */

    mov     r6,r0
    tst     #3,r0        /* r0 already long aligned? */
    bt      .end_b1      /* yes: skip loop */

    /* leading byte loop: sets 0..3 bytes */
.loop_b1:
    mov.b   r5,@-r0      /* store byte */
    tst     #3,r0        /* r0 long aligned? */
    bf      .loop_b1     /* runs r0 down until long aligned */

    mov     r0,r6        /* r6 = last long bound */
    nop                  /* keep alignment */

.end_b1:
    mov     r4,r1        /* r1 = start_address... */
    add     #11,r1       /* ... + 11, combined for rounding and offset */
    xor     r1,r0
    tst     #4,r0        /* bit 2 tells whether an even or odd number of */
    bf      .loop_odd    /* longwords to set */

    /* main loop: set 2 longs per pass */
.loop_2l:
    mov.l   r5,@-r6      /* store first long */
.loop_odd:
    cmp/hi  r1,r6        /* runs r6 down to first long bound */
    mov.l   r5,@-r6      /* store second long */
    bt      .loop_2l

.no_longs:
    cmp/hi  r4,r6        /* any bytes left? */
    bf      .end_b2      /* no: skip loop */

    /* trailing byte loop */
.loop_b2:
    mov.b   r5,@-r6      /* store byte */
    cmp/hi  r4,r6        /* runs r6 down to the start address */
    bt      .loop_b2

.end_b2:
    rts
    mov     r4,r0        /* return start address */

.end:
    .size   _memset,.end-_memset
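
Two ideas from the header comment carry this routine: the fill byte is replicated into all four bytes of r5 with the swap.b/swap.w sequence so the main loop can use longword stores, and the fill runs from the end of the region down to the start so the auto-decrementing @-Rn store forms can be used throughout. A simplified C sketch of the same approach is given below; the name is hypothetical, and it uses a plain len >= 8 cutoff and one long store per iteration instead of the exact threshold and two-longs-per-pass unrolling in the assembly.

#include <stddef.h>
#include <stdint.h>

/* Sketch of the SH1 _memset strategy: spread the byte to a full
 * longword, fill from the end towards the start long-wise, and use
 * byte stores for the unaligned head and tail.  Not the actual code. */
void *memset_sketch(void *dst, int c, size_t len)
{
    unsigned char *start = dst;
    unsigned char *p = start + len;            /* fill runs downwards */

    if (len >= 8) {                            /* enough room for aligned long stores */
        uint32_t v = (unsigned char)c;
        v |= v << 8;                           /* byte in both low bytes  (swap.b step) */
        v |= v << 16;                          /* byte in all four bytes  (swap.w step) */

        while ((uintptr_t)p & 3)               /* align the high end, 0..3 byte stores */
            *--p = (unsigned char)c;

        while (p - start >= 4) {               /* long stores, end towards start */
            p -= 4;
            *(uint32_t *)p = v;
        }
    }
    while (p > start)                          /* leftover head bytes (or whole region) */
        *--p = (unsigned char)c;
    return dst;
}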
diff --git a/firmware/asm/sh/strlen.S b/firmware/asm/sh/strlen.S
new file mode 100644
index 0000000000..e7169e25db
--- /dev/null
+++ b/firmware/asm/sh/strlen.S
@@ -0,0 +1,96 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2005 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"

    .section    .icode,"ax",@progbits

    .align      2
    .global     _strlen
    .type       _strlen,@function

/* Works out the length of a string
 * This version is optimized for speed
 *
 * arguments:
 *   r4 - start address
 *
 * return value:
 *   r0 - string length
 *
 * register usage:
 *   r0 - current address
 *   r1 - current value (byte/long)
 *   r2 - mask for alignment / zero (for cmp/str)
 *   r4 - start address
 *
 */

_strlen:
    mov     r4,r0        /* r0 = start address */
    tst     #3,r0        /* long aligned? */
    bt      .start_l     /* yes, jump directly to the longword loop */

    /* not long aligned: check the first 3 bytes */
    mov.b   @r0+,r1      /* fetch first byte */
    tst     r1,r1        /* byte == 0 ? */
    bt      .hitzero     /* yes, string end found */
    mov.b   @r0+,r1      /* fetch second byte */
    mov     #3,r2        /* prepare mask: r2 = 0..00000011b */
    tst     r1,r1        /* byte == 0 ? */
    bt      .hitzero     /* yes, string end found */
    mov.b   @r0+,r1      /* fetch third byte */
    not     r2,r2        /* prepare mask: r2 = 1..11111100b */
    tst     r1,r1        /* byte == 0 ? */
    bt      .hitzero     /* yes, string end found */

    /* not yet found, fall through into longword loop */
    and     r2,r0        /* align down to long bound */

    /* main loop: check longwords */
.start_l:
    mov     #0,r2        /* zero longword for cmp/str */
.loop_l:
    mov.l   @r0+,r1      /* fetch longword */
    cmp/str r1,r2        /* any zero byte within? */
    bf      .loop_l      /* no, loop */
    add     #-4,r0       /* set address back to start of this longword */

    /* the last longword contains the string end: figure out the byte */
    mov.b   @r0+,r1      /* fetch first byte */
    tst     r1,r1        /* byte == 0 ? */
    bt      .hitzero     /* yes, string end found */
    mov.b   @r0+,r1      /* fetch second byte */
    tst     r1,r1        /* byte == 0 ? */
    bt      .hitzero     /* yes, string end found */
    mov.b   @r0+,r1      /* fetch third byte */
    tst     r1,r1        /* byte == 0 ? */
    bt      .hitzero     /* yes, string end found */
    rts                  /* must be the fourth byte */
    sub     r4,r0        /* len = string_end - string_start */

.hitzero:
    add     #-1,r0       /* undo address increment */
    rts
    sub     r4,r0        /* len = string_end - string_start */

.end:
    .size   _strlen,.end-_strlen
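
The core of _strlen is the cmp/str instruction, which compares all four byte lanes of two registers at once and sets T if any lane matches; with r2 = 0 it answers "does this longword contain a zero byte?" in a single step. C has no direct equivalent, so the sketch below (hypothetical name, assuming 32-bit longwords) uses the well-known (v - 0x01010101) & ~v & 0x80808080 bit trick in its place, but otherwise follows the same shape: byte checks up to the first longword boundary, a longword scan, then byte checks inside the final longword.

#include <stddef.h>
#include <stdint.h>

/* Word-at-a-time strlen sketch in the spirit of the SH1 routine.
 * The bit trick stands in for cmp/str; like the assembly, the scan
 * reads whole longwords and may read a few bytes past the terminator,
 * but never beyond the longword that contains it. */
size_t strlen_sketch(const char *s)
{
    const char *p = s;

    /* check up to 3 bytes until p is longword aligned */
    while ((uintptr_t)p & 3) {
        if (*p == '\0')
            return (size_t)(p - s);
        p++;
    }

    /* main loop: test one longword (4 bytes) per iteration */
    for (;;) {
        uint32_t v = *(const uint32_t *)p;
        if ((v - 0x01010101u) & ~v & 0x80808080u)   /* any zero byte? */
            break;
        p += 4;
    }

    /* the terminator is somewhere in these 4 bytes */
    while (*p != '\0')
        p++;
    return (size_t)(p - s);
}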