Diffstat (limited to 'firmware')
-rw-r--r--  firmware/common/memset_a.S | 55
1 file changed, 27 insertions(+), 28 deletions(-)
diff --git a/firmware/common/memset_a.S b/firmware/common/memset_a.S
index 96fece4421..6dbdab9595 100644
--- a/firmware/common/memset_a.S
+++ b/firmware/common/memset_a.S
@@ -241,30 +241,29 @@ memset:
 
 #elif defined(CPU_ARM)
 
-/* The following code is taken from the Linux kernel version 2.6.15.3
+/* The following code is based on code found in Linux kernel version 2.6.15.3
  * linux/arch/arm/lib/memset.S
  *
  * Copyright (C) 1995-2000 Russell King
  */
 
-@ .word 0
-1:	subs	r2, r2, #4		@ 1 do we have enough
+/* This code will align a pointer for memset, if needed */
+1:	cmp	r2, #4			@ 1 do we have enough
 	blt	5f			@ 1 bytes to align with?
 	cmp	r3, #2			@ 1
-	strltb	r1, [r0], #1		@ 1
-	strleb	r1, [r0], #1		@ 1
-	strb	r1, [r0], #1		@ 1
-	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memzero again.
- */
+	strgtb	r1, [r0, #-1]!		@ 1
+	strgeb	r1, [r0, #-1]!		@ 1
+	strb	r1, [r0, #-1]!		@ 1
+	sub	r2, r2, r3		@ 1 r2 = r2 - r3
+	b	2f
 
 	.global memset
 	.type memset,%function
 memset:
+	add	r0, r0, r2		@ we'll write backwards in memory
 	ands	r3, r0, #3		@ 1 unaligned?
 	bne	1b			@ 1
+2:
 /*
  * we know that the pointer in r0 is aligned to a word boundary.
  */
@@ -272,7 +271,7 @@ memset:
 	orr	r1, r1, r1, lsl #16
 	mov	r3, r1
 	cmp	r2, #16
-	blt	4f
+	blt	5f
 /*
  * We need an extra register for this loop - save the return address and
  * use the LR
@@ -281,36 +280,36 @@ memset:
 	mov	ip, r1
 	mov	lr, r1
 
-2:	subs	r2, r2, #64
-	stmgeia	r0!, {r1, r3, ip, lr}	@ 64 bytes at a time.
-	stmgeia	r0!, {r1, r3, ip, lr}
-	stmgeia	r0!, {r1, r3, ip, lr}
-	stmgeia	r0!, {r1, r3, ip, lr}
-	bgt	2b
+3:	subs	r2, r2, #64
+	stmgedb	r0!, {r1, r3, ip, lr}	@ 64 bytes at a time.
+	stmgedb	r0!, {r1, r3, ip, lr}
+	stmgedb	r0!, {r1, r3, ip, lr}
+	stmgedb	r0!, {r1, r3, ip, lr}
+	bgt	3b
 	ldmeqfd	sp!, {pc}		@ Now <64 bytes to go.
 /*
  * No need to correct the count; we're only testing bits from now on
  */
 	tst	r2, #32
-	stmneia	r0!, {r1, r3, ip, lr}
-	stmneia	r0!, {r1, r3, ip, lr}
+	stmnedb	r0!, {r1, r3, ip, lr}
+	stmnedb	r0!, {r1, r3, ip, lr}
 	tst	r2, #16
-	stmneia	r0!, {r1, r3, ip, lr}
+	stmnedb	r0!, {r1, r3, ip, lr}
 	ldr	lr, [sp], #4
 
-4:	tst	r2, #8
-	stmneia	r0!, {r1, r3}
+5:	tst	r2, #8
+	stmnedb	r0!, {r1, r3}
 	tst	r2, #4
-	strne	r1, [r0], #4
+	strne	r1, [r0, #-4]!
 /*
  * When we get here, we've got less than 4 bytes to zero. We
  * may have an unaligned pointer as well.
  */
-5:	tst	r2, #2
-	strneb	r1, [r0], #1
-	strneb	r1, [r0], #1
+6:	tst	r2, #2
+	strneb	r1, [r0, #-1]!
+	strneb	r1, [r0, #-1]!
 	tst	r2, #1
-	strneb	r1, [r0], #1
+	strneb	r1, [r0, #-1]!
 	mov	pc, lr
 end:
 	.size	memset,.end-memset
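The net effect of this change is that memset now fills the buffer from its end towards its start: the destination pointer is advanced by the length up front (add r0, r0, r2) and every store then uses pre-decrement addressing (stmgedb/stmnedb, [r0, #-1]!, [r0, #-4]!), with the local labels renumbered (2 to 3, 4 to 5, 5 to 6) to make room for the new 2: entry point after the alignment code. A rough C sketch of the same strategy, for reference only (this is not the Rockbox source; the name memset_backwards is made up for this note):

#include <stddef.h>
#include <stdint.h>

/* Sketch of a backwards-filling memset: start at the end of the buffer
 * and decrement the pointer with every store, as the patched assembly does. */
void *memset_backwards(void *dst, int c, size_t n)
{
    unsigned char *p = (unsigned char *)dst + n;   /* start at the end */
    uint32_t word = (unsigned char)c;
    word |= word << 8;
    word |= word << 16;                            /* replicate the byte into a word */

    /* Store single bytes until the end pointer is word aligned
     * (mirrors the 1: alignment code). */
    while (n > 0 && ((uintptr_t)p & 3)) {
        *--p = (unsigned char)c;
        n--;
    }

    /* Store whole words backwards (mirrors the stmdb block). */
    while (n >= 4) {
        p -= 4;
        *(uint32_t *)p = word;
        n -= 4;
    }

    /* Remaining 1-3 bytes at the very start of the buffer. */
    while (n > 0) {
        *--p = (unsigned char)c;
        n--;
    }
    return dst;
}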