author     Michael Sevakis <jethead71@rockbox.org>    2014-08-06 04:26:52 -0400
committer  Michael Sevakis <jethead71@rockbox.org>    2014-08-29 22:06:57 -0400
commit     77b3625763ae4d5aa6aaa9d44fbc1bfec6b29335 (patch)
tree       74b12e2669da8653932f48f1ca3816eef4bf6324 /firmware/asm/mips
parent     7d1a47cf13726c95ac46027156cc12dd9da5b855 (diff)
Add mempcpy implementation
A GNU extension that returns dst + size instead of dst. It's a handy shortcut when copying strings of known size or back-to-back blocks, especially when done often.

It may be called directly, or alternatively through __builtin_mempcpy in some compiler versions.

For ASM on native targets, it is implemented as an alternate entrypoint to memcpy, which adds minimal code and overhead.

Change-Id: I4cbb3483f6df3c1007247fe0a95fd7078737462b
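(Illustration, not part of the commit: the end pointer returned by mempcpy is what makes chained copies of known-length blocks convenient. The helper name and parameters below are hypothetical; on a glibc host the prototype is exposed with _GNU_SOURCE.)

    #define _GNU_SOURCE            /* expose mempcpy prototype on glibc hosts */
    #include <string.h>

    /* Hypothetical helper: join two known-length buffers without tracking
       an offset by hand. Each mempcpy call returns the new end of dst. */
    char *join2(char *dst, const char *a, size_t alen,
                const char *b, size_t blen)
    {
        char *end = mempcpy(dst, a, alen);
        end = mempcpy(end, b, blen);
        *end = '\0';
        return end;                /* points just past the copied data */
    }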
Diffstat (limited to 'firmware/asm/mips')
-rw-r--r--  firmware/asm/mips/memcpy.S  11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/firmware/asm/mips/memcpy.S b/firmware/asm/mips/memcpy.S
index edbf5ac5eb..ec1625bb4f 100644
--- a/firmware/asm/mips/memcpy.S
+++ b/firmware/asm/mips/memcpy.S
@@ -43,8 +43,16 @@
 
     .global memcpy
     .type   memcpy, %function
+    .global mempcpy
+    .type   mempcpy, %function
 
     .set    noreorder
+mempcpy:
+    slti    t0, a2, 8           # Less than 8?
+    bne     t0, zero, last8
+    addu    v0, a0, a2          # exit value = s1 + n
+    b       1f
+    xor     t0, a1, a0          # Find a0/a1 displacement (fill delay)
 
 memcpy:
     slti    t0, a2, 8           # Less than 8?
@@ -52,7 +60,8 @@ memcpy:
     move    v0, a0              # Setup exit value before too late
 
     xor     t0, a1, a0          # Find a0/a1 displacement
-    andi    t0, 0x3
+
+1:  andi    t0, 0x3
     bne     t0, zero, shift     # Go handle the unaligned case
     subu    t1, zero, a1
     andi    t1, 0x3             # a0/a1 are aligned, but are we
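(For reference, a minimal C sketch of the semantics the alternate entrypoint provides; illustrative only, the commit's actual implementation is the MIPS assembly above, where v0 = a0 + a2 is set up front before branching into memcpy's shared copy path at label 1.)

    #include <string.h>

    /* Sketch of mempcpy semantics: same copy as memcpy, but the return
       value is dst + n rather than dst. The ASM version avoids a separate
       copy loop by jumping into memcpy after setting the return register. */
    void *mempcpy_sketch(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return (char *)dst + n;
    }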