Diffstat (limited to 'firmware/asm/sh/memcpy.S')
-rw-r--r--  firmware/asm/sh/memcpy.S  |  10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/firmware/asm/sh/memcpy.S b/firmware/asm/sh/memcpy.S
index e23a579b05..59c5801ac0 100644
--- a/firmware/asm/sh/memcpy.S
+++ b/firmware/asm/sh/memcpy.S
@@ -60,13 +60,13 @@ ___memcpy_fwd_entry:
     cmp/hs  r0,r6       /* at least 11 bytes to copy? (ensures 2 aligned longs) */
     add     r5,r6       /* r6 = source_end */
     bf      .start_b2   /* no: jump directly to byte loop */
 
     mov     #3,r0
     neg     r5,r3
     and     r0,r3       /* r3 = (4 - align_offset) % 4 */
     tst     r3,r3       /* already aligned? */
     bt      .end_b1     /* yes: skip leading byte loop */
 
     add     r5,r3       /* r3 = first source long bound */
 
     /* leading byte loop: copies 0..3 bytes */
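
(Aside: a minimal C sketch of the prologue logic above, with hypothetical names, not the shipped routine. The neg/and pair computes (-(uintptr_t)src) & 3, i.e. how many leading bytes (0..3) must go through the byte loop before the source reaches a long boundary; the 11-byte threshold exists because 3 leading bytes plus one 8-byte main-loop pass need 11 bytes.)

#include <stdint.h>
#include <stddef.h>

/* Minimal sketch, assuming nothing beyond standard C. */
static void *memcpy_prologue_sketch(void *dst, const void *src, size_t n)
{
    unsigned char *d = dst;
    const unsigned char *s = src;

    if (n >= 11) {                                  /* worst case: 3 lead bytes + 2 longs */
        size_t lead = (size_t)(-(uintptr_t)s) & 3;  /* (4 - align_offset) % 4 */
        n -= lead;
        while (lead--)                              /* leading byte loop: 0..3 bytes */
            *d++ = *s++;
        /* s is now long-aligned; the main loops would take over here */
    }
    while (n--)                                     /* byte loop (.start_b2 analogue) */
        *d++ = *s++;
    return dst;
}
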
@@ -89,7 +89,7 @@ ___memcpy_fwd_entry:
     mov     r6,r3       /* move end address to r3 */
     jmp     @r1         /* and jump to it */
     add     #-7,r3      /* adjust end addr for main loops doing 2 longs/pass */
 
     /** main loops, copying 2 longs per pass to profit from fast page mode **/
 
     /* long aligned destination (fastest) */
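
(Aside: a hedged C rendering of the dispatch shape above, assuming the selector is the destination's low two bits and that each case is a specialised loop; the real code jumps through r1 to hand-written asm loops. The end pointer is pulled back by 7 so a pass that consumes 8 bytes can never overrun.)

#include <stdint.h>

/* Hypothetical per-alignment loop type; sketch only. */
typedef void main_loop_fn(unsigned char *dst, const unsigned char *src,
                          const unsigned char *safe_end);

static void dispatch_sketch(unsigned char *dst, const unsigned char *src,
                            const unsigned char *end, main_loop_fn *loop[4])
{
    /* end - 7: 'src < safe_end' holds exactly while >= 8 bytes remain,
     * so each pass may consume two longs without overrunning */
    const unsigned char *safe_end = end - 7;
    loop[(uintptr_t)dst & 3](dst, src, safe_end);   /* 'jmp @r1' analogue */
}
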
@@ -102,11 +102,11 @@ ___memcpy_fwd_entry:
     mov.l   r0,@-r4     /* store second long */
     mov.l   r1,@-r4     /* store first long; NOT ALIGNED - no speed loss here! */
     bt      .loop_do0
 
     add     #4,r3       /* readjust end address */
     cmp/hi  r5,r3       /* one long left? */
     bf      .start_b2   /* no, jump to trailing byte loop */
 
     mov.l   @r5+,r0     /* load last long & increment source addr */
     add     #4,r4       /* increment dest addr */
     bra     .start_b2   /* jump to trailing byte loop */
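
(Aside: a hedged C sketch of the long-aligned main loop and its trailing-long epilogue, with hypothetical names. The asm pairs its two stores with pre-decrement addressing; the sketch keeps forward order for clarity, and memcpy() stands in for the possibly-unaligned destination stores.)

#include <stdint.h>
#include <string.h>

/* Two 32-bit longs per pass (to profit from fast page mode), then at most
 * one trailing long, leaving 0..3 bytes for the trailing byte loop. */
static void main_loop_sketch(unsigned char **dp, const unsigned char **sp,
                             const unsigned char *end)
{
    unsigned char *d = *dp;
    const unsigned char *s = *sp;
    uint32_t first, second;

    while (s < end - 7) {                 /* >= 8 source bytes left */
        memcpy(&first,  s, 4); s += 4;    /* load first long */
        memcpy(&second, s, 4); s += 4;    /* load second long */
        memcpy(d, &first,  4); d += 4;    /* store first long */
        memcpy(d, &second, 4); d += 4;    /* store second long */
    }
    if (s < end - 3) {                    /* one long left? (end readjusted +4) */
        memcpy(&first, s, 4); s += 4;     /* load last long & increment source addr */
        memcpy(d, &first, 4); d += 4;     /* increment dest addr */
    }
    *dp = d; *sp = s;                     /* 0..3 bytes remain for the byte loop */
}
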