path: root/firmware/target/arm/s3c2440/gigabeat-fx/lcd-as-meg-fx.S
Diffstat (limited to 'firmware/target/arm/s3c2440/gigabeat-fx/lcd-as-meg-fx.S')
-rw-r--r--  firmware/target/arm/s3c2440/gigabeat-fx/lcd-as-meg-fx.S  279
1 file changed, 0 insertions, 279 deletions
diff --git a/firmware/target/arm/s3c2440/gigabeat-fx/lcd-as-meg-fx.S b/firmware/target/arm/s3c2440/gigabeat-fx/lcd-as-meg-fx.S
deleted file mode 100644
index 4926c7fa79..0000000000
--- a/firmware/target/arm/s3c2440/gigabeat-fx/lcd-as-meg-fx.S
+++ /dev/null
@@ -1,279 +0,0 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                      \/            \/     \/    \/           \/
 * $Id$
 *
 * Copyright (C) 2007 by Michael Sevakis
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"
#include "cpu.h"

/****************************************************************************
 * void lcd_copy_buffer_rect(fb_data *dst, fb_data *src, int width,
 *                           int height);
 */
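
/* Equivalent C reference, kept purely as documentation: a minimal sketch of
 * what the hand-scheduled routine below does (copy a width x height block of
 * 16-bit pixels between two LCD_WIDTH-wide framebuffers).  fb_data and
 * LCD_WIDTH are assumed to come from the included headers; memcpy is used
 * only for illustration.
 *
 *   #include <string.h>
 *
 *   void lcd_copy_buffer_rect(fb_data *dst, fb_data *src,
 *                             int width, int height)
 *   {
 *       do                          // height > 0 assumed, as in the asm
 *       {
 *           memcpy(dst, src, width * sizeof (fb_data)); // one line of rect
 *           dst += LCD_WIDTH;       // advance a full framebuffer line,
 *           src += LCD_WIDTH;       // not just 'width'
 *       }
 *       while (--height > 0);
 *   }
 */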
    .section    .icode, "ax", %progbits
    .align      2
    .global     lcd_copy_buffer_rect
    .type       lcd_copy_buffer_rect, %function
    @ r0 = dst
    @ r1 = src
    @ r2 = width
    @ r3 = height
lcd_copy_buffer_rect:                   @
    stmfd   sp!, { r4-r12, lr }         @ save non-scratch regs
    mov     r5, r2                      @ r5 = cached width
    rsb     r4, r2, #LCD_WIDTH          @ r4 = LCD_WIDTH - width
10: @ copy line @
    subs    r2, r5, #1                  @ r2 = width - 1
    beq     40f @ finish line           @ one halfword? skip to trailing copy
    tst     r0, #2                      @ word aligned?
    beq     20f @ rem copy              @ yes? skip to word copy
    ldrh    r6, [r1], #2                @ copy leading halfword
    subs    r2, r2, #1                  @
    strh    r6, [r0], #2                @
    ble     40f @ finish line           @ next line if lt or finish
                                        @ trailing halfword if eq
20: @ rem copy @
    add     r14, r2, #1                 @ get remaining width mod 16 after word
                                        @ align (rw)
    and     r14, r14, #0xe              @ r14 = 0 (16), 2, 4, 6, 8, 10, 12, 14
    add     pc, pc, r14, lsl #3         @ branch to 32-byte align
    nop                                 @
    b       30f                         @ rw % 16 = 0 or 1? use octword loop
    nop                                 @
    nop                                 @
    nop                                 @
    ldr     r6, [r1], #4                @ rw % 16 = 2 or 3
    subs    r2, r2, #2                  @
    str     r6, [r0], #4                @
    b       25f @ copy up done          @
    ldmia   r1!, { r6-r7 }              @ rw % 16 = 4 or 5
    subs    r2, r2, #4                  @
    stmia   r0!, { r6-r7 }              @
    b       25f @ copy up done          @
    ldmia   r1!, { r6-r8 }              @ rw % 16 = 6 or 7
    subs    r2, r2, #6                  @
    stmia   r0!, { r6-r8 }              @
    b       25f @ copy up done          @
    ldmia   r1!, { r6-r9 }              @ rw % 16 = 8 or 9
    subs    r2, r2, #8                  @
    stmia   r0!, { r6-r9 }              @
    b       25f @ copy up done          @
    ldmia   r1!, { r6-r10 }             @ rw % 16 = 10 or 11
    subs    r2, r2, #10                 @
    stmia   r0!, { r6-r10 }             @
    b       25f @ copy up done          @
    ldmia   r1!, { r6-r11 }             @ rw % 16 = 12 or 13
    subs    r2, r2, #12                 @
    stmia   r0!, { r6-r11 }             @
    b       25f @ copy up done          @
    ldmia   r1!, { r6-r12 }             @ rw % 16 = 14 or 15
    subs    r2, r2, #14                 @
    stmia   r0!, { r6-r12 }             @
25: @ copy up done @
    ble     40f @ finish line           @ no 32-byte segments remaining?
30: @ octword loop @ copy 16 pixels per loop
    ldmia   r1!, { r6-r12, r14 }        @
    subs    r2, r2, #16                 @
    stmia   r0!, { r6-r12, r14 }        @
    bgt     30b @ octword loop          @
40: @ finish line @
    ldreqh  r6, [r1], #2                @ finish last halfword if eq ...
    add     r1, r1, r4, lsl #1          @
    streqh  r6, [r0], #2                @ ...
    add     r0, r0, r4, lsl #1          @
    subs    r3, r3, #1                  @ next line
    bgt     10b @ copy line             @
    ldmfd   sp!, { r4-r12, pc }         @ restore regs and return
    .size   lcd_copy_buffer_rect, .-lcd_copy_buffer_rect


/****************************************************************************
 * void lcd_write_yuv420_lines(fb_data *dst,
 *                             unsigned char const * const src[3],
 *                             int width,
 *                             int stride);
 *
 *   |R|   |1.000000 -0.000001  1.402000| |Y'|
 *   |G| = |1.000000 -0.334136 -0.714136| |Pb|
 *   |B|   |1.000000  1.772000  0.000000| |Pr|
 * Scaled, normalized, rounded and tweaked to yield RGB 565:
 *   |R|   |74   0 101| |Y' -  16| >> 9
 *   |G| = |74 -24 -51| |Cb - 128| >> 8
 *   |B|   |74 128   0| |Cr - 128| >> 9
 *
 * Write four RGB565 pixels in the following order on each loop:
 * 1 3 + > down
 * 2 4 \/ left
 */
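
/* Fixed-point C reference for a single pixel, added as a hedged sketch of
 * the scaled matrix above (yuv_to_rgb565 and its argument names are
 * illustrative, not part of the driver).  The assembly below applies this
 * to a 2x2 block sharing one Cb/Cr pair, folds in rounding offsets, and
 * stores the four pixels rotated as drawn above.
 *
 *   static inline unsigned yuv_to_rgb565(int y, int cb, int cr)
 *   {
 *       int r, g, b;
 *       y -= 16;  cb -= 128;  cr -= 128;
 *       r = (74 * y            + 101 * cr) >> 9;    // 5-bit red
 *       g = (74 * y -  24 * cb -  51 * cr) >> 8;    // 6-bit green
 *       b = (74 * y + 128 * cb           ) >> 9;    // 5-bit blue
 *       if (r < 0) r = 0; else if (r > 31) r = 31;  // clamp to RGB565 range
 *       if (g < 0) g = 0; else if (g > 63) g = 63;  // (>> on negatives is
 *       if (b < 0) b = 0; else if (b > 31) b = 31;  //  arithmetic on ARM)
 *       return (unsigned)((r << 11) | (g << 5) | b);
 *   }
 */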
    .section    .icode, "ax", %progbits
    .align      2
    .global     lcd_write_yuv420_lines
    .type       lcd_write_yuv420_lines, %function
lcd_write_yuv420_lines:
    @ r0 = dst
    @ r1 = yuv_src
    @ r2 = width
    @ r3 = stride
    stmfd   sp!, { r4-r12 }             @ save non-scratch
    ldmia   r1, { r4, r5, r6 }          @ r4 = yuv_src[0] = Y'_p
                                        @ r5 = yuv_src[1] = Cb_p
                                        @ r6 = yuv_src[2] = Cr_p
                                        @ r1 = scratch
10: @ loop line @
    ldrb    r7, [r4]                    @ r7 = *Y'_p;
    ldrb    r8, [r5], #1                @ r8 = *Cb_p++;
    ldrb    r9, [r6], #1                @ r9 = *Cr_p++;
                                        @
    sub     r7, r7, #16                 @ r7 = Y = (Y' - 16)*74
    add     r12, r7, r7, asl #2         @ actually (Y' - 16)*37 and shift right
    add     r7, r12, r7, asl #5         @ by one less when adding - same for all
                                        @
    sub     r8, r8, #128                @ Cb -= 128
    sub     r9, r9, #128                @ Cr -= 128
                                        @
    add     r10, r9, r9, asl #1         @ r10 = Cr*51 + Cb*24
    add     r10, r10, r10, asl #4       @
    add     r10, r10, r8, asl #3        @
    add     r10, r10, r8, asl #4        @
                                        @
    add     r11, r9, r9, asl #2         @ r9 = Cr*101
    add     r11, r11, r9, asl #5        @
    add     r9, r11, r9, asl #6         @
                                        @
    add     r8, r8, #2                  @ r8 = bu = (Cb*128 + 256) >> 9
    mov     r8, r8, asr #2              @
    add     r9, r9, #256                @ r9 = rv = (r9 + 256) >> 9
    mov     r9, r9, asr #9              @
    rsb     r10, r10, #128              @ r10 = guv = (-r10 + 128) >> 8
    mov     r10, r10, asr #8            @
    @ compute R, G, and B
    add     r1, r8, r7, asr #8          @ r1 = b = (Y >> 9) + bu
    add     r11, r9, r7, asr #8         @ r11 = r = (Y >> 9) + rv
    add     r7, r10, r7, asr #7         @ r7 = g = (Y >> 8) + guv
                                        @
    orr     r12, r1, r11                @ check if clamping is needed...
    orr     r12, r12, r7, asr #1        @ ...at all
    cmp     r12, #31                    @
    bls     15f @ no clamp              @
    cmp     r1, #31                     @ clamp b
    mvnhi   r1, r1, asr #31             @ branchless: 0 if negative,
    andhi   r1, r1, #31                 @ max value if above range
    cmp     r11, #31                    @ clamp r
    mvnhi   r11, r11, asr #31           @
    andhi   r11, r11, #31               @
    cmp     r7, #63                     @ clamp g
    mvnhi   r7, r7, asr #31             @
    andhi   r7, r7, #63                 @
15: @ no clamp @
                                        @
    orr     r12, r1, r7, lsl #5         @ r12 = b | (g << 5)
    ldrb    r7, [r4, r3]                @ r7 = Y' = *(Y'_p + stride)
    orr     r12, r12, r11, lsl #11      @ r12 |= (r << 11)
    strh    r12, [r0]                   @ store pixel
                                        @
    sub     r7, r7, #16                 @ r7 = Y = (Y' - 16)*74
    add     r12, r7, r7, asl #2         @
    add     r7, r12, r7, asl #5         @
    @ compute R, G, and B
    add     r1, r8, r7, asr #8          @ r1 = b = (Y >> 9) + bu
    add     r11, r9, r7, asr #8         @ r11 = r = (Y >> 9) + rv
    add     r7, r10, r7, asr #7         @ r7 = g = (Y >> 8) + guv
                                        @
    orr     r12, r1, r11                @ check if clamping is needed...
    orr     r12, r12, r7, asr #1        @ ...at all
    cmp     r12, #31                    @
    bls     15f @ no clamp              @
    cmp     r1, #31                     @ clamp b
    mvnhi   r1, r1, asr #31             @
    andhi   r1, r1, #31                 @
    cmp     r11, #31                    @ clamp r
    mvnhi   r11, r11, asr #31           @
    andhi   r11, r11, #31               @
    cmp     r7, #63                     @ clamp g
    mvnhi   r7, r7, asr #31             @
    andhi   r7, r7, #63                 @
15: @ no clamp @
                                        @
    orr     r12, r1, r11, lsl #11       @ r12 = b | (r << 11)
    orr     r12, r12, r7, lsl #5        @ r12 |= (g << 5)
    ldrb    r7, [r4, #1]!               @ r7 = Y' = *(++Y'_p)
    strh    r12, [r0, #-2]              @ store pixel
    add     r0, r0, #2*LCD_WIDTH        @
                                        @
    sub     r7, r7, #16                 @ r7 = Y = (Y' - 16)*74
    add     r12, r7, r7, asl #2         @
    add     r7, r12, r7, asl #5         @
    @ compute R, G, and B
    add     r1, r8, r7, asr #8          @ r1 = b = (Y >> 9) + bu
    add     r11, r9, r7, asr #8         @ r11 = r = (Y >> 9) + rv
    add     r7, r10, r7, asr #7         @ r7 = g = (Y >> 8) + guv
                                        @
    orr     r12, r1, r11                @ check if clamping is needed...
    orr     r12, r12, r7, asr #1        @ ...at all
    cmp     r12, #31                    @
    bls     15f @ no clamp              @
    cmp     r1, #31                     @ clamp b
    mvnhi   r1, r1, asr #31             @
    andhi   r1, r1, #31                 @
    cmp     r11, #31                    @ clamp r
    mvnhi   r11, r11, asr #31           @
    andhi   r11, r11, #31               @
    cmp     r7, #63                     @ clamp g
    mvnhi   r7, r7, asr #31             @
    andhi   r7, r7, #63                 @
15: @ no clamp @
                                        @
    orr     r12, r1, r7, lsl #5         @ r12 = b | (g << 5)
    ldrb    r7, [r4, r3]                @ r7 = Y' = *(Y'_p + stride)
    orr     r12, r12, r11, lsl #11      @ r12 |= (r << 11)
    strh    r12, [r0]                   @ store pixel
                                        @
    sub     r7, r7, #16                 @ r7 = Y = (Y' - 16)*74
    add     r12, r7, r7, asl #2         @
    add     r7, r12, r7, asl #5         @
    @ compute R, G, and B
    add     r1, r8, r7, asr #8          @ r1 = b = (Y >> 9) + bu
    add     r11, r9, r7, asr #8         @ r11 = r = (Y >> 9) + rv
    add     r7, r10, r7, asr #7         @ r7 = g = (Y >> 8) + guv
                                        @
    orr     r12, r1, r11                @ check if clamping is needed...
    orr     r12, r12, r7, asr #1        @ ...at all
    cmp     r12, #31                    @
    bls     15f @ no clamp              @
    cmp     r1, #31                     @ clamp b
    mvnhi   r1, r1, asr #31             @
    andhi   r1, r1, #31                 @
    cmp     r11, #31                    @ clamp r
    mvnhi   r11, r11, asr #31           @
    andhi   r11, r11, #31               @
    cmp     r7, #63                     @ clamp g
    mvnhi   r7, r7, asr #31             @
    andhi   r7, r7, #63                 @
15: @ no clamp @
                                        @
    orr     r12, r1, r11, lsl #11       @ r12 = b | (r << 11)
    orr     r12, r12, r7, lsl #5        @ r12 |= (g << 5)
    strh    r12, [r0, #-2]              @ store pixel
    add     r0, r0, #2*LCD_WIDTH        @
    add     r4, r4, #1                  @
                                        @
    subs    r2, r2, #2                  @ subtract block from width
    bgt     10b @ loop line             @
                                        @
    ldmfd   sp!, { r4-r12 }             @ restore registers and return
    bx      lr                          @
    .size   lcd_write_yuv420_lines, .-lcd_write_yuv420_lines