From fe6aa21e9eb88f49005863efd2003d0982920048 Mon Sep 17 00:00:00 2001 From: Aidan MacDonald Date: Mon, 3 Oct 2022 10:17:41 +0100 Subject: Remove YUV blitting functions and LCD modes None of this is needed now that mpegplayer is gone. Change-Id: I360366db8513e4d988021e8d7b7d8eb09930efb8 --- firmware/asm/arm/lcd-as-memframe.S | 591 ------------------------------------- firmware/asm/lcd-as-memframe.c | 168 ----------- 2 files changed, 759 deletions(-) (limited to 'firmware/asm') diff --git a/firmware/asm/arm/lcd-as-memframe.S b/firmware/asm/arm/lcd-as-memframe.S index 52ab0447c2..4bbae6fc0a 100644 --- a/firmware/asm/arm/lcd-as-memframe.S +++ b/firmware/asm/arm/lcd-as-memframe.S @@ -99,594 +99,3 @@ lcd_copy_buffer_rect: @ bgt 10b @ copy line @ ldmpc regs=r4-r11 @ restore regs and return .size lcd_copy_buffer_rect, .-lcd_copy_buffer_rect - - -/**************************************************************************** - * void lcd_write_yuv420_lines(fb_data *dst, - * unsigned char const * const src[3], - * int width, - * int stride); - * - * |R| |1.000000 -0.000001 1.402000| |Y'| - * |G| = |1.000000 -0.334136 -0.714136| |Pb| - * |B| |1.000000 1.772000 0.000000| |Pr| - * Scaled, normalized, rounded and tweaked to yield RGB 565: - * |R| |74 0 101| |Y' - 16| >> 9 - * |G| = |74 -24 -51| |Cb - 128| >> 8 - * |B| |74 128 0| |Cr - 128| >> 9 - * - * Write four RGB565 pixels in the following order on each loop: - * 1 3 + > down - * 2 4 \/ left - */ - .section .icode.lcd_write_yuv420_lines, "ax", %progbits - .align 2 - .global lcd_write_yuv420_lines - .type lcd_write_yuv420_lines, %function -lcd_write_yuv420_lines: - @ r0 = dst - @ r1 = yuv_src - @ r2 = width - @ r3 = stride - stmfd sp!, { r4-r10, lr } @ save non-scratch - ldmia r1, { r4, r5, r6 } @ r4 = yuv_src[0] = Y'_p - @ r5 = yuv_src[1] = Cb_p - @ r6 = yuv_src[2] = Cr_p - @ r1 = scratch - sub r3, r3, #1 @ -10: @ loop line @ - ldrb r7, [r4], #1 @ r7 = *Y'_p++; - ldrb r8, [r5], #1 @ r8 = *Cb_p++; - ldrb r9, [r6], #1 @ r9 = *Cr_p++; - @ - sub r7, r7, #16 @ r7 = Y = (Y' - 16)*74 - add r12, r7, r7, asl #2 @ actually (Y' - 16)*37 and shift right - add r7, r12, r7, asl #5 @ by one less when adding - same for all - @ - sub r8, r8, #128 @ Cb -= 128 - sub r9, r9, #128 @ Cr -= 128 - @ - add r10, r9, r9, asl #1 @ r10 = Cr*51 + Cb*24 - add r10, r10, r10, asl #4 @ - add r10, r10, r8, asl #3 @ - add r10, r10, r8, asl #4 @ - @ - add lr, r9, r9, asl #2 @ r9 = Cr*101 - add lr, lr, r9, asl #5 @ - add r9, lr, r9, asl #6 @ - @ - add r8, r8, #2 @ r8 = bu = (Cb*128 + 128) >> 8 - mov r8, r8, asr #2 @ - add r9, r9, #256 @ r9 = rv = (r9 + 256) >> 9 - mov r9, r9, asr #9 @ - rsb r10, r10, #128 @ r10 = guv = (-r10 + 128) >> 8 - mov r10, r10, asr #8 @ - @ compute R, G, and B - add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu - add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv - add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv - @ -#if ARM_ARCH >= 6 - usat r1, #5, r1 @ clamp b - usat lr, #5, lr @ clamp r - usat r7, #6, r7 @ clamp g -#else - orr r12, r1, lr @ check if clamping is needed... 
- orr r12, r12, r7, asr #1 @ ...at all - cmp r12, #31 @ - bls 15f @ no clamp @ - cmp r1, #31 @ clamp b - mvnhi r1, r1, asr #31 @ - andhi r1, r1, #31 @ - cmp lr, #31 @ clamp r - mvnhi lr, lr, asr #31 @ - andhi lr, lr, #31 @ - cmp r7, #63 @ clamp g - mvnhi r7, r7, asr #31 @ - andhi r7, r7, #63 @ -15: @ no clamp @ -#endif - @ - ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride) - @ - orr r1, r1, r7, lsl #5 @ r4 |= (g << 5) - orr r1, r1, lr, lsl #11 @ r4 = b | (r << 11) - -#if LCD_WIDTH >= LCD_HEIGHT - strh r1, [r0] @ -#elif LCD_WIDTH < 256 - strh r1, [r0], #LCD_WIDTH @ store pixel -#else - strh r1, [r0] @ -#endif - @ - sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74 - add r12, r7, r7, asl #2 @ - add r7, r12, r7, asl #5 @ - @ compute R, G, and B - add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu - add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv - add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv - @ -#if ARM_ARCH >= 6 - usat r1, #5, r1 @ clamp b - usat lr, #5, lr @ clamp r - usat r7, #6, r7 @ clamp g -#else - orr r12, r1, lr @ check if clamping is needed... - orr r12, r12, r7, asr #1 @ ...at all - cmp r12, #31 @ - bls 15f @ no clamp @ - cmp r1, #31 @ clamp b - mvnhi r1, r1, asr #31 @ - andhi r1, r1, #31 @ - cmp lr, #31 @ clamp r - mvnhi lr, lr, asr #31 @ - andhi lr, lr, #31 @ - cmp r7, #63 @ clamp g - mvnhi r7, r7, asr #31 @ - andhi r7, r7, #63 @ -15: @ no clamp @ -#endif - @ - ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++) - @ - orr r1, r1, lr, lsl #11 @ r1 = b | (r << 11) - orr r1, r1, r7, lsl #5 @ r1 |= (g << 5) - -#if LCD_WIDTH >= LCD_HEIGHT - add r0, r0, #2*LCD_WIDTH @ - strh r1, [r0] @ store pixel - sub r0, r0, #2*LCD_WIDTH @ -#elif LCD_WIDTH < 256 - strh r1, [r0, #-LCD_WIDTH-2] @ store pixel -#else - strh r1, [r0, #-2] @ - add r0, r0, #LCD_WIDTH @ -#endif - @ - sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74 - add r12, r7, r7, asl #2 @ - add r7, r12, r7, asl #5 @ - @ compute R, G, and B - add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu - add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv - add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv - @ -#if ARM_ARCH >= 6 - usat r1, #5, r1 @ clamp b - usat lr, #5, lr @ clamp r - usat r7, #6, r7 @ clamp g -#else - orr r12, r1, lr @ check if clamping is needed... - orr r12, r12, r7, asr #1 @ ...at all - cmp r12, #31 @ - bls 15f @ no clamp @ - cmp r1, #31 @ clamp b - mvnhi r1, r1, asr #31 @ - andhi r1, r1, #31 @ - cmp lr, #31 @ clamp r - mvnhi lr, lr, asr #31 @ - andhi lr, lr, #31 @ - cmp r7, #63 @ clamp g - mvnhi r7, r7, asr #31 @ - andhi r7, r7, #63 @ -15: @ no clamp @ -#endif - @ - ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride) - @ - orr r1, r1, r7, lsl #5 @ r1 = b | (g << 5) - orr r1, r1, lr, lsl #11 @ r1 |= (r << 11) - -#if LCD_WIDTH >= LCD_HEIGHT - strh r1, [r0, #2] -#elif LCD_WIDTH < 256 - strh r1, [r0, #LCD_WIDTH]! @ store pixel -#else - strh r1, [r0] @ -#endif - @ - sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74 - add r12, r7, r7, asl #2 @ - add r7, r12, r7, asl #5 @ - @ compute R, G, and B - add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu - add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv - add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv - @ -#if ARM_ARCH >= 6 - usat r1, #5, r1 @ clamp b - usat lr, #5, lr @ clamp r - usat r7, #6, r7 @ clamp g -#else - orr r12, r1, lr @ check if clamping is needed... 
- orr r12, r12, r7, asr #1 @ ...at all - cmp r12, #31 @ - bls 15f @ no clamp @ - cmp r1, #31 @ clamp b - mvnhi r1, r1, asr #31 @ - andhi r1, r1, #31 @ - cmp lr, #31 @ clamp r - mvnhi lr, lr, asr #31 @ - andhi lr, lr, #31 @ - cmp r7, #63 @ clamp g - mvnhi r7, r7, asr #31 @ - andhi r7, r7, #63 @ -15: @ no clamp @ -#endif - @ - orr r12, r1, lr, lsl #11 @ r12 = b | (r << 11) - orr r12, r12, r7, lsl #5 @ r12 |= (g << 5) - -#if LCD_WIDTH >= LCD_HEIGHT - add r0, r0, #2*LCD_WIDTH - strh r12, [r0, #2] -#if LCD_WIDTH <= 512 - sub r0, r0, #(2*LCD_WIDTH)-4 -#else - sub r0, r0, #(2*LCD_WIDTH) - add r0, r0, #4 -#endif -#else - strh r12, [r0, #-2] @ store pixel -#if LCD_WIDTH < 256 - add r0, r0, #2*LCD_WIDTH @ -#else - add r0, r0, #LCD_WIDTH @ -#endif -#endif - @ - subs r2, r2, #2 @ subtract block from width - bgt 10b @ loop line @ - @ - ldmpc regs=r4-r10 @ restore registers and return - .ltorg @ dump constant pool - .size lcd_write_yuv420_lines, .-lcd_write_yuv420_lines - - -/**************************************************************************** - * void lcd_write_yuv420_lines_odither(fb_data *dst, - * unsigned char const * const src[3], - * int width, - * int stride, - * int x_screen, - * int y_screen); - * - * |R| |1.000000 -0.000001 1.402000| |Y'| - * |G| = |1.000000 -0.334136 -0.714136| |Pb| - * |B| |1.000000 1.772000 0.000000| |Pr| - * Red scaled at twice g & b but at same precision to place it in correct - * bit position after multiply and leave instruction count lower. - * |R| |258 0 408| |Y' - 16| - * |G| = |149 -49 -104| |Cb - 128| - * |B| |149 258 0| |Cr - 128| - * - * Write four RGB565 pixels in the following order on each loop: - * 1 3 + > down - * 2 4 \/ left - * - * Kernel pattern (raw|rotated|use order): - * 5 3 4 2 2 6 3 7 row0 row2 > down - * 1 7 0 6 | 4 0 5 1 | 2 4 6 0 3 5 7 1 col0 left - * 4 2 5 3 | 3 7 2 6 | 3 5 7 1 2 4 6 0 col2 \/ - * 0 6 1 7 5 1 4 0 - */ - .section .icode.lcd_write_yuv420_lines_odither, "ax", %progbits - .align 2 - .global lcd_write_yuv420_lines_odither - .type lcd_write_yuv420_lines_odither, %function -lcd_write_yuv420_lines_odither: - @ r0 = dst - @ r1 = yuv_src - @ r2 = width - @ r3 = stride - @ [sp] = x_screen - @ [sp+4] = y_screen - stmfd sp!, { r4-r11, lr } @ save non-scratch - ldmia r1, { r4, r5, r6 } @ r4 = yuv_src[0] = Y'_p - @ r5 = yuv_src[1] = Cb_p - @ r6 = yuv_src[2] = Cr_p - @ - sub r3, r3, #1 @ - add r1, sp, #36 @ Line up pattern and kernel quadrant - ldmia r1, { r12, r14 } @ - eor r14, r14, r12 @ - and r14, r14, #0x2 @ - mov r14, r14, lsl #6 @ 0x00 or 0x80 -10: @ loop line @ - @ - ldrb r7, [r4], #1 @ r7 = *Y'_p++; - ldrb r8, [r5], #1 @ r8 = *Cb_p++; - ldrb r9, [r6], #1 @ r9 = *Cr_p++; - @ - eor r14, r14, #0x80 @ flip pattern quadrant - @ - sub r7, r7, #16 @ r7 = Y = (Y' - 16)*149 - add r12, r7, r7, asl #2 @ - add r12, r12, r12, asl #4 @ - add r7, r12, r7, asl #6 @ - @ - sub r8, r8, #128 @ Cb -= 128 - sub r9, r9, #128 @ Cr -= 128 - @ - add r10, r8, r8, asl #4 @ r10 = guv = Cr*104 + Cb*49 - add r10, r10, r8, asl #5 @ - add r10, r10, r9, asl #3 @ - add r10, r10, r9, asl #5 @ - add r10, r10, r9, asl #6 @ - @ - mov r8, r8, asl #1 @ r8 = bu = Cb*258 - add r8, r8, r8, asl #7 @ - @ - add r9, r9, r9, asl #1 @ r9 = rv = Cr*408 - add r9, r9, r9, asl #4 @ - mov r9, r9, asl #3 @ - @ - @ compute R, G, and B - add r1, r8, r7 @ r1 = b' = Y + bu - add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv - rsb r7, r10, r7 @ r7 = g' = Y + guv - @ - @ r8 = bu, r9 = rv, r10 = guv - @ - sub r12, r1, r1, lsr #5 @ r1 = 31/32*b + b/256 - add r1, r12, r1, lsr #8 @ - @ - sub r12, 
r11, r11, lsr #5 @ r11 = 31/32*r + r/256 - add r11, r12, r11, lsr #8 @ - @ - sub r12, r7, r7, lsr #6 @ r7 = 63/64*g + g/256 - add r7, r12, r7, lsr #8 @ - @ - add r12, r14, #0x100 @ - @ - add r1, r1, r12 @ b = r1 + delta - add r11, r11, r12, lsl #1 @ r = r11 + delta*2 - add r7, r7, r12, lsr #1 @ g = r7 + delta/2 - @ -#if ARM_ARCH >= 6 - usat r11, #5, r11, asr #11 @ clamp r - usat r7, #6, r7, asr #9 @ clamp g - usat r1, #5, r1, asr #10 @ clamp b - @ - ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride) - @ - orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11) - orr r1, r1, r7, lsl #5 @ r1 |= (g << 5) -#else - orr r12, r1, r11, asr #1 @ check if clamping is needed... - orr r12, r12, r7 @ ...at all - movs r12, r12, asr #15 @ - beq 15f @ no clamp @ - movs r12, r1, asr #15 @ clamp b - mvnne r1, r12, lsr #15 @ - andne r1, r1, #0x7c00 @ mask b only if clamped - movs r12, r11, asr #16 @ clamp r - mvnne r11, r12, lsr #16 @ - movs r12, r7, asr #15 @ clamp g - mvnne r7, r12, lsr #15 @ -15: @ no clamp @ - @ - ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride) - @ - and r11, r11, #0xf800 @ pack pixel - and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) | - orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) | - orr r1, r11, r1, lsr #10 @ (b >> 10) -#endif - @ -#if LCD_WIDTH >= LCD_HEIGHT - strh r1, [r0] @ -#elif LCD_WIDTH < 256 - strh r1, [r0], #LCD_WIDTH @ store pixel -#else - strh r1, [r0] @ -#endif - @ - sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149 - add r12, r7, r7, asl #2 @ - add r12, r12, r12, asl #4 @ - add r7, r12, r7, asl #6 @ - @ compute R, G, and B - add r1, r8, r7 @ r1 = b' = Y + bu - add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv - rsb r7, r10, r7 @ r7 = g' = Y + guv - @ - sub r12, r1, r1, lsr #5 @ r1 = 31/32*b' + b'/256 - add r1, r12, r1, lsr #8 @ - @ - sub r12, r11, r11, lsr #5 @ r11 = 31/32*r' + r'/256 - add r11, r12, r11, lsr #8 @ - @ - sub r12, r7, r7, lsr #6 @ r7 = 63/64*g' + g'/256 - add r7, r12, r7, lsr #8 @ - @ - add r12, r14, #0x200 @ - @ - add r1, r1, r12 @ b = r1 + delta - add r11, r11, r12, lsl #1 @ r = r11 + delta*2 - add r7, r7, r12, lsr #1 @ g = r7 + delta/2 - @ -#if ARM_ARCH >= 6 - usat r11, #5, r11, asr #11 @ clamp r - usat r7, #6, r7, asr #9 @ clamp g - usat r1, #5, r1, asr #10 @ clamp b - @ - ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++) - @ - orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11) - orr r1, r1, r7, lsl #5 @ r1 |= (g << 5) -#else - orr r12, r1, r11, asr #1 @ check if clamping is needed... 
- orr r12, r12, r7 @ ...at all - movs r12, r12, asr #15 @ - beq 15f @ no clamp @ - movs r12, r1, asr #15 @ clamp b - mvnne r1, r12, lsr #15 @ - andne r1, r1, #0x7c00 @ mask b only if clamped - movs r12, r11, asr #16 @ clamp r - mvnne r11, r12, lsr #16 @ - movs r12, r7, asr #15 @ clamp g - mvnne r7, r12, lsr #15 @ -15: @ no clamp @ - @ - ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++) - @ - and r11, r11, #0xf800 @ pack pixel - and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) | - orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) | - orr r1, r11, r1, lsr #10 @ (b >> 10) -#endif - @ -#if LCD_WIDTH >= LCD_HEIGHT - add r0, r0, #2*LCD_WIDTH @ - strh r1, [r0] @ store pixel - sub r0, r0, #2*LCD_WIDTH @ -#elif LCD_WIDTH < 256 - strh r1, [r0, #-LCD_WIDTH-2] @ store pixel -#else - strh r1, [r0, #-2] @ store pixel - add r0, r0, #LCD_WIDTH @ -#endif - @ - sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149 - add r12, r7, r7, asl #2 @ - add r12, r12, r12, asl #4 @ - add r7, r12, r7, asl #6 @ - @ compute R, G, and B - add r1, r8, r7 @ r1 = b' = Y + bu - add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv - rsb r7, r10, r7 @ r7 = g' = Y + guv - @ - @ r8 = bu, r9 = rv, r10 = guv - @ - sub r12, r1, r1, lsr #5 @ r1 = 31/32*b' + b'/256 - add r1, r12, r1, lsr #8 @ - @ - sub r12, r11, r11, lsr #5 @ r11 = 31/32*r' + r'/256 - add r11, r12, r11, lsr #8 @ - @ - sub r12, r7, r7, lsr #6 @ r7 = 63/64*g' + g'/256 - add r7, r12, r7, lsr #8 @ - @ - add r12, r14, #0x300 @ - @ - add r1, r1, r12 @ b = r1 + delta - add r11, r11, r12, lsl #1 @ r = r11 + delta*2 - add r7, r7, r12, lsr #1 @ g = r7 + delta/2 - @ -#if ARM_ARCH >= 6 - usat r11, #5, r11, asr #11 @ clamp r - usat r7, #6, r7, asr #9 @ clamp g - usat r1, #5, r1, asr #10 @ clamp b - @ - ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride) - @ - orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11) - orr r1, r1, r7, lsl #5 @ r1 |= (g << 5) -#else - orr r12, r1, r11, asr #1 @ check if clamping is needed... - orr r12, r12, r7 @ ...at all - movs r12, r12, asr #15 @ - beq 15f @ no clamp @ - movs r12, r1, asr #15 @ clamp b - mvnne r1, r12, lsr #15 @ - andne r1, r1, #0x7c00 @ mask b only if clamped - movs r12, r11, asr #16 @ clamp r - mvnne r11, r12, lsr #16 @ - movs r12, r7, asr #15 @ clamp g - mvnne r7, r12, lsr #15 @ -15: @ no clamp @ - @ - ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride) - @ - and r11, r11, #0xf800 @ pack pixel - and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) | - orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) | - orr r1, r11, r1, lsr #10 @ (b >> 10) -#endif - @ -#if LCD_WIDTH >= LCD_HEIGHT - strh r1, [r0, #2] -#elif LCD_WIDTH < 256 - strh r1, [r0, #LCD_WIDTH]! 
@ store pixel -#else - strh r1, [r0] @ -#endif - - sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149 - add r12, r7, r7, asl #2 @ - add r12, r12, r12, asl #4 @ - add r7, r12, r7, asl #6 @ - @ compute R, G, and B - add r1, r8, r7 @ r1 = b' = Y + bu - add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv - rsb r7, r10, r7 @ r7 = g' = Y + guv - @ - sub r12, r1, r1, lsr #5 @ r1 = 31/32*b + b/256 - add r1, r12, r1, lsr #8 @ - @ - sub r12, r11, r11, lsr #5 @ r11 = 31/32*r + r/256 - add r11, r12, r11, lsr #8 @ - @ - sub r12, r7, r7, lsr #6 @ r7 = 63/64*g + g/256 - add r7, r12, r7, lsr #8 @ - @ - @ This element is zero - use r14 @ - @ - add r1, r1, r14 @ b = r1 + delta - add r11, r11, r14, lsl #1 @ r = r11 + delta*2 - add r7, r7, r14, lsr #1 @ g = r7 + delta/2 - @ -#if ARM_ARCH >= 6 - usat r11, #5, r11, asr #11 @ clamp r - usat r7, #6, r7, asr #9 @ clamp g - usat r1, #5, r1, asr #10 @ clamp b - @ - orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11) - orr r1, r1, r7, lsl #5 @ r1 |= (g << 5) -#else - orr r12, r1, r11, asr #1 @ check if clamping is needed... - orr r12, r12, r7 @ ...at all - movs r12, r12, asr #15 @ - beq 15f @ no clamp @ - movs r12, r1, asr #15 @ clamp b - mvnne r1, r12, lsr #15 @ - andne r1, r1, #0x7c00 @ mask b only if clamped - movs r12, r11, asr #16 @ clamp r - mvnne r11, r12, lsr #16 @ - movs r12, r7, asr #15 @ clamp g - mvnne r7, r12, lsr #15 @ -15: @ no clamp @ - @ - and r11, r11, #0xf800 @ pack pixel - and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) | - orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) | - orr r1, r11, r1, lsr #10 @ (b >> 10) -#endif - @ -#if LCD_WIDTH >= LCD_HEIGHT - add r0, r0, #2*LCD_WIDTH - strh r1, [r0, #2] @ store pixel -#if LCD_WIDTH <= 512 - sub r0, r0, #(2*LCD_WIDTH)-4 -#else - sub r0, r0, #(2*LCD_WIDTH) - add r0, r0, #4 -#endif -#else - strh r1, [r0, #-2] @ store pixel -#if LCD_WIDTH < 256 - add r0, r0, #2*LCD_WIDTH @ -#else - add r0, r0, #LCD_WIDTH @ -#endif -#endif - @ - subs r2, r2, #2 @ subtract block from width - bgt 10b @ loop line @ - @ - ldmpc regs=r4-r11 @ restore registers and return - .ltorg @ dump constant pool - .size lcd_write_yuv420_lines_odither, .-lcd_write_yuv420_lines_odither diff --git a/firmware/asm/lcd-as-memframe.c b/firmware/asm/lcd-as-memframe.c index fb31fa1953..f7f3473fad 100644 --- a/firmware/asm/lcd-as-memframe.c +++ b/firmware/asm/lcd-as-memframe.c @@ -9,171 +9,3 @@ void lcd_copy_buffer_rect(fb_data *dst, fb_data *src, int width, int height) dst += LCD_WIDTH; } while (--height); } - -#define YFAC (74) -#define RVFAC (101) -#define GUFAC (-24) -#define GVFAC (-51) -#define BUFAC (128) - -static inline int clamp(int val, int min, int max) -{ - if (val < min) - val = min; - else if (val > max) - val = max; - return val; -} - -extern void lcd_write_yuv420_lines(fb_data *dst, - unsigned char const * const src[3], - int width, - int stride) -{ - /* Draw a partial YUV colour bitmap - similiar behavior to lcd_blit_yuv - in the core */ - const unsigned char *ysrc, *usrc, *vsrc; - fb_data *row_end; - - /* width and height must be >= 2 and an even number */ - width &= ~1; - -#if LCD_WIDTH >= LCD_HEIGHT - row_end = dst + width; -#else - row_end = dst + LCD_WIDTH * width; -#endif - - ysrc = src[0]; - usrc = src[1]; - vsrc = src[2]; - - /* stride => amount to jump from end of last row to start of next */ - stride -= width; - - /* upsampling, YUV->RGB conversion and reduction to RGB in one go */ - - do - { - int y, cb, cr, rv, guv, bu, r, g, b; - - y = YFAC*(*ysrc++ - 16); - cb = *usrc++ - 128; - cr = *vsrc++ - 128; - - rv = RVFAC*cr; - guv = GUFAC*cb + 
GVFAC*cr; - bu = BUFAC*cb; - - r = y + rv; - g = y + guv; - b = y + bu; - - if ((unsigned)(r | g | b) > 64*256-1) - { - r = clamp(r, 0, 64*256-1); - g = clamp(g, 0, 64*256-1); - b = clamp(b, 0, 64*256-1); - } - - *dst = FB_RGBPACK(r >> 6, g >> 6, b >> 6); - -#if LCD_WIDTH >= LCD_HEIGHT - dst++; -#else - dst += LCD_WIDTH; -#endif - - y = YFAC*(*ysrc++ - 16); - r = y + rv; - g = y + guv; - b = y + bu; - - if ((unsigned)(r | g | b) > 64*256-1) - { - r = clamp(r, 0, 64*256-1); - g = clamp(g, 0, 64*256-1); - b = clamp(b, 0, 64*256-1); - } - - *dst = FB_RGBPACK(r >> 6, g >> 6, b >> 6); - -#if LCD_WIDTH >= LCD_HEIGHT - dst++; -#else - dst += LCD_WIDTH; -#endif - } - while (dst < row_end); - - ysrc += stride; - usrc -= width >> 1; - vsrc -= width >> 1; - -#if LCD_WIDTH >= LCD_HEIGHT - row_end += LCD_WIDTH; - dst += LCD_WIDTH - width; -#else - row_end -= 1; - dst -= LCD_WIDTH*width + 1; -#endif - - do - { - int y, cb, cr, rv, guv, bu, r, g, b; - - y = YFAC*(*ysrc++ - 16); - cb = *usrc++ - 128; - cr = *vsrc++ - 128; - - rv = RVFAC*cr; - guv = GUFAC*cb + GVFAC*cr; - bu = BUFAC*cb; - - r = y + rv; - g = y + guv; - b = y + bu; - - if ((unsigned)(r | g | b) > 64*256-1) - { - r = clamp(r, 0, 64*256-1); - g = clamp(g, 0, 64*256-1); - b = clamp(b, 0, 64*256-1); - } - - *dst = FB_RGBPACK(r >> 6, g >> 6, b >> 6); - -#if LCD_WIDTH >= LCD_HEIGHT - dst++; -#else - dst += LCD_WIDTH; -#endif - - y = YFAC*(*ysrc++ - 16); - r = y + rv; - g = y + guv; - b = y + bu; - - if ((unsigned)(r | g | b) > 64*256-1) - { - r = clamp(r, 0, 64*256-1); - g = clamp(g, 0, 64*256-1); - b = clamp(b, 0, 64*256-1); - } - - *dst = FB_RGBPACK(r >> 6, g >> 6, b >> 6); - -#if LCD_WIDTH >= LCD_HEIGHT - dst++; -#else - dst += LCD_WIDTH; -#endif - } - while (dst < row_end); -} - -void lcd_write_yuv420_lines_odither(fb_data *dst, - unsigned char const * const src[3], - int width, int stride, - int x_screen, int y_screen) -__attribute__((alias("lcd_write_yuv420_lines"))); -- cgit v1.2.3
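For reference, the conversion that the removed routines implemented can be summarized in plain C. The sketch below is distilled from the deleted C fallback in firmware/asm/lcd-as-memframe.c (same fixed-point coefficients, clamp range, and final >>6); the helper name `yuv_to_rgb565` and the explicit RGB565 packing (used here in place of the firmware's FB_RGBPACK macro) are illustrative assumptions, not part of the firmware API. It also omits the ordered-dither variant that the assembly provided.

```c
#include <stdint.h>

/* Fixed-point BT.601-style coefficients from the removed code.
 * The scaling is chosen so that a final >>6 yields an 8-bit channel. */
#define YFAC   (74)
#define RVFAC  (101)
#define GUFAC  (-24)
#define GVFAC  (-51)
#define BUFAC  (128)

static inline int clamp(int val, int min, int max)
{
    return val < min ? min : (val > max ? max : val);
}

/* Convert one Y'CbCr sample (8-bit studio range) to an RGB565 pixel,
 * as the removed lcd_write_yuv420_lines() did for each output pixel.
 * In YUV420 the same cb/cr pair is shared by a 2x2 block of Y samples. */
static uint16_t yuv_to_rgb565(int y8, int cb8, int cr8)   /* hypothetical helper */
{
    int y  = YFAC * (y8 - 16);
    int cb = cb8 - 128;
    int cr = cr8 - 128;

    int r = y + RVFAC * cr;
    int g = y + GUFAC * cb + GVFAC * cr;
    int b = y + BUFAC * cb;

    /* Clamp to the 14-bit intermediate range, then drop to 8 bits. */
    r = clamp(r, 0, 64 * 256 - 1) >> 6;
    g = clamp(g, 0, 64 * 256 - 1) >> 6;
    b = clamp(b, 0, 64 * 256 - 1) >> 6;

    /* Plain RGB565 packing; the firmware used FB_RGBPACK() here. */
    return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}
```

The chroma factors only need to be computed once per 2x2 block; the per-pixel work reduces to one multiply for Y plus three adds, which is why the assembly versions unrolled four pixels per loop iteration.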