summary | refs | log | tree | commit | diff
path: root/lib/x1000-installer/src/xf_nandio.c
diff options
context:
space:
mode:
Diffstat (limited to 'lib/x1000-installer/src/xf_nandio.c')
-rw-r--r--lib/x1000-installer/src/xf_nandio.c292
1 files changed, 0 insertions, 292 deletions
diff --git a/lib/x1000-installer/src/xf_nandio.c b/lib/x1000-installer/src/xf_nandio.c
deleted file mode 100644
index 6dc87bc420..0000000000
--- a/lib/x1000-installer/src/xf_nandio.c
+++ /dev/null
@@ -1,292 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2021 Aidan MacDonald
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#include "xf_nandio.h"
23#include "xf_error.h"
24#include "core_alloc.h"
25#include "system.h"
26#include <string.h>
27#include <stdbool.h>
28
/* Initialize a NAND I/O context: open the NAND driver, read the chip
 * geometry, and allocate two block-sized buffers — 'old_buf' mirrors
 * the data currently on flash, 'new_buf' accumulates modifications.
 *
 * Returns XF_E_SUCCESS on success, XF_E_OUT_OF_MEMORY if the buffer
 * allocation fails, or XF_E_NAND if the driver open fails (the raw
 * driver error code is then saved in nio->nand_err). */
int xf_nandio_init(struct xf_nandio* nio)
{
    int rc;

    memset(nio, 0, sizeof(*nio));

    /* open NAND */
    nio->ndrv = nand_init();
    nand_lock(nio->ndrv);
    rc = nand_open(nio->ndrv);
    if(rc != NAND_SUCCESS) {
        nio->nand_err = rc;
        rc = XF_E_NAND;
        goto out;
    }

    /* read chip parameters */
    nio->page_size = nio->ndrv->chip->page_size;
    nio->block_size = nio->page_size << nio->ndrv->chip->log2_ppb;

    /* allocate memory: two full blocks, over-allocated by one cache
     * line so the buffers can be aligned below */
    size_t alloc_size = 0;
    alloc_size += CACHEALIGN_SIZE - 1;
    alloc_size += nio->block_size * 2;

    nio->alloc_handle = core_alloc_ex("xf_nandio", alloc_size, &buflib_ops_locked);
    if(nio->alloc_handle < 0) {
        rc = XF_E_OUT_OF_MEMORY;
        goto out_nclose;
    }

    uint8_t* buffer = core_get_data(nio->alloc_handle);
    CACHEALIGN_BUFFER(buffer, alloc_size);

    /* first half holds the on-flash copy, second half the working copy */
    nio->old_buf = buffer;
    nio->new_buf = &buffer[nio->block_size];

    rc = XF_E_SUCCESS;
    goto out;

  out_nclose:
    nand_close(nio->ndrv);
  out:
    nand_unlock(nio->ndrv);
    return rc;
}
75
76void xf_nandio_destroy(struct xf_nandio* nio)
77{
78 nio->alloc_handle = core_free(nio->alloc_handle);
79
80 if(nio->ndrv) {
81 nand_lock(nio->ndrv);
82 nand_close(nio->ndrv);
83 nand_unlock(nio->ndrv);
84 nio->ndrv = NULL;
85 }
86}
87
/* Return true iff every byte of the page buffer is 0xff (erased state). */
static bool is_page_blank(const uint8_t* buf, uint32_t length)
{
    const uint8_t* end = buf + length;

    while(buf != end) {
        if(*buf++ != 0xff)
            return false;
    }

    return true;
}
96
/* Write back the currently buffered block if it was modified.
 *
 * No-op when no block is buffered or in READ mode. In VERIFY mode a
 * difference between new_buf and old_buf means the flash contents do
 * not match what was "written", so XF_E_VERIFY_FAILED is returned.
 * Otherwise the block is erased and reprogrammed page by page.
 *
 * If 'invalidate' is true the buffer is marked invalid afterward so
 * the next access re-reads the block from flash; otherwise old_buf is
 * updated to match new_buf so a repeated flush does nothing. */
static int flush_block(struct xf_nandio* nio, bool invalidate)
{
    /* no block, or only reading - flush is a no-op */
    if(!nio->block_valid || nio->mode == XF_NANDIO_READ)
        return XF_E_SUCCESS;

    /* nothing to do if new data is same as old data */
    if(!memcmp(nio->old_buf, nio->new_buf, nio->block_size))
        return XF_E_SUCCESS;

    /* data mismatch during verification - report the error */
    if(nio->mode == XF_NANDIO_VERIFY)
        return XF_E_VERIFY_FAILED;

    /* erase the block */
    int rc = nand_block_erase(nio->ndrv, nio->cur_block);
    if(rc != NAND_SUCCESS) {
        nio->block_valid = false;
        nio->nand_err = rc;
        return XF_E_NAND;
    }

    size_t oob_size = nio->ndrv->chip->oob_size;

    unsigned page = 0;
    nand_page_t page_addr = nio->cur_block;
    for(; page < nio->ndrv->ppb; ++page, ++page_addr) {
        /* skip programming blank pages to go faster & reduce wear */
        uint8_t* page_data = &nio->new_buf[page * nio->page_size];
        if(is_page_blank(page_data, nio->page_size))
            continue;

        /* copy page and write blank OOB data */
        memcpy(nio->ndrv->page_buf, page_data, nio->page_size);
        memset(&nio->ndrv->page_buf[nio->page_size], 0xff, oob_size);

        /* program the page */
        rc = nand_page_program(nio->ndrv, page_addr, nio->ndrv->page_buf);
        if(rc != NAND_SUCCESS) {
            nio->block_valid = false;
            nio->nand_err = rc;
            return XF_E_NAND;
        }
    }

    if(invalidate)
        nio->block_valid = false;
    else {
        /* update our 'old' buffer so a subsequent flush
         * will not reprogram the same block */
        memcpy(nio->old_buf, nio->new_buf, nio->block_size);
    }

    return XF_E_SUCCESS;
}
152
/* Make 'block_addr' the current buffered block and position the byte
 * offset within it. 'block_addr' is the page address of the block's
 * first page (block number << log2_ppb), not a block index.
 *
 * If the block is already buffered only the offset is updated;
 * otherwise any modified data in the old block is flushed (with
 * invalidation) and the new block is read into both buffers.
 * Returns XF_E_OUT_OF_RANGE if the address is past the end of the
 * chip, XF_E_NAND on a read failure, or an error from flush_block. */
static int seek_to_block(struct xf_nandio* nio, nand_block_t block_addr,
                         size_t offset_in_block)
{
    /* already on this block? */
    if(nio->block_valid && block_addr == nio->cur_block) {
        nio->offset_in_block = offset_in_block;
        return XF_E_SUCCESS;
    }

    /* ensure new block is within range (compare in units of pages) */
    if(block_addr >= (nio->ndrv->chip->nr_blocks << nio->ndrv->chip->log2_ppb))
        return XF_E_OUT_OF_RANGE;

    /* flush old block */
    int rc = flush_block(nio, true);
    if(rc)
        return rc;

    /* buffer contents are stale until the new block is fully read */
    nio->block_valid = false;

    /* read the new block */
    unsigned page = 0;
    nand_page_t page_addr = block_addr;
    for(; page < nio->ndrv->ppb; ++page, ++page_addr) {
        rc = nand_page_read(nio->ndrv, page_addr, nio->ndrv->page_buf);
        if(rc != NAND_SUCCESS) {
            nio->nand_err = rc;
            return XF_E_NAND;
        }

        memcpy(&nio->old_buf[page * nio->page_size], nio->ndrv->page_buf, nio->page_size);
    }

    /* copy to 2nd buffer */
    memcpy(nio->new_buf, nio->old_buf, nio->block_size);

    /* update position */
    nio->cur_block = block_addr;
    nio->offset_in_block = offset_in_block;
    nio->block_valid = true;
    return XF_E_SUCCESS;
}
195
196int xf_nandio_set_mode(struct xf_nandio* nio, enum xf_nandio_mode mode)
197{
198 nand_lock(nio->ndrv);
199
200 /* flush the current block before switching to the new mode,
201 * to ensure consistency */
202 int rc = flush_block(nio, false);
203 if(rc)
204 goto err;
205
206 nio->mode = mode;
207 rc = XF_E_SUCCESS;
208
209 err:
210 nand_unlock(nio->ndrv);
211 return rc;
212}
213
214static int nandio_rdwr(struct xf_nandio* nio, void* buf, size_t count, bool write)
215{
216 while(count > 0) {
217 void* ptr;
218 size_t amount = count;
219 int rc = xf_nandio_get_buffer(nio, &ptr, &amount);
220 if(rc)
221 return rc;
222
223 if(write)
224 memcpy(ptr, buf, amount);
225 else
226 memcpy(buf, ptr, amount);
227
228 count -= amount;
229 }
230
231 return XF_E_SUCCESS;
232}
233
234int xf_nandio_seek(struct xf_nandio* nio, size_t offset)
235{
236 uint32_t block_nr = offset / nio->block_size;
237 size_t offset_in_block = offset % nio->block_size;
238 nand_block_t block_addr = block_nr << nio->ndrv->chip->log2_ppb;
239
240 nand_lock(nio->ndrv);
241 int rc = seek_to_block(nio, block_addr, offset_in_block);
242 nand_unlock(nio->ndrv);
243
244 return rc;
245}
246
/* Read 'count' bytes from the current position into 'buf',
 * advancing the position. Returns XF_E_SUCCESS or an error. */
int xf_nandio_read(struct xf_nandio* nio, void* buf, size_t count)
{
    return nandio_rdwr(nio, buf, count, false);
}
251
/* Write 'count' bytes from 'buf' at the current position, advancing
 * the position. Returns XF_E_SUCCESS or an error.
 * The const cast is safe: on the write path nandio_rdwr only reads
 * from 'buf'. */
int xf_nandio_write(struct xf_nandio* nio, const void* buf, size_t count)
{
    return nandio_rdwr(nio, (void*)buf, count, true);
}
256
/* Zero-copy access: return in *buf a pointer into the working buffer
 * at the current position, and clamp *count (in/out) to the bytes
 * remaining in the current block. The position is advanced by the
 * clamped *count. When the current block is exhausted, flushes it and
 * loads the next block first, so the returned window is never empty. */
int xf_nandio_get_buffer(struct xf_nandio* nio, void** buf, size_t* count)
{
    nand_lock(nio->ndrv);

    /* make sure the current block data is read in */
    int rc = seek_to_block(nio, nio->cur_block, nio->offset_in_block);
    if(rc)
        goto err;

    size_t amount_left = nio->block_size - nio->offset_in_block;
    if(amount_left == 0) {
        /* block exhausted: advance to the next block (cur_block is a
         * page address, so step by pages-per-block) */
        amount_left = nio->block_size;
        rc = seek_to_block(nio, nio->cur_block + nio->ndrv->ppb, 0);
        if(rc)
            goto err;
    }

    *buf = &nio->new_buf[nio->offset_in_block];
    *count = MIN(*count, amount_left);

    nio->offset_in_block += *count;
    rc = XF_E_SUCCESS;

  err:
    nand_unlock(nio->ndrv);
    return rc;
}
284
285int xf_nandio_flush(struct xf_nandio* nio)
286{
287 nand_lock(nio->ndrv);
288 int rc = flush_block(nio, false);
289 nand_unlock(nio->ndrv);
290
291 return rc;
292}