Diffstat (limited to 'firmware/target/arm/s5l8700/ipodnano2g')
-rw-r--r--  firmware/target/arm/s5l8700/ipodnano2g/ftl-nano2g.c   1789
-rw-r--r--  firmware/target/arm/s5l8700/ipodnano2g/ftl-target.h     34
-rw-r--r--  firmware/target/arm/s5l8700/ipodnano2g/nand-nano2g.c   399
-rw-r--r--  firmware/target/arm/s5l8700/ipodnano2g/nand-target.h    54
4 files changed, 2276 insertions, 0 deletions
diff --git a/firmware/target/arm/s5l8700/ipodnano2g/ftl-nano2g.c b/firmware/target/arm/s5l8700/ipodnano2g/ftl-nano2g.c
new file mode 100644
index 0000000000..bbef9d2920
--- /dev/null
+++ b/firmware/target/arm/s5l8700/ipodnano2g/ftl-nano2g.c
@@ -0,0 +1,1789 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2009 by Michael Sparmann
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22
23
24#include <config.h>
25#include <cpu.h>
26#include <nand-target.h>
27#include <ftl-target.h>
28#include <string.h>
29
30
31
32/* Keeps the state of a scattered page block.
33 This structure is used in memory only, not on flash,
34 but it equals the one the OFW uses. */
35struct ftl_log_type
36{
37
38 /* The ftl_cxt.nextblockusn at the time the block was allocated,
39 needed in order to be able to remove the oldest ones first. */
40 uint32_t usn;
41
42 /* The vBlock number at which the scattered pages are stored */
43 uint16_t scatteredvblock;
44
45 /* The lBlock number those pages belong to */
46 uint16_t logicalvblock;
47
48 /* Pointer to ftl_offsets, contains the mapping which lPage is
49 currently stored at which scattered vPage. */
50 uint16_t* pageoffsets;
51
52 /* Pages used in the vBlock, i.e. next page number to be written */
53 uint16_t pagesused;
54
55 /* Pages that are still up to date in this block, i.e. need to be
56 moved when this vBlock is deallocated. */
57 uint16_t pagescurrent;
58
59 /* A flag indicating whether all pages are still sequential in this block.
60 Initialized to 1 on allocation, zeroed as soon as anything is
61 written out of sequence, so that the block will need copying
62 when committing to get the pages back into the right order.
63 This is used to halve the number of block erases needed when
64 writing huge amounts of sequential data. */
65 uint32_t issequential;
66
67} __attribute__((packed));
68
69
70/* Keeps the state of the FTL, both on flash and in memory */
71struct ftl_cxt_type
72{
73
74 /* Update sequence number of the FTL context, decremented
75 every time a new revision of FTL meta data is written. */
76 uint32_t usn;
77
78 /* Update sequence number for user data blocks. Incremented
79 every time a portion of user pages is written, so that
80 a consistency check can determine which copy of a user
81 page is the most recent one. */
82 uint32_t nextblockusn;
83
84 /* Count of currently free pages in the block pool */
85 uint16_t freecount;
86
87 /* Index to the first free block in the blockpool ring buffer */
88 uint16_t nextfreeidx;
89
90 /* This is a counter that is used to better distribute block
91 wear. It is incremented on every block erase, and if it
92 gets too high (300 on writes, 20 on sync), the most and
93 least worn block will be swapped (inferring an additional
94 block write) and the counter will be decreased by 20. */
95 uint16_t swapcounter;
96
97 /* Ring buffer of currently free blocks. nextfreeidx is the
98 index to freecount free ones, the other ones are currently
99 allocated for scattered page blocks. */
100 uint16_t blockpool[0x14];
101
102 /* Alignment to 32 bits */
103 uint16_t field_36;
104
105 /* vPages where the block map is stored */
106 uint32_t ftl_map_pages[8];
107
108 /* Probably additional map page number space for bigger chips */
109 uint8_t field_58[0x28];
110
111 /* vPages where the erase counters are stored */
112 uint32_t ftl_erasectr_pages[8];
113
114 /* Seems to be padding */
115 uint8_t field_A0[0x70];
116
117 /* Pointer to ftl_map used by Whimory, not used by us */
118 uint32_t ftl_map_ptr;
119
120 /* Pointer to ftl_erasectr used by Whimory, not used by us */
121 uint32_t ftl_erasectr_ptr;
122
123 /* Pointer to ftl_log used by Whimory, not used by us */
124 uint32_t ftl_log_ptr;
125
126 /* Flag used to indicate that some erase counter pages should be committed
127 as they were changed more than 100 times since the last commit. */
128 uint32_t erasedirty;
129
130 /* Seems to be unused */
131 uint16_t field_120;
132
133 /* vBlocks used to store the FTL context, map, and erase
134 counter pages. This is also a ring buffer, and the oldest
135 page gets swapped with the least used page from the block
136 pool ring buffer when a new one is allocated. */
137 uint16_t ftlctrlblocks[3];
138
139 /* The last used vPage number from ftlctrlblocks */
140 uint32_t ftlctrlpage;
141
142 /* Set on context sync, reset on write, so obviously never
143 zero in the context written to the flash */
144 uint32_t clean_flag;
145
146 /* Seems to be unused, but gets loaded from flash by Whimory. */
147 uint8_t field_130[0x15C];
148
149} __attribute__((packed)) FTLCxtType;
150
151
152/* Keeps the state of the bank's VFL, both on flash and in memory.
153 There is one of these per bank. */
154typedef struct ftl_vfl_cxt_type
155{
156
157 /* Cross-bank update sequence number, incremented on every VFL
158 context commit on any bank. */
159 uint32_t usn;
160
161 /* See ftl_cxt.ftlctrlblocks. This is stored to the VFL contexts
162 in order to be able to find the most recent FTL context copy
163 when mounting the FTL. The VFL context number this will be
164 written to on an FTL context commit is chosen semi-randomly. */
165 uint16_t ftlctrlblocks[3];
166
167 /* Alignment to 32 bits */
168 uint8_t field_A[2];
169
170 /* Decrementing update counter for VFL context commits per bank */
171 uint32_t updatecount;
172
173 /* Number of the currently active VFL context block; it's an index
174 into vflcxtblocks. */
175 uint16_t activecxtblock;
176
177 /* Number of the first free page in the active VFL context block */
178 uint16_t nextcxtpage;
179
180 /* Seems to be unused */
181 uint8_t field_14[4];
182
183 /* Incremented every time a block erase error leads to a remap,
184 but doesn't seem to be read anywhere. */
185 uint16_t field_18;
186
187 /* Number of spare blocks used */
188 uint16_t spareused;
189
190 /* pBlock number of the first spare block */
191 uint16_t firstspare;
192
193 /* Total number of spare blocks */
194 uint16_t sparecount;
195
196 /* Block remap table. Contains the vBlock number the n-th spare
197 block is used as a replacement for. 0 = unused, 0xFFFF = bad. */
198 uint16_t remaptable[0x334];
199
200 /* Bad block table. Each bit represents 8 blocks. 1 = OK, 0 = Bad.
201 If the entry is zero, you should look at the remap table to see
202 if the block is remapped, and if yes, where the replacement is. */
203 uint8_t bbt[0x11A];
204
205 /* pBlock numbers used to store the VFL context. This is a ring
206 buffer. On a VFL context write, always 8 pages are written,
207 and it passes if at least 4 of them can be read back. */
208 uint16_t vflcxtblocks[4];
209
210 /* Blocks scheduled for remapping are stored at the end of the
211 remap table. This is the first index used for them. */
212 uint16_t scheduledstart;
213
214 /* Probably padding */
215 uint8_t field_7AC[0x4C];
216
217 /* First checksum (addition) */
218 uint32_t checksum1;
219
220 /* Second checksum (XOR); there is a bug in Whimory regarding this. */
221 uint32_t checksum2;
222
223} __attribute__((packed)) FTLVFLCxtType;
224
225
226/* Layout of the spare bytes of each page on the flash */
227union ftl_spare_data_type
228{
229
230 /* The layout used for actual user data (types 0x40 and 0x41) */
231 struct ftl_spare_data_user_type
232 {
233
234 /* The lPage, i.e. Sector, number */
235 uint32_t lpn;
236
237 /* The update sequence number of that page,
238 copied from ftl_cxt.nextblockusn on write */
239 uint32_t usn;
240
241 /* Seems to be unused */
242 uint8_t field_8;
243
244 /* Type field, 0x40 (data page) or 0x41 (last data page of block) */
245 uint8_t type;
246
247 /* ECC mark, usually 0xFF. If an error occurred while reading the
248 page during a copying operation earlier, this will be 0x55. */
249 uint8_t eccmark;
250
251 /* Seems to be unused */
252 uint8_t field_B;
253
254 /* ECC data for the user data */
255 uint8_t dataecc[0x28];
256
257 /* ECC data for the first 0xC bytes above */
258 uint8_t spareecc[0xC];
259
260 } __attribute__((packed)) user;
261
262 /* The layout used for meta data (other types) */
263 struct ftl_spare_data_meta_type
264 {
265
266 /* ftl_cxt.usn for FTL stuff, ftl_vfl_cxt.updatecount for VFL stuff */
267 uint32_t usn;
268
269 /* Index of the thing inside the page,
270 for example number / index of the map or erase counter page */
271 uint16_t idx;
272
273 /* Seems to be unused */
274 uint8_t field_6;
275
276 /* Seems to be unused */
277 uint8_t field_7;
278
279 /* Seems to be unused */
280 uint8_t field_8;
281
282 /* Type field:
283 0x43: FTL context page
284 0x44: Block map page
285 0x46: Erase counter page
286 0x47: "FTL is currently mounted" mark, i.e. unclean shutdown indicator
287 0x80: VFL context page */
288 uint8_t type;
289
290 /* ECC mark, usually 0xFF. If an error occurred while reading the
291 page during a copying operation earlier, this will be 0x55. */
292 uint8_t eccmark;
293
294 /* Seems to be unused */
295 uint8_t field_B;
296
297 /* ECC data for the user data */
298 uint8_t dataecc[0x28];
299
300 /* ECC data for the first 0xC bytes above */
301 uint8_t spareecc[0xC];
302
303 } __attribute__((packed)) meta;
304
305};
306
307
308/* Keeps track of troublesome blocks, only in memory, lost on unmount. */
309struct ftl_trouble_type
310{
311
312 /* vBlock number of the block giving trouble */
313 uint16_t block;
314
315 /* Bank of the block giving trouble */
316 uint8_t bank;
317
318 /* Error counter, incremented by 3 on error, decremented by 1 on erase,
319 remapping will be done when it reaches 6. */
320 uint8_t errors;
321
322} __attribute__((packed));
323
324
325
326/* Pointer to an info structure regarding the flash type used */
327const struct nand_device_info_type* ftl_nand_type;
328
329/* Number of banks we detected a chip on */
330uint32_t ftl_banks;
331
332/* Block map, used for lBlock to vBlock mapping */
333uint16_t ftl_map[0x2000];
334
335/* VFL context for each bank */
336struct ftl_vfl_cxt_type ftl_vfl_cxt[4];
337
338/* FTL context */
339struct ftl_cxt_type ftl_cxt;
340
341/* Temporary data buffer for internal use by the FTL */
342uint8_t ftl_buffer[0x800];
343
344/* Temporary spare byte buffer for internal use by the FTL */
345union ftl_spare_data_type ftl_sparebuffer;
346
347
348#ifndef FTL_READONLY
349
350/* Lowlevel BBT for each bank */
351uint8_t ftl_bbt[4][0x410];
352
353/* Erase counters for the vBlocks */
354uint16_t ftl_erasectr[0x2000];
355
356/* Used by ftl_log */
357uint16_t ftl_offsets[0x11][0x200];
358
359/* Structs keeping record of scattered page blocks */
360struct ftl_log_type ftl_log[0x11];
361
362/* Global cross-bank update sequence number of the VFL context */
363uint32_t ftl_vfl_usn;
364
365/* Keeps track (temporarily) of troublesome blocks */
366struct ftl_trouble_type ftl_troublelog[5];
367
368/* Counts erase counter page changes; after 100 of them the affected
369 page will be committed to the flash. */
370uint8_t ftl_erasectr_dirt[8];
371
372#endif
373
374
375
376/* Finds a device info page for the specified bank and returns its number.
377 Used to check if one is present, and to read the lowlevel BBT. */
378uint32_t ftl_find_devinfo(uint32_t bank)
379{
380 /* Scan the last 10% of the flash for device info pages */
381 uint32_t lowestBlock = (*ftl_nand_type).blocks
382 - ((*ftl_nand_type).blocks / 10);
383 uint32_t block, page, pagenum;
384 for (block = (*ftl_nand_type).blocks - 1; block >= lowestBlock; block--)
385 {
386 page = (*ftl_nand_type).pagesperblock - 8;
387 for (; page < (*ftl_nand_type).pagesperblock; page++)
388 {
389 pagenum = block * (*ftl_nand_type).pagesperblock + page;
390 if ((nand_read_page(bank, pagenum, ftl_buffer,
391 &ftl_sparebuffer, 1, 0) & 0x11F) != 0)
392 continue;
393 if (memcmp(ftl_buffer, "DEVICEINFOSIGN\0", 0x10) == 0)
394 return pagenum;
395 }
396 }
397 return 0;
398}
399
400
401/* Checks if all banks have proper device info pages */
402uint32_t ftl_has_devinfo(void)
403{
404 uint32_t i;
405 for (i = 0; i < ftl_banks; i++) if (ftl_find_devinfo(i) == 0) return 0;
406 return 1;
407}
408
409
410/* Loads the lowlevel BBT for a bank to the specified buffer.
411 This is based on some cryptic disassembly and not fully understood yet. */
412uint32_t ftl_load_bbt(uint32_t bank, uint8_t* bbt)
413{
414 uint32_t i, j;
415 uint32_t pagebase, page = ftl_find_devinfo(bank), page2;
416 uint32_t unk1, unk2, unk3;
417 if (page == 0) return 1;
418 pagebase = page & ~((*ftl_nand_type).pagesperblock - 1);
419 if ((nand_read_page(bank, page, ftl_buffer,
420 (uint32_t*)0, 1, 0) & 0x11F) != 0) return 1;
421 if (memcmp(&ftl_buffer[0x18], "BBT", 4) != 0) return 1;
422 unk1 = ((uint16_t*)ftl_buffer)[0x10];
423 unk2 = ((uint16_t*)ftl_buffer)[0x11];
424 unk3 = ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 0xC + 10]
425 + ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 0xC + 11];
426 for (i = 0; i < unk1; i++)
427 {
428 for (j = 0; ; j++)
429 {
430 page2 = unk2 + i + unk3 * j;
431 if (page2 >= (uint32_t)((*ftl_nand_type).pagesperblock - 8))
432 break;
433 if ((nand_read_page(bank, pagebase + page2, ftl_buffer,
434 (void*)0, 1, 0) & 0x11F) == 0)
435 {
436 memcpy(bbt, ftl_buffer, 0x410);
437 return 0;
438 }
439 }
440 }
441 return 1;
442}
443
444
445/* Calculates the checksums for the VFL context page of the specified bank */
446void ftl_vfl_calculate_checksum(uint32_t bank,
447 uint32_t* checksum1, uint32_t* checksum2)
448{
449 uint32_t i;
450 *checksum1 = 0xAABBCCDD;
451 *checksum2 = 0xAABBCCDD;
452 for (i = 0; i < 0x1FE; i++)
453 {
454 *checksum1 += ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
455 *checksum2 ^= ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
456 }
457}
458
459
460/* Checks if the checksums of the VFL context
461 of the specified bank are correct */
462uint32_t ftl_vfl_verify_checksum(uint32_t bank)
463{
464 uint32_t checksum1, checksum2;
465 ftl_vfl_calculate_checksum(bank, &checksum1, &checksum2);
466 if (checksum1 == ftl_vfl_cxt[bank].checksum1) return 0;
467 /* The following line is pretty obviously a bug in Whimory (the comparison is inverted,
468 so this check can only fail when checksum1 mismatches while checksum2 matches), but we do it the same way for compatibility. */
469 if (checksum2 != ftl_vfl_cxt[bank].checksum2) return 0;
470 return 1;
471}
472
473
474#ifndef FTL_READONLY
475/* Updates the checksums of the VFL context of the specified bank */
476void ftl_vfl_update_checksum(uint32_t bank)
477{
478 ftl_vfl_calculate_checksum(bank, &ftl_vfl_cxt[bank].checksum1,
479 &ftl_vfl_cxt[bank].checksum2);
480}
481#endif
482
483
484#ifndef FTL_READONLY
485/* Writes 8 copies of the VFL context of the specified bank to flash,
486 and succeeds if at least 4 can be read back properly. */
487uint32_t ftl_vfl_store_cxt(uint32_t bank)
488{
489 uint32_t i;
490 ftl_vfl_cxt[bank].updatecount--;
491 ftl_vfl_cxt[bank].usn = ++ftl_vfl_usn;
492 ftl_vfl_cxt[bank].nextcxtpage += 8;
493 ftl_vfl_update_checksum(bank);
494 memset(&ftl_sparebuffer, 0xFF, 0x40);
495 ftl_sparebuffer.meta.usn = ftl_vfl_cxt[bank].updatecount;
496 ftl_sparebuffer.meta.field_8 = 0;
497 ftl_sparebuffer.meta.type = 0x80;
498 for (i = 1; i <= 8; i++)
499 {
500 uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
501 uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
502 uint32_t page = block * (*ftl_nand_type).pagesperblock;
503 page += ftl_vfl_cxt[bank].nextcxtpage - i;
504 nand_write_page(bank, page, &ftl_vfl_cxt[bank], &ftl_sparebuffer, 1);
505 }
506 uint32_t good = 0;
507 for (i = 0; i < 8; i++)
508 {
509 uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
510 uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
511 uint32_t page = block * (*ftl_nand_type).pagesperblock;
512 page += ftl_vfl_cxt[bank].nextcxtpage - i;
513 if ((nand_read_page(bank, page, ftl_buffer,
514 &ftl_sparebuffer, 1, 0) & 0x11F) != 0)
515 continue;
516 if (memcmp(ftl_buffer, &ftl_vfl_cxt[bank], 0x7AC) != 0)
517 continue;
518 if (ftl_sparebuffer.meta.usn != ftl_vfl_cxt[bank].updatecount)
519 continue;
520 if (ftl_sparebuffer.meta.field_8 == 0
521 && ftl_sparebuffer.meta.type == 0x80) good++;
522 }
523 return good > 3 ? 0 : 1;
524}
525#endif
526
527
528#ifndef FTL_READONLY
529/* Commits the VFL context of the specified bank to flash,
530 retries until it works or all available pages have been tried */
531uint32_t ftl_vfl_commit_cxt(uint32_t bank)
532{
533 if (ftl_vfl_cxt[bank].nextcxtpage + 8 <= (*ftl_nand_type).pagesperblock)
534 if (ftl_vfl_store_cxt(bank) == 0) return 0;
535 uint32_t current = ftl_vfl_cxt[bank].activecxtblock;
536 uint32_t i = current, j;
537 while (1)
538 {
539 i = (i + 1) & 3;
540 if (i == current) break;
541 if (ftl_vfl_cxt[bank].vflcxtblocks[i] == 0xFFFF) continue;
542 for (j = 0; j < 4; j++)
543 if (nand_block_erase(bank, ftl_vfl_cxt[bank].vflcxtblocks[i]
544 * (*ftl_nand_type).pagesperblock) == 0)
545 break;
546 if (j == 4) continue;
547 ftl_vfl_cxt[bank].activecxtblock = i;
548 ftl_vfl_cxt[bank].nextcxtpage = 0;
549 if (ftl_vfl_store_cxt(bank) == 0) return 0;
550 }
551 return 1;
552}
553#endif
554
555
556/* Returns a pointer to the most recently updated VFL context,
557 used to find out the current FTL context vBlock numbers
558 (planetbeing's "maxthing") */
559struct ftl_vfl_cxt_type* ftl_vfl_get_newest_cxt(void)
560{
561 uint32_t i, maxusn;
562 struct ftl_vfl_cxt_type* cxt = (struct ftl_vfl_cxt_type*)0;
563 maxusn = 0;
564 for (i = 0; i < ftl_banks; i++)
565 if (ftl_vfl_cxt[i].usn >= maxusn)
566 {
567 cxt = &ftl_vfl_cxt[i];
568 maxusn = ftl_vfl_cxt[i].usn;
569 }
570 return cxt;
571}
572
573
574/* Checks if the specified pBlock is marked bad in the supplied lowlevel BBT.
575 Only used while mounting the VFL. */
576uint32_t ftl_is_good_block(uint8_t* bbt, uint32_t block)
577{
578 if ((bbt[block >> 3] & (1 << (block & 7))) == 0) return 0;
579 else return 1;
580}
581
582
583/* Checks if the specified vBlock is marked good in the VFL bbt (if not, it may have been remapped) */
584uint32_t ftl_vfl_is_good_block(uint32_t bank, uint32_t block)
585{
586 uint8_t bbtentry = ftl_vfl_cxt[bank].bbt[block >> 6];
587 if ((bbtentry & (1 << ((7 - (block >> 3)) & 7))) == 0) return 0;
588 else return 1;
589}
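/* A short worked example of the BBT bit addressing used above (illustrative
   numbers, not part of the original file): each bit of the VFL bbt covers 8
   consecutive blocks. For vBlock 100 the byte index is 100 >> 6 = 1 and the
   bit index is (7 - (100 >> 3)) & 7 = 3 (the unsigned subtraction wraps, but
   only the low three bits matter), so bbt[1] & (1 << 3) decides whether
   vBlocks 96..103 are considered good. A hypothetical standalone helper
   performing the same test could look like this:

       static uint32_t bbt_block_is_good(const uint8_t* bbt, uint32_t block)
       {
           return (bbt[block >> 6] >> ((7 - (block >> 3)) & 7)) & 1;
       }
*/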
590
591
592#ifndef FTL_READONLY
593/* Marks the specified vBlock as good or bad
594 in the specified bank's VFL context */
595void ftl_vfl_set_good_block(uint32_t bank, uint32_t block, uint32_t isgood)
596{
597 uint8_t bit = (1 << ((7 - (block >> 3)) & 7));
598 if (isgood == 1) ftl_vfl_cxt[bank].bbt[block >> 6] |= bit;
599 else ftl_vfl_cxt[bank].bbt[block >> 6] &= ~bit;
600}
601#endif
602
603
604/* Tries to read a VFL context from the specified bank, pBlock and page */
605uint32_t ftl_vfl_read_page(uint32_t bank, uint32_t block,
606 uint32_t startpage, void* databuffer,
607 union ftl_spare_data_type* sparebuffer)
608{
609 uint32_t i;
610 for (i = 0; i < 8; i++)
611 {
612 uint32_t page = block * (*ftl_nand_type).pagesperblock
613 + startpage + i;
614 if ((nand_read_page(bank, page, databuffer,
615 sparebuffer, 1, 1) & 0x11F) == 0)
616 if ((*sparebuffer).meta.field_8 == 0
617 && (*sparebuffer).meta.type == 0x80)
618 return 0;
619 }
620 return 1;
621}
622
623
624/* Translates a bank and vBlock to a pBlock, following remaps */
625uint32_t ftl_vfl_get_physical_block(uint32_t bank, uint32_t block)
626{
627 if (ftl_vfl_is_good_block(bank, block) == 1) return block;
628
629 uint32_t spareindex;
630 uint32_t spareused = ftl_vfl_cxt[bank].spareused;
631 for (spareindex = 0; spareindex < spareused; spareindex++)
632 if (ftl_vfl_cxt[bank].remaptable[spareindex] == block)
633 return ftl_vfl_cxt[bank].firstspare + spareindex;
634 return block;
635}
636
637
638#ifndef FTL_READONLY
639/* Checks if remapping is scheduled for the specified bank and vBlock */
640uint32_t ftl_vfl_check_remap_scheduled(uint32_t bank, uint32_t block)
641{
642 uint32_t i;
643 for (i = 0x333; i > 0 && i > ftl_vfl_cxt[bank].scheduledstart; i--)
644 if (ftl_vfl_cxt[bank].remaptable[i] == block) return 1;
645 return 0;
646}
647#endif
648
649
650#ifndef FTL_READONLY
651/* Schedules remapping for the specified bank and vBlock */
652void ftl_vfl_schedule_block_for_remap(uint32_t bank, uint32_t block)
653{
654 if (ftl_vfl_check_remap_scheduled(bank, block) == 1)
655 return;
656 if (ftl_vfl_cxt[bank].scheduledstart == ftl_vfl_cxt[bank].spareused)
657 return;
658 ftl_vfl_cxt[bank].remaptable[--ftl_vfl_cxt[bank].scheduledstart] = block;
659 ftl_vfl_commit_cxt(bank);
660}
661#endif
662
663
664#ifndef FTL_READONLY
665/* Removes the specified bank and vBlock combination
666 from the remap scheduled list */
667void ftl_vfl_mark_remap_done(uint32_t bank, uint32_t block)
668{
669 uint32_t i;
670 uint32_t start = ftl_vfl_cxt[bank].scheduledstart;
671 uint32_t lastscheduled = ftl_vfl_cxt[bank].remaptable[start];
672 for (i = 0x333; i > 0 && i > start; i--)
673 if (ftl_vfl_cxt[bank].remaptable[i] == block)
674 {
675 if (i != start && i != 0x333)
676 ftl_vfl_cxt[bank].remaptable[i] = lastscheduled;
677 ftl_vfl_cxt[bank].scheduledstart++;
678 return;
679 }
680}
681#endif
682
683
684#ifndef FTL_READONLY
685/* Logs that there is trouble for the specified vBlock on the specified bank.
686 The vBlock will be scheduled for remap
687 if there is too much trouble with it. */
688void ftl_vfl_log_trouble(uint32_t bank, uint32_t vblock)
689{
690 uint32_t i;
691 for (i = 0; i < 5; i++)
692 if (ftl_troublelog[i].block == vblock
693 && ftl_troublelog[i].bank == bank)
694 {
695 ftl_troublelog[i].errors += 3;
696 if (ftl_troublelog[i].errors > 5)
697 {
698 ftl_vfl_schedule_block_for_remap(bank, vblock);
699 ftl_troublelog[i].block = 0xFFFF;
700 }
701 return;
702 }
703 for (i = 0; i < 5; i++)
704 if (ftl_troublelog[i].block == 0xFFFF)
705 {
706 ftl_troublelog[i].block = vblock;
707 ftl_troublelog[i].bank = bank;
708 ftl_troublelog[i].errors = 3;
709 return;
710 }
711}
712#endif
713
714
715#ifndef FTL_READONLY
716/* Logs a successful erase for the specified vBlock on the specified bank */
717void ftl_vfl_log_success(uint32_t bank, uint32_t vblock)
718{
719 uint32_t i;
720 for (i = 0; i < 5; i++)
721 if (ftl_troublelog[i].block == vblock
722 && ftl_troublelog[i].bank == bank)
723 {
724 if (--ftl_troublelog[i].errors == 0)
725 ftl_troublelog[i].block = 0xFFFF;
726 return;
727 }
728}
729#endif
730
731
732#ifndef FTL_READONLY
733/* Tries to remap the specified vBlock on the specified bank,
734 not caring about data in there.
735 If it worked, it will return the new pBlock number;
736 if not (no more spare blocks available), it will return zero. */
737uint32_t ftl_vfl_remap_block(uint32_t bank, uint32_t block)
738{
739 uint32_t i;
740 uint32_t newblock = 0, newidx;
741 if (bank >= ftl_banks || block >= (*ftl_nand_type).blocks) return 0;
742 for (i = 0; i < ftl_vfl_cxt[bank].sparecount; i++)
743 if (ftl_vfl_cxt[bank].remaptable[i] == 0)
744 {
745 newblock = ftl_vfl_cxt[bank].firstspare + i;
746 newidx = i;
747 break;
748 }
749 if (newblock == 0) return 0;
750 for (i = 0; i < 9; i++)
751 if (nand_block_erase(bank,
752 newblock * (*ftl_nand_type).pagesperblock) == 0)
753 break;
754 for (i = 0; i < newidx; i++)
755 if (ftl_vfl_cxt[bank].remaptable[i] == block)
756 ftl_vfl_cxt[bank].remaptable[i] = 0xFFFF;
757 ftl_vfl_cxt[bank].remaptable[newidx] = block;
758 ftl_vfl_cxt[bank].spareused++;
759 ftl_vfl_set_good_block(bank, block, 0);
760 return newblock;
761}
762#endif
763
764
765/* Reads the specified vPage, dealing with all kinds of trouble */
766uint32_t ftl_vfl_read(uint32_t vpage, void* buffer, void* sparebuffer,
767 uint32_t checkempty, uint32_t remaponfail)
768{
769 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
770 uint32_t syshyperblocks = (*ftl_nand_type).blocks
771 - (*ftl_nand_type).userblocks - 0x17;
772 uint32_t abspage = vpage + ppb * syshyperblocks;
773 if (abspage >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
774 return 4;
775
776 uint32_t bank = abspage % ftl_banks;
777 uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
778 uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
779 uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
780 uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;
781
782 uint32_t ret = nand_read_page(bank, physpage, buffer,
783 sparebuffer, 1, checkempty);
784
785 if ((ret & 0x11D) != 0 && (ret & 2) == 0)
786 {
787 nand_reset(bank);
788 ret = nand_read_page(bank, physpage, buffer,
789 sparebuffer, 1, checkempty);
790#ifndef FTL_READONLY
791 if (remaponfail == 1 && (ret & 0x11D) != 0 && (ret & 2) == 0)
792 ftl_vfl_schedule_block_for_remap(bank, block);
793#endif
794 return ret;
795 }
796
797 return ret;
798}
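/* Address-translation sketch for ftl_vfl_read() above, with illustrative
   numbers (assuming 4 banks and 0x80 pages per block; other chips differ):
   ppb = 0x80 * 4 = 512 pages per hyperblock. For abspage = 1037:

       bank  = 1037 % 4          = 1
       block = 1037 / 512        = 2
       page  = (1037 / 4) % 0x80 = 3

   so consecutive vPages are interleaved across the banks, and the page that
   is finally read is physblock * 0x80 + 3 on bank 1, where physblock is
   block 2 after following any VFL remap. */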
799
800
801#ifndef FTL_READONLY
802/* Writes the specified vPage, dealing with all kinds of trouble */
803uint32_t ftl_vfl_write(uint32_t vpage, void* buffer, void* sparebuffer)
804{
805 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
806 uint32_t syshyperblocks = (*ftl_nand_type).blocks
807 - (*ftl_nand_type).userblocks - 0x17;
808 uint32_t abspage = vpage + ppb * syshyperblocks;
809 if (abspage >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
810 return 1;
811
812 uint32_t bank = abspage % ftl_banks;
813 uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
814 uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
815 uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
816 uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;
817
818 if (nand_write_page(bank, physpage, buffer, sparebuffer, 1) == 0)
819 return 0;
820
821 if ((nand_read_page(bank, physpage, ftl_buffer,
822 &ftl_sparebuffer, 1, 1) & 0x11F) == 0)
823 return 0;
824
825 ftl_vfl_log_trouble(bank, block);
826 return 1;
827}
828#endif
829
830
831/* Mounts the VFL on all banks */
832uint32_t ftl_vfl_open(void)
833{
834 uint32_t i, j, k;
835 uint32_t minusn, vflcxtidx, last;
836 FTLVFLCxtType* cxt;
837 uint16_t vflcxtblock[4];
838#ifndef FTL_READONLY
839 ftl_vfl_usn = 0;
840#else
841 /* Temporary BBT buffer if we're readonly,
842 as we won't need it again after mounting */
843 uint8_t bbt[0x410];
844#endif
845
846 uint32_t syshyperblocks = (*ftl_nand_type).blocks
847 - (*ftl_nand_type).userblocks - 0x18;
848
849 for (i = 0; i < ftl_banks; i++)
850#ifndef FTL_READONLY
851 if (ftl_load_bbt(i, ftl_bbt[i]) == 0)
852#else
853 if (ftl_load_bbt(i, bbt) == 0)
854#endif
855 {
856 for (j = 1; j <= syshyperblocks; j++)
857#ifndef FTL_READONLY
858 if (ftl_is_good_block(ftl_bbt[i], j) != 0)
859#else
860 if (ftl_is_good_block(bbt, j) != 0)
861#endif
862 if (ftl_vfl_read_page(i, j, 0, ftl_buffer,
863 &ftl_sparebuffer) == 0)
864 {
865 struct ftl_vfl_cxt_type* cxt;
866 cxt = (struct ftl_vfl_cxt_type*)ftl_buffer;
867 memcpy(vflcxtblock, &(*cxt).vflcxtblocks, 8);
868 minusn = 0xFFFFFFFF;
869 vflcxtidx = 4;
870 for (k = 0; k < 4; k++)
871 if (vflcxtblock[k] != 0xFFFF)
872 if (ftl_vfl_read_page(i, vflcxtblock[k], 0,
873 ftl_buffer,
874 &ftl_sparebuffer) == 0)
875 if (ftl_sparebuffer.meta.usn > 0
876 && ftl_sparebuffer.meta.usn <= minusn)
877 {
878 minusn = ftl_sparebuffer.meta.usn;
879 vflcxtidx = k;
880 }
881 if (vflcxtidx == 4) return 1;
882 last = 0;
883 uint32_t max = (*ftl_nand_type).pagesperblock;
884 for (k = 8; k < max; k += 8)
885 {
886 if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
887 k, ftl_buffer,
888 &ftl_sparebuffer) != 0)
889 break;
890 last = k;
891 }
892 if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
893 last, ftl_buffer,
894 &ftl_sparebuffer) != 0)
895 return 1;
896 memcpy(&ftl_vfl_cxt[i], ftl_buffer, 0x800);
897 if (ftl_vfl_verify_checksum(i) != 0) return 1;
898#ifndef FTL_READONLY
899 if (ftl_vfl_usn < ftl_vfl_cxt[i].usn)
900 ftl_vfl_usn = ftl_vfl_cxt[i].usn;
901#endif
902 break;
903 }
904 }
905 else return 1;
906 cxt = ftl_vfl_get_newest_cxt();
907 for (i = 0; i < ftl_banks; i++)
908 memcpy(ftl_vfl_cxt[i].ftlctrlblocks, (*cxt).ftlctrlblocks, 6);
909 return 0;
910}
911
912
913/* Mounts the actual FTL */
914uint32_t ftl_open(void)
915{
916 uint32_t i;
917 uint32_t ret;
918 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
919 struct ftl_vfl_cxt_type* cxt = ftl_vfl_get_newest_cxt();
920
921 uint32_t ftlcxtblock = 0xffffffff;
922 uint32_t minlpn = 0xffffffff;
923 for (i = 0; i < 3; i++)
924 {
925 ret = ftl_vfl_read(ppb * (*cxt).ftlctrlblocks[i],
926 ftl_buffer, &ftl_sparebuffer, 1, 0);
927 if ((ret &= 0x11F) != 0) continue;
928 if (ftl_sparebuffer.user.type - 0x43 > 4) continue;
929 if (ftlcxtblock != 0xffffffff && ftl_sparebuffer.user.lpn >= minlpn)
930 continue;
931 minlpn = ftl_sparebuffer.user.lpn;
932 ftlcxtblock = (*cxt).ftlctrlblocks[i];
933 }
934
935 if (ftlcxtblock == 0xffffffff) return 1;
936
937 uint32_t ftlcxtfound = 0;
938 for (i = (*ftl_nand_type).pagesperblock * ftl_banks - 1; i > 0; i--)
939 {
940 ret = ftl_vfl_read(ppb * ftlcxtblock + i,
941 ftl_buffer, &ftl_sparebuffer, 1, 0);
942 if ((ret & 0x11F) != 0) continue;
943 else if (ftl_sparebuffer.user.type == 0x43)
944 {
945 memcpy(&ftl_cxt, ftl_buffer, 0x28C);
946 ftlcxtfound = 1;
947 break;
948 }
949 else
950 {
951 /* This will trip if there was an unclean unmount before. */
952 break;
953 }
954 }
955
956 if (ftlcxtfound == 0) return 1;
957
958 uint32_t pagestoread = (*ftl_nand_type).userblocks >> 10;
959 if (((*ftl_nand_type).userblocks & 0x1FF) != 0) pagestoread++;
960
961 for (i = 0; i < pagestoread; i++)
962 {
963 if ((ftl_vfl_read(ftl_cxt.ftl_map_pages[i],
964 ftl_buffer, &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
965 return 1;
966
967 uint32_t toread = 2048;
968 if (toread > ((*ftl_nand_type).userblocks << 1) - (i << 11))
969 toread = ((*ftl_nand_type).userblocks << 1) - (i << 11);
970
971 memcpy(&ftl_map[i << 10], ftl_buffer, toread);
972 }
973
974#ifndef FTL_READONLY
975 pagestoread = ((*ftl_nand_type).userblocks + 23) >> 10;
976 if ((((*ftl_nand_type).userblocks + 23) & 0x1FF) != 0) pagestoread++;
977
978 for (i = 0; i < pagestoread; i++)
979 {
980 if ((ftl_vfl_read(ftl_cxt.ftl_erasectr_pages[i],
981 ftl_buffer, &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
982 return 1;
983
984 uint32_t toread = 2048;
985 if (toread > (((*ftl_nand_type).userblocks + 23) << 1) - (i << 11))
986 toread = (((*ftl_nand_type).userblocks + 23) << 1) - (i << 11);
987
988 memcpy(&ftl_erasectr[i << 10], ftl_buffer, toread);
989 }
990
991 for (i = 0; i < 0x11; i++)
992 {
993 ftl_log[i].scatteredvblock = 0xFFFF;
994 ftl_log[i].logicalvblock = 0xFFFF;
995 ftl_log[i].pageoffsets = ftl_offsets[i];
996 }
997
998 memset(ftl_troublelog, 0xFF, 20);
999 memset(ftl_erasectr_dirt, 0, 8);
1000#endif
1001
1002 return 0;
1003}
1004
1005
1006#ifndef FTL_READONLY
1007/* Returns a pointer to the ftl_log entry for the specified vBlock,
1008 or null, if there is none */
1009struct ftl_log_type* ftl_get_log_entry(uint32_t block)
1010{
1011 uint32_t i;
1012 for (i = 0; i < 0x11; i++)
1013 {
1014 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1015 if (ftl_log[i].logicalvblock == block) return &ftl_log[i];
1016 }
1017 return (struct ftl_log_type*)0;
1018}
1019#endif
1020
1021/* Exposed function: Read highlevel sectors */
1022uint32_t ftl_read(uint32_t sector, uint32_t count, void* buffer)
1023{
1024 uint32_t i;
1025 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1026 uint32_t error = 0;
1027
1028 if (sector + count > (*ftl_nand_type).userblocks * ppb)
1029 return 1;
1030
1031 if (count == 0) return 0;
1032
1033 for (i = 0; i < count; i++)
1034 {
1035 uint32_t block = (sector + i) / ppb;
1036 uint32_t page = (sector + i) % ppb;
1037
1038 uint32_t abspage = ftl_map[block] * ppb + page;
1039#ifndef FTL_READONLY
1040 struct ftl_log_type* logentry = ftl_get_log_entry(block);
1041 if (logentry != (struct ftl_log_type*)0)
1042 if ((*logentry).scatteredvblock != 0xFFFF
1043 && (*logentry).pageoffsets[page] != 0xFFFF)
1044 abspage = (*logentry).scatteredvblock * ppb
1045 + (*logentry).pageoffsets[page];
1046#endif
1047
1048 uint32_t ret = ftl_vfl_read(abspage, &((uint8_t*)buffer)[i << 11],
1049 &ftl_sparebuffer, 1, 1);
1050 if ((ret & 2) != 0) memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
1051 if ((ret & 0x11F) != 0 || ftl_sparebuffer.user.eccmark != 0xFF)
1052 {
1053 error = 1;
1054 memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
1055 }
1056 }
1057 return error;
1058}
1059
1060
1061#ifndef FTL_READONLY
1062/* Performs a vBlock erase, dealing with hardware,
1063 remapping and all kinds of trouble */
1064uint32_t ftl_erase_block_internal(uint32_t block)
1065{
1066 uint32_t i, j;
1067 block = block + (*ftl_nand_type).blocks
1068 - (*ftl_nand_type).userblocks - 0x17;
1069 if (block == 0 || block >= (*ftl_nand_type).blocks) return 1;
1070 for (i = 0; i < ftl_banks; i++)
1071 {
1072 if (ftl_vfl_check_remap_scheduled(i, block) == 1)
1073 {
1074 ftl_vfl_remap_block(i, block);
1075 ftl_vfl_mark_remap_done(i, block);
1076 }
1077 ftl_vfl_log_success(i, block);
1078 uint32_t pblock = ftl_vfl_get_physical_block(i, block);
1079 uint32_t rc;
1080 for (j = 0; j < 3; j++)
1081 {
1082 rc = nand_block_erase(i, pblock * (*ftl_nand_type).pagesperblock);
1083 if (rc == 0) break;
1084 }
1085 if (rc != 0)
1086 {
1087 if (pblock != block)
1088 {
1089 uint32_t spareindex = pblock - ftl_vfl_cxt[i].firstspare;
1090 ftl_vfl_cxt[i].remaptable[spareindex] = 0xFFFF;
1091 }
1092 ftl_vfl_cxt[i].field_18++;
1093 if (ftl_vfl_remap_block(i, block) == 0) return 1;
1094 if (ftl_vfl_commit_cxt(i) != 0) return 1;
1095 memset(&ftl_sparebuffer, 0, 0x40);
1096 nand_write_page(i, pblock, &ftl_vfl_cxt[0], &ftl_sparebuffer, 1);
1097 }
1098 }
1099 return 0;
1100}
1101#endif
1102
1103
1104#ifndef FTL_READONLY
1105/* Highlevel vBlock erase that increments the erase counter for the block */
1106uint32_t ftl_erase_block(uint32_t block)
1107{
1108 ftl_erasectr[block]++;
1109 if (ftl_erasectr_dirt[block >> 10] == 100) ftl_cxt.erasedirty = 1;
1110 else ftl_erasectr_dirt[block >> 10]++;
1111 return ftl_erase_block_internal(block);
1112}
1113#endif
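/* Worked note on the indexing above (derived from the declarations earlier
   in this file, for illustration): ftl_erasectr[] holds 16-bit counters and a
   vPage holds 0x800 bytes, so one erase counter page stores 0x800 / 2 = 1024
   counters. That is why the dirt counter for vBlock b lives at
   ftl_erasectr_dirt[b >> 10], and why ftl_save_erasectr_page(index) writes
   &ftl_erasectr[index << 10] as one full page. The 8 dirt entries therefore
   cover 8 * 1024 = 0x2000 counters, matching the size of ftl_erasectr[]. */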
1114
1115
1116#ifndef FTL_READONLY
1117/* Allocates a block from the pool,
1118 returning its vBlock number, or 0 on error */
1119uint32_t ftl_allocate_pool_block(void)
1120{
1121 uint32_t i;
1122 uint32_t erasectr = 0xFFFFFFFF, bestidx = 0, block;
1123 for (i = 0; i < ftl_cxt.freecount; i++)
1124 {
1125 uint32_t idx = ftl_cxt.nextfreeidx + i;
1126 if (idx >= 0x14) idx -= 0x14;
1127 if (ftl_erasectr[ftl_cxt.blockpool[idx]] < erasectr)
1128 {
1129 erasectr = ftl_erasectr[ftl_cxt.blockpool[idx]];
1130 bestidx = idx;
1131 }
1132 }
1133 block = ftl_cxt.blockpool[bestidx];
1134 if (bestidx != ftl_cxt.nextfreeidx)
1135 {
1136 ftl_cxt.blockpool[bestidx] = ftl_cxt.blockpool[ftl_cxt.nextfreeidx];
1137 ftl_cxt.blockpool[ftl_cxt.nextfreeidx] = block;
1138 }
1139 if (block > (*ftl_nand_type).userblocks) return 0;
1140 if (ftl_erase_block(block) != 0) return 0;
1141 if (++ftl_cxt.nextfreeidx == 0x14) ftl_cxt.nextfreeidx = 0;
1142 ftl_cxt.freecount--;
1143 return block;
1144}
1145#endif
1146
1147
1148#ifndef FTL_READONLY
1149/* Releases a vBlock back into the pool */
1150void ftl_release_pool_block(uint32_t block)
1151{
1152 if (block >= (*ftl_nand_type).userblocks + 0x17) return;
1153 uint32_t idx = ftl_cxt.nextfreeidx + ftl_cxt.freecount++;
1154 if (idx >= 0x14) idx -= 0x14;
1155 ftl_cxt.blockpool[idx] = block;
1156}
1157#endif
1158
1159
1160#ifndef FTL_READONLY
1161/* Commits the location of the FTL context blocks
1162 to a semi-randomly chosen VFL context */
1163uint32_t ftl_store_ctrl_block_list(void)
1164{
1165 uint32_t i;
1166 for (i = 0; i < ftl_banks; i++)
1167 memcpy(ftl_vfl_cxt[i].ftlctrlblocks, ftl_cxt.ftlctrlblocks, 6);
1168 return ftl_vfl_commit_cxt(ftl_vfl_usn % ftl_banks);
1169}
1170#endif
1171
1172
1173#ifndef FTL_READONLY
1174/* Saves the n-th erase counter page to the flash,
1175 because it is too dirty or needs to be moved. */
1176uint32_t ftl_save_erasectr_page(uint32_t index)
1177{
1178 memset(&ftl_sparebuffer, 0xFF, 0x40);
1179 ftl_sparebuffer.meta.usn = ftl_cxt.usn;
1180 ftl_sparebuffer.meta.idx = index;
1181 ftl_sparebuffer.meta.type = 0x46;
1182 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_erasectr[index << 10],
1183 &ftl_sparebuffer) != 0)
1184 return 1;
1185 if ((ftl_vfl_read(ftl_cxt.ftlctrlpage, ftl_buffer,
1186 &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
1187 return 1;
1188 if (memcmp(ftl_buffer, &ftl_erasectr[index << 10], 0x800) != 0) return 1;
1189 if (ftl_sparebuffer.meta.type != 0x46) return 1;
1190 if (ftl_sparebuffer.meta.idx != index) return 1;
1191 if (ftl_sparebuffer.meta.usn != ftl_cxt.usn) return 1;
1192 ftl_cxt.ftl_erasectr_pages[index] = ftl_cxt.ftlctrlpage;
1193 ftl_erasectr_dirt[index] = 0;
1194 return 0;
1195}
1196#endif
1197
1198
1199#ifndef FTL_READONLY
1200/* Increments ftl_cxt.ftlctrlpage to the next available FTL context page,
1201 allocating a new context block if necessary. */
1202uint32_t ftl_next_ctrl_pool_page(void)
1203{
1204 uint32_t i;
1205 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1206 if (++ftl_cxt.ftlctrlpage % ppb != 0) return 0;
1207 for (i = 0; i < 3; i++)
1208 if ((ftl_cxt.ftlctrlblocks[i] + 1) * ppb == ftl_cxt.ftlctrlpage)
1209 break;
1210 i = (i + 1) % 3;
1211 uint32_t oldblock = ftl_cxt.ftlctrlblocks[i];
1212 uint32_t newblock = ftl_allocate_pool_block();
1213 if (newblock == 0) return 1;
1214 ftl_cxt.ftlctrlblocks[i] = newblock;
1215 ftl_cxt.ftlctrlpage = newblock * ppb;
1216 uint32_t pagestoread = ((*ftl_nand_type).userblocks + 23) >> 10;
1217 if ((((*ftl_nand_type).userblocks + 23) & 0x1FF) != 0) pagestoread++;
1218 for (i = 0; i < pagestoread; i++)
1219 if (oldblock * ppb <= ftl_cxt.ftl_erasectr_pages[i]
1220 && (oldblock + 1) * ppb > ftl_cxt.ftl_erasectr_pages[i])
1221 {
1222 ftl_cxt.usn--;
1223 if (ftl_save_erasectr_page(i) != 0)
1224 {
1225 ftl_cxt.ftlctrlblocks[i] = oldblock;
1226 ftl_cxt.ftlctrlpage = oldblock * (ppb + 1) - 1;
1227 ftl_release_pool_block(newblock);
1228 return 1;
1229 }
1230 ftl_cxt.ftlctrlpage++;
1231 }
1232 ftl_release_pool_block(oldblock);
1233 return ftl_store_ctrl_block_list();
1234}
1235#endif
1236
1237
1238#ifndef FTL_READONLY
1239/* Copies a vPage from one location to another */
1240uint32_t ftl_copy_page(uint32_t source, uint32_t destination,
1241 uint32_t lpn, uint32_t type)
1242{
1243 uint8_t buffer[0x800];
1244 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1245 uint32_t rc = ftl_vfl_read(source, buffer,
1246 &ftl_sparebuffer, 1, 1) & 0x11F;
1247 memset(&ftl_sparebuffer, 0xFF, 0x40);
1248 ftl_sparebuffer.user.lpn = lpn;
1249 ftl_sparebuffer.user.usn = ++ftl_cxt.nextblockusn;
1250 ftl_sparebuffer.user.type = 0x40;
1251 if ((rc & 2) != 0) memset(buffer, 0, 0x800);
1252 else if (rc != 0) ftl_sparebuffer.user.eccmark = 0x55;
1253 if (type == 1 && destination % ppb == ppb - 1)
1254 ftl_sparebuffer.user.type = 0x41;
1255 return ftl_vfl_write(destination, buffer, &ftl_sparebuffer);
1256}
1257#endif
1258
1259
1260#ifndef FTL_READONLY
1261/* Copies an lBlock to a vBlock */
1262uint32_t ftl_copy_block(uint32_t source, uint32_t destination)
1263{
1264 uint32_t i;
1265 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1266 uint32_t error = 0;
1267 uint8_t buffer[0x800];
1268 ftl_cxt.nextblockusn++;
1269 for (i = 0; i < ppb; i++)
1270 {
1271 uint32_t rc = ftl_read(source * ppb + i, 1, buffer) & 0x11D;
1272 memset(&ftl_sparebuffer, 0xFF, 0x40);
1273 ftl_sparebuffer.user.lpn = source * ppb + i;
1274 ftl_sparebuffer.user.usn = ftl_cxt.nextblockusn;
1275 ftl_sparebuffer.user.type = 0x40;
1276 if (rc != 0) ftl_sparebuffer.user.eccmark = 0x55;
1277 if (i == ppb - 1) ftl_sparebuffer.user.type = 0x41;
1278 if (ftl_vfl_write(destination * ppb + i,
1279 buffer, &ftl_sparebuffer) != 0)
1280 {
1281 error = 1;
1282 break;
1283 }
1284 }
1285 if (error != 0)
1286 {
1287 ftl_erase_block(destination);
1288 return 1;
1289 }
1290 return 0;
1291}
1292#endif
1293
1294
1295#ifndef FTL_READONLY
1296/* Clears ftl_log.issequential, if something violating that is written. */
1297void ftl_check_still_sequential(struct ftl_log_type* entry, uint32_t page)
1298{
1299 if ((*entry).pagesused != (*entry).pagescurrent
1300 || (*entry).pageoffsets[page] != page)
1301 (*entry).issequential = 0;
1302}
1303#endif
1304
1305
1306#ifndef FTL_READONLY
1307/* Copies all pages that are currently used from the scattered page block in
1308 use by the supplied ftl_log entry to a newly-allocated one, and releases
1309 the old one.
1310 In other words: It kicks the pages containing old garbage out of it to make
1311 space again. This is usually done when a scattered page block is being
1312 removed because it is full, but less than half of the pages in there are
1313 still in use and the rest is just filled with old crap. */
1314uint32_t ftl_compact_scattered(struct ftl_log_type* entry)
1315{
1316 uint32_t i, j;
1317 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1318 uint32_t pagecount = (*entry).pagescurrent;
1319 uint32_t error;
1320 struct ftl_log_type backup;
1321 if ((*entry).pagescurrent == 0)
1322 {
1323 ftl_release_pool_block((*entry).scatteredvblock);
1324 (*entry).scatteredvblock = 0xFFFF;
1325 return 0;
1326 }
1327 backup = *entry;
1328 for (i = 0; i < 4; i++)
1329 {
1330 uint32_t block = ftl_allocate_pool_block();
1331 (*entry).pagesused = 0;
1332 (*entry).pagescurrent = 0;
1333 (*entry).issequential = 1;
1334 if (block == 0) return 1;
1335 error = 0;
1336 for (j = 0; j < ppb; j++)
1337 if ((*entry).pageoffsets[j] != 0xFFFF)
1338 {
1339 uint32_t lpn = (*entry).logicalvblock * ppb + j;
1340 uint32_t newpage = block * ppb + (*entry).pagesused;
1341 uint32_t oldpage = (*entry).scatteredvblock * ppb
1342 + (*entry).pageoffsets[j];
1343 if (ftl_copy_page(oldpage, newpage, lpn,
1344 (*entry).issequential) != 0)
1345 {
1346 error = 1;
1347 break;
1348 }
1349 (*entry).pageoffsets[j] = (*entry).pagesused++;
1350 (*entry).pagescurrent++;
1351 ftl_check_still_sequential(entry, j);
1352 }
1353 if (pagecount != (*entry).pagescurrent) error = 1;
1354 if (error == 0) break;
1355 *entry = backup;
1356 }
1357 return error;
1358}
1359#endif
1360
1361
1362#ifndef FTL_READONLY
1363/* Commits an ftl_log entry to proper blocks, no matter what's in there. */
1364uint32_t ftl_commit_scattered(struct ftl_log_type* entry)
1365{
1366 uint32_t i;
1367 uint32_t error;
1368 uint32_t block;
1369 for (i = 0; i < 4; i++)
1370 {
1371 block = ftl_allocate_pool_block();
1372 if (block == 0) return 1;
1373 error = ftl_copy_block((*entry).logicalvblock, block);
1374 if (error == 0) break;
1375 ftl_release_pool_block(block);
1376 }
1377 if (error != 0) return 1;
1378 ftl_release_pool_block((*entry).scatteredvblock);
1379 (*entry).scatteredvblock = 0xFFFF;
1380 ftl_release_pool_block(ftl_map[(*entry).logicalvblock]);
1381 ftl_map[(*entry).logicalvblock] = block;
1382 return 0;
1383}
1384#endif
1385
1386
1387#ifndef FTL_READONLY
1388/* Fills the rest of a scattered page block that was actually written
1389 sequentially until now, in order to be able to save a block erase by
1390 committing it without needing to copy it again.
1391 If this fails for whatever reason, it will be committed the usual way. */
1392uint32_t ftl_commit_sequential(struct ftl_log_type* entry)
1393{
1394 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1395 uint32_t error = 0;
1396
1397 if ((*entry).issequential != 1
1398 || (*entry).pagescurrent != (*entry).pagesused)
1399 return 1;
1400
1401 for (; (*entry).pagesused < ppb; (*entry).pagesused++)
1402 {
1403 uint32_t lpn = (*entry).logicalvblock * ppb + (*entry).pagesused;
1404 uint32_t newpage = (*entry).scatteredvblock * ppb
1405 + (*entry).pagesused;
1406 uint32_t oldpage = ftl_map[(*entry).logicalvblock] * ppb
1407 + (*entry).pagesused;
1408 if ((*entry).pageoffsets[(*entry).pagesused] != 0xFFFF
1409 || ftl_copy_page(oldpage, newpage, lpn, 1) != 0)
1410 {
1411 error = 1;
1412 break;
1413 }
1414 }
1415 if (error != 0) return ftl_commit_scattered(entry);
1416 ftl_release_pool_block(ftl_map[(*entry).logicalvblock]);
1417 ftl_map[(*entry).logicalvblock] = (*entry).scatteredvblock;
1418 (*entry).scatteredvblock = 0xFFFF;
1419 return 0;
1420}
1421#endif
1422
1423
1424#ifndef FTL_READONLY
1425/* If a log entry is supplied, its scattered page block will be removed in
1426 whatever way seems most appropriate. Else, the oldest scattered page block
1427 will be freed by committing it. */
1428uint32_t ftl_remove_scattered_block(struct ftl_log_type* entry)
1429{
1430 uint32_t i;
1431 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1432 uint32_t age = 0xFFFFFFFF, used = 0;
1433 if (entry == (struct ftl_log_type*)0)
1434 {
1435 for (i = 0; i < 0x11; i++)
1436 {
1437 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1438 if (ftl_log[i].pagesused == 0 || ftl_log[i].pagescurrent == 0)
1439 return 1;
1440 if (ftl_log[i].usn < age
1441 || (ftl_log[i].usn == age && ftl_log[i].pagescurrent > used))
1442 {
1443 age = ftl_log[i].usn;
1444 used = ftl_log[i].pagescurrent;
1445 entry = &ftl_log[i];
1446 }
1447 }
1448 if (entry == (struct ftl_log_type*)0) return 1;
1449 }
1450 else if ((*entry).pagescurrent < ppb / 2)
1451 {
1452 ftl_cxt.swapcounter++;
1453 return ftl_compact_scattered(entry);
1454 }
1455 ftl_cxt.swapcounter++;
1456 if ((*entry).issequential == 1) return ftl_commit_sequential(entry);
1457 else return ftl_commit_scattered(entry);
1458}
1459#endif
1460
1461
1462#ifndef FTL_READONLY
1463/* Initialize a log entry to the values for an empty scattered page block */
1464void ftl_init_log_entry(struct ftl_log_type* entry)
1465{
1466 (*entry).issequential = 1;
1467 (*entry).pagescurrent = 0;
1468 (*entry).pagesused = 0;
1469 memset((*entry).pageoffsets, 0xFF, 0x400);
1470}
1471#endif
1472
1473
1474#ifndef FTL_READONLY
1475/* Allocates a log entry for the specified vBlock,
1476 first making space, if necessary. */
1477struct ftl_log_type* ftl_allocate_log_entry(uint32_t block)
1478{
1479 uint32_t i;
1480 struct ftl_log_type* entry = ftl_get_log_entry(block);
1481 if (entry != (struct ftl_log_type*)0) return entry;
1482
1483 for (i = 0; i < 0x11; i++)
1484 {
1485 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1486 if (ftl_log[i].pagesused == 0)
1487 {
1488 entry = &ftl_log[i];
1489 break;
1490 }
1491 }
1492
1493 if (entry == (struct ftl_log_type*)0)
1494 {
1495 if (ftl_cxt.freecount == 3)
1496 if (ftl_remove_scattered_block((struct ftl_log_type*)0) != 0)
1497 return (struct ftl_log_type*)0;
1498 entry = ftl_log;
1499 while ((*entry).scatteredvblock != 0xFFFF) entry = &entry[1];
1500 (*entry).scatteredvblock = ftl_allocate_pool_block();
1501 if ((*entry).scatteredvblock == 0)
1502 {
1503 (*entry).scatteredvblock = 0xFFFF;
1504 return (struct ftl_log_type*)0;
1505 }
1506 }
1507
1508 (*entry).logicalvblock = block;
1509 ftl_init_log_entry(entry);
1510 (*entry).usn = ftl_cxt.nextblockusn - 1;
1511
1512 return entry;
1513}
1514#endif
1515
1516
1517#ifndef FTL_READONLY
1518/* Commits the FTL block map, erase counters, and context to flash */
1519uint32_t ftl_commit_cxt(void)
1520{
1521 uint32_t i;
1522 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1523 uint32_t mappages = ((*ftl_nand_type).userblocks + 0x3ff) >> 10;
1524 uint32_t ctrpages = ((*ftl_nand_type).userblocks + 23 + 0x3ff) >> 10;
1525 uint32_t endpage = ftl_cxt.ftlctrlpage + mappages + ctrpages + 1;
1526 if (endpage % ppb > ppb - 1)
1527 ftl_cxt.ftlctrlpage |= ppb - 1;
1528 for (i = 0; i < ctrpages; i++)
1529 {
1530 if (ftl_next_ctrl_pool_page() != 0) return 1;
1531 if (ftl_save_erasectr_page(i) != 0) return 1;
1532 }
1533 for (i = 0; i < mappages; i++)
1534 {
1535 if (ftl_next_ctrl_pool_page() != 0) return 1;
1536 memset(&ftl_sparebuffer, 0xFF, 0x40);
1537 ftl_sparebuffer.meta.usn = ftl_cxt.usn;
1538 ftl_sparebuffer.meta.idx = i;
1539 ftl_sparebuffer.meta.type = 0x44;
1540 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_map[i << 10],
1541 &ftl_sparebuffer) != 0)
1542 return 1;
1543 ftl_cxt.ftl_map_pages[i] = ftl_cxt.ftlctrlpage;
1544 }
1545 if (ftl_next_ctrl_pool_page() != 0) return 1;
1546 ftl_cxt.clean_flag = 1;
1547 memset(&ftl_sparebuffer, 0xFF, 0x40);
1548 ftl_sparebuffer.meta.usn = ftl_cxt.usn;
1549 ftl_sparebuffer.meta.type = 0x43;
1550 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_cxt, &ftl_sparebuffer) != 0)
1551 return 1;
1552 return 0;
1553}
1554#endif
1555
1556
1557#ifndef FTL_READONLY
1558/* Swaps the most and least worn block on the flash,
1559 to better distribute wear. It will refuse to do anything
1560 if the wear spread is lower than 5 erases. */
1561uint32_t ftl_swap_blocks(void)
1562{
1563 uint32_t i;
1564 uint32_t min = 0xFFFFFFFF, max = 0, maxidx = 0x14;
1565 uint32_t minidx = 0, minvb = 0, maxvb = 0;
1566 for (i = 0; i < ftl_cxt.freecount; i++)
1567 {
1568 uint32_t idx = ftl_cxt.nextfreeidx + i;
1569 if (idx >= 0x14) idx -= 0x14;
1570 if (ftl_erasectr[ftl_cxt.blockpool[idx]] > max)
1571 {
1572 maxidx = idx;
1573 maxvb = ftl_cxt.blockpool[idx];
1574 max = ftl_erasectr[maxidx];
1575 }
1576 }
1577 if (maxidx == 0x14) return 0;
1578 for (i = 0; i < (*ftl_nand_type).userblocks; i++)
1579 {
1580 if (ftl_erasectr[ftl_map[i]] > max) max = ftl_erasectr[ftl_map[i]];
1581 if (ftl_get_log_entry(i) != (struct ftl_log_type*)0) continue;
1582 if (ftl_erasectr[ftl_map[i]] < min)
1583 {
1584 minidx = i;
1585 minvb = ftl_map[i];
1586 min = ftl_erasectr[minidx];
1587 }
1588 }
1589 if (max - min < 5) return 0;
1590 if (minvb == maxvb) return 0;
1591 if (ftl_erase_block(maxvb) != 0) return 1;
1592 if (ftl_copy_block(minidx, maxvb) != 0) return 1;
1593 ftl_cxt.blockpool[maxidx] = minvb;
1594 ftl_map[minidx] = maxvb;
1595 return 0;
1596}
1597#endif
1598
1599
1600#ifndef FTL_READONLY
1601/* Exposed function: Write highlevel sectors */
1602uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
1603{
1604 uint32_t i, j;
1605 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1606
1607 if (sector + count > (*ftl_nand_type).userblocks * ppb)
1608 return 1;
1609
1610 if (count == 0) return 0;
1611
1612 if (ftl_cxt.clean_flag == 1)
1613 {
1614 for (i = 0; i < 3; i++)
1615 {
1616 if (ftl_next_ctrl_pool_page() != 0) return 1;
1617 memset(ftl_buffer, 0xFF, 0x800);
1618 memset(&ftl_sparebuffer, 0xFF, 0x40);
1619 ftl_sparebuffer.meta.usn = ftl_cxt.usn;
1620 ftl_sparebuffer.meta.type = 0x47;
1621 if (ftl_vfl_write(ftl_cxt.ftlctrlpage, ftl_buffer,
1622 &ftl_sparebuffer) == 0)
1623 break;
1624 }
1625 if (i == 3) return 1;
1626 ftl_cxt.clean_flag = 0;
1627 }
1628
1629 for (i = 0; i < count; )
1630 {
1631 uint32_t block = (sector + i) / ppb;
1632 uint32_t page = (sector + i) % ppb;
1633
1634 struct ftl_log_type* logentry = ftl_allocate_log_entry(block);
1635 if (logentry == (struct ftl_log_type*)0) return 1;
1636 if (page == 0 && count - i >= ppb)
1637 {
1638 uint32_t vblock = (*logentry).scatteredvblock;
1639 (*logentry).scatteredvblock = 0xFFFF;
1640 if ((*logentry).pagesused != 0)
1641 {
1642 ftl_release_pool_block(vblock);
1643 vblock = ftl_allocate_pool_block();
1644 if (vblock == 0) return 1;
1645 }
1646 ftl_cxt.nextblockusn++;
1647 for (j = 0; j < ppb; j++)
1648 {
1649 memset(&ftl_sparebuffer, 0xFF, 0x40);
1650 ftl_sparebuffer.user.lpn = sector + i + j;
1651 ftl_sparebuffer.user.usn = ftl_cxt.nextblockusn;
1652 ftl_sparebuffer.user.type = 0x40;
1653 if (j == ppb - 1) ftl_sparebuffer.user.type = 0x41;
1654 while (ftl_vfl_write(vblock * ppb + j,
1655 &((uint8_t*)buffer)[(i + j) << 11],
1656 &ftl_sparebuffer) != 0);
1657 }
1658 ftl_release_pool_block(ftl_map[block]);
1659 ftl_map[block] = vblock;
1660 i += ppb;
1661 }
1662 else
1663 {
1664 if ((*logentry).pagesused == ppb)
1665 {
1666 ftl_remove_scattered_block(logentry);
1667 logentry = ftl_allocate_log_entry(block);
1668 if (logentry == (struct ftl_log_type*)0) return 1;
1669 }
1670 memset(&ftl_sparebuffer, 0xFF, 0x40);
1671 ftl_sparebuffer.user.lpn = sector + i;
1672 ftl_sparebuffer.user.usn = ++ftl_cxt.nextblockusn;
1673 ftl_sparebuffer.user.type = 0x40;
1674 uint32_t abspage = (*logentry).scatteredvblock * ppb
1675 + (*logentry).pagesused++;
1676 if (ftl_vfl_write(abspage, &((uint8_t*)buffer)[i << 11],
1677 &ftl_sparebuffer) == 0)
1678 {
1679 if ((*logentry).pageoffsets[page] == 0xFFFF)
1680 (*logentry).pagescurrent++;
1681 (*logentry).pageoffsets[page] = (*logentry).pagesused - 1;
1682 ftl_check_still_sequential(logentry, page);
1683 i++;
1684 }
1685 }
1686 }
1687 if (ftl_cxt.swapcounter >= 300)
1688 {
1689 ftl_cxt.swapcounter -= 20;
1690 for (i = 0; i < 4; i++) if (ftl_swap_blocks() == 0) break;
1691 }
1692 if (ftl_cxt.erasedirty == 1)
1693 {
1694 ftl_cxt.erasedirty = 0;
1695 for (i = 0; i < 8; i++)
1696 if (ftl_erasectr_dirt[i] >= 100)
1697 {
1698 ftl_next_ctrl_pool_page();
1699 ftl_save_erasectr_page(i);
1700 }
1701 }
1702 return 0;
1703}
1704#endif
1705
1706
1707#ifndef FTL_READONLY
1708/* Exposed function: Performs a sync / unmount,
1709 i.e. commits all scattered page blocks,
1710 distributes wear, and commits the FTL context. */
1711uint32_t ftl_sync(void)
1712{
1713 uint32_t i;
1714 uint32_t rc = 0;
1715 uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1716 if (ftl_cxt.clean_flag == 1) return 0;
1717
1718 if (ftl_cxt.swapcounter >= 20)
1719 for (i = 0; i < 4; i++)
1720 if (ftl_swap_blocks() == 0)
1721 {
1722 ftl_cxt.swapcounter -= 20;
1723 break;
1724 }
1725 for (i = 0; i < 0x11; i++)
1726 {
1727 if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1728 ftl_cxt.nextblockusn++;
1729 if (ftl_log[i].issequential == 1)
1730 rc |= ftl_commit_sequential(&ftl_log[i]);
1731 else rc |= ftl_commit_scattered(&ftl_log[i]);
1732 }
1733 if (rc != 0) return 1;
1734 for (i = 0; i < 5; i++)
1735 if (ftl_commit_cxt() == 0) return 0;
1736 else ftl_cxt.ftlctrlpage |= ppb - 1;
1737 return 1;
1738}
1739#endif
1740
1741
1742/* Initializes and mounts the FTL. As long as nothing was written,
1743 you won't need to unmount it.
1744 Before shutting down after writing something, call ftl_sync(),
1745 which will just do nothing if everything was already clean. */
1746uint32_t ftl_init(void)
1747{
1748 uint32_t i;
1749 uint32_t result = 0;
1750 uint32_t foundsignature, founddevinfo, blockwiped, repaired, skip;
1751 if (nand_device_init() != 0) return 1;
1752 ftl_banks = 0;
1753 for (i = 0; i < 4; i++)
1754 if (nand_get_device_type(i) != 0) ftl_banks = i + 1;
1755 ftl_nand_type = nand_get_device_type(0);
1756 foundsignature = 0;
1757 blockwiped = 1;
1758 for (i = 0; i < (*ftl_nand_type).pagesperblock; i++)
1759 {
1760 result = nand_read_page(0, i, ftl_buffer, (uint32_t*)0, 1, 1);
1761 if ((result & 0x11F) == 0)
1762 {
1763 blockwiped = 0;
1764 if (((uint32_t*)ftl_buffer)[0] != 0x41303034) continue;
1765 foundsignature = 1;
1766 break;
1767 }
1768 else if ((result & 2) != 2) blockwiped = 0;
1769 }
1770
1771 founddevinfo = ftl_has_devinfo();
1772
1773 repaired = 0;
1774 skip = 0;
1775 if (founddevinfo == 0) return 1;
1776 if (foundsignature != 0 && (result & 0x11F) != 0) return 1;
1777 if (ftl_vfl_open() == 0)
1778 if (ftl_open() == 0) return 0;
1779
1780/* Something went terribly wrong. We may want to allow the user to erase
1781 block zero in that condition, to make norboot reinitialize the FTL.
1782 (However there is currently no point in this, as iLoader would already
1783 fail if that were the case.)
1784
1785 nand_block_erase(0, 0);
1786*/
1787
1788 return 1;
1789}
diff --git a/firmware/target/arm/s5l8700/ipodnano2g/ftl-target.h b/firmware/target/arm/s5l8700/ipodnano2g/ftl-target.h
new file mode 100644
index 0000000000..f214964551
--- /dev/null
+++ b/firmware/target/arm/s5l8700/ipodnano2g/ftl-target.h
@@ -0,0 +1,34 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2009 by Michael Sparmann
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef __FTL_H__
23#define __FTL_H__
24
25#include "config.h"
26#include "inttypes.h"
27
28uint32_t ftl_init(void);
29uint32_t ftl_read(uint32_t sector, uint32_t count, void* buffer);
30uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer);
31uint32_t ftl_sync(void);
32
33
34#endif
diff --git a/firmware/target/arm/s5l8700/ipodnano2g/nand-nano2g.c b/firmware/target/arm/s5l8700/ipodnano2g/nand-nano2g.c
new file mode 100644
index 0000000000..ba83ab6df2
--- /dev/null
+++ b/firmware/target/arm/s5l8700/ipodnano2g/nand-nano2g.c
@@ -0,0 +1,399 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2009 by Michael Sparmann
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22
23#include "config.h"
24#include "system.h"
25#include "cpu.h"
26#include "inttypes.h"
27#include "nand-target.h"
28#include <string.h>
29
30
31#define NAND_CMD_READ 0x00
32#define NAND_CMD_PROGCNFRM 0x10
33#define NAND_CMD_READ2 0x30
34#define NAND_CMD_BLOCKERASE 0x60
35#define NAND_CMD_GET_STATUS 0x70
36#define NAND_CMD_PROGRAM 0x80
37#define NAND_CMD_ERASECNFRM 0xD0
38#define NAND_CMD_RESET 0xFF
39
40#define NAND_STATUS_READY 0x40
41
42#define NAND_DEVICEINFOTABLE_ENTRIES 33
43
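/* Each row below is a struct nand_device_info_type (see nand-target.h):
   { id, blocks, userblocks, pagesperblock, blocksizeexponent,
     tunk1, twp, tunk2, tunk3 }.
   tunk1 and twp are presumably per-chip timing values; they are fed into
   FMCTRL0 by nand_set_fmctrl0() below, while tunk2/tunk3 are stored by
   nand_device_init() but not otherwise used in this file. */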
44static const struct nand_device_info_type nand_deviceinfotable[] =
45{
46 {0x1580F1EC, 1024, 968, 0x40, 6, 2, 1, 2, 1},
47 {0x1580DAEC, 2048, 1936, 0x40, 6, 2, 1, 2, 1},
48 {0x15C1DAEC, 2048, 1936, 0x40, 6, 2, 1, 2, 1},
49 {0x1510DCEC, 4096, 3872, 0x40, 6, 2, 1, 2, 1},
50 {0x95C1DCEC, 4096, 3872, 0x40, 6, 2, 1, 2, 1},
51 {0x2514DCEC, 2048, 1936, 0x80, 7, 2, 1, 2, 1},
52 {0x2514D3EC, 4096, 3872, 0x80, 7, 2, 1, 2, 1},
53 {0x2555D3EC, 4096, 3872, 0x80, 7, 2, 1, 2, 1},
54 {0x2555D5EC, 8192, 7744, 0x80, 7, 2, 1, 2, 1},
55 {0x2585D3AD, 4096, 3872, 0x80, 7, 3, 2, 3, 2},
56 {0x9580DCAD, 4096, 3872, 0x40, 6, 3, 2, 3, 2},
57 {0xA514D3AD, 4096, 3872, 0x80, 7, 3, 2, 3, 2},
58 {0xA550D3AD, 4096, 3872, 0x80, 7, 3, 2, 3, 2},
59 {0xA560D5AD, 4096, 3872, 0x80, 7, 3, 2, 3, 2},
60 {0xA555D5AD, 8192, 7744, 0x80, 7, 3, 2, 3, 2},
61 {0xA585D598, 8320, 7744, 0x80, 7, 3, 1, 2, 1},
62 {0xA584D398, 4160, 3872, 0x80, 7, 3, 1, 2, 1},
63 {0x95D1D32C, 8192, 7744, 0x40, 6, 2, 1, 2, 1},
64 {0x1580DC2C, 4096, 3872, 0x40, 6, 2, 1, 2, 1},
65 {0x15C1D32C, 8192, 7744, 0x40, 6, 2, 1, 2, 1},
66 {0x9590DC2C, 4096, 3872, 0x40, 6, 2, 1, 2, 1},
67 {0xA594D32C, 4096, 3872, 0x80, 7, 2, 1, 2, 1},
68 {0x2584DC2C, 2048, 1936, 0x80, 7, 2, 1, 2, 1},
69 {0xA5D5D52C, 8192, 7744, 0x80, 7, 3, 2, 2, 1},
70 {0x95D1D389, 8192, 7744, 0x40, 6, 2, 1, 2, 1},
71 {0x1580DC89, 4096, 3872, 0x40, 6, 2, 1, 2, 1},
72 {0x15C1D389, 8192, 7744, 0x40, 6, 2, 1, 2, 1},
73 {0x9590DC89, 4096, 3872, 0x40, 6, 2, 1, 2, 1},
74 {0xA594D389, 4096, 3872, 0x80, 7, 2, 1, 2, 1},
75 {0x2584DC89, 2048, 1936, 0x80, 7, 2, 1, 2, 1},
76 {0xA5D5D589, 8192, 7744, 0x80, 7, 2, 1, 2, 1},
77 {0xA514D320, 4096, 3872, 0x80, 7, 2, 1, 2, 1},
78 {0xA555D520, 8192, 3872, 0x80, 7, 2, 1, 2, 1}
79};
80
81uint8_t nand_tunk1[4];
82uint8_t nand_twp[4];
83uint8_t nand_tunk2[4];
84uint8_t nand_tunk3[4];
85uint32_t nand_type[4];
86
87static uint8_t nand_aligned_data[0x800] __attribute__((aligned(32)));
88static uint8_t nand_aligned_ctrl[0x200] __attribute__((aligned(32)));
89static uint8_t nand_aligned_spare[0x40] __attribute__((aligned(32)));
90static uint8_t nand_aligned_ecc[0x28] __attribute__((aligned(32)));
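/* The arrays above live in normally cached DRAM; OR-ing their addresses with
   0x40000000 below presumably yields an uncached alias of the same memory,
   so that the DMA engine and the ECC unit see the data without explicit
   cache maintenance. */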
91#define nand_uncached_data \
92 ((uint8_t*)(((uint32_t)nand_aligned_data) | 0x40000000))
93#define nand_uncached_ctrl \
94 ((uint8_t*)(((uint32_t)nand_aligned_ctrl) | 0x40000000))
95#define nand_uncached_spare \
96 ((uint8_t*)(((uint32_t)nand_aligned_spare) | 0x40000000))
97#define nand_uncached_ecc \
98 ((uint8_t*)(((uint32_t)nand_aligned_ecc) | 0x40000000))
99
100
101uint32_t nand_wait_rbbdone(void)
102{
103 uint32_t timeout = 0x40000;
104 while ((FMCSTAT & FMCSTAT_RBBDONE) == 0) if (timeout-- == 0) return 1;
105 FMCSTAT = FMCSTAT_RBBDONE;
106 return 0;
107}
108
109uint32_t nand_wait_cmddone(void)
110{
111 uint32_t timeout = 0x40000;
112 while ((FMCSTAT & FMCSTAT_CMDDONE) == 0) if (timeout-- == 0) return 1;
113 FMCSTAT = FMCSTAT_CMDDONE;
114 return 0;
115}
116
117uint32_t nand_wait_addrdone(void)
118{
119 uint32_t timeout = 0x40000;
120 while ((FMCSTAT & FMCSTAT_ADDRDONE) == 0) if (timeout-- == 0) return 1;
121 FMCSTAT = FMCSTAT_ADDRDONE;
122 return 0;
123}
124
125uint32_t nand_wait_chip_ready(uint32_t bank)
126{
127 uint32_t timeout = 0x40000;
128 while ((FMCSTAT & (FMCSTAT_BANK0READY << bank)) == 0)
129 if (timeout-- == 0) return 1;
130 FMCSTAT = (FMCSTAT_BANK0READY << bank);
131 return 0;
132}
133
134void nand_set_fmctrl0(uint32_t bank, uint32_t flags)
135{
136 FMCTRL0 = (nand_tunk1[bank] << 16) | (nand_twp[bank] << 12)
137 | (1 << 11) | 1 | (1 << (bank + 1)) | flags;
138}
139
140uint32_t nand_send_cmd(uint32_t cmd)
141{
142 FMCMD = cmd;
143 return nand_wait_rbbdone();
144}
145
146uint32_t nand_send_address(uint32_t page, uint32_t offset)
147{
148 FMANUM = 4;
149 FMADDR0 = (page << 16) | offset;
150 FMADDR1 = (page >> 16) & 0xFF;
151 FMCTRL1 = FMCTRL1_DOTRANSADDR;
152 return nand_wait_cmddone();
153}
154
155uint32_t nand_reset(uint32_t bank)
156{
157 nand_set_fmctrl0(bank, 0);
158 if (nand_send_cmd(NAND_CMD_RESET) != 0) return 1;
159 if (nand_wait_chip_ready(bank) != 0) return 1;
160 FMCTRL1 = FMCTRL1_CLEARRFIFO | FMCTRL1_CLEARWFIFO;
161 return 0;
162}
163
164uint32_t nand_wait_status_ready(uint32_t bank)
165{
166 uint32_t timeout = 0x4000;
167 nand_set_fmctrl0(bank, 0);
168 if ((FMCSTAT & (FMCSTAT_BANK0READY << bank)) != 0)
169 FMCSTAT = (FMCSTAT_BANK0READY << bank);
170 FMCTRL1 = FMCTRL1_CLEARRFIFO | FMCTRL1_CLEARWFIFO;
171 if (nand_send_cmd(NAND_CMD_GET_STATUS) != 0) return 1;
172 while (1)
173 {
174 if (timeout-- == 0) return 1;
175 FMDNUM = 0;
176 FMCTRL1 = FMCTRL1_DOREADDATA;
177 if (nand_wait_addrdone() != 0) return 1;
178 if ((FMFIFO & NAND_STATUS_READY) != 0) break;
179 FMCTRL1 = FMCTRL1_CLEARRFIFO;
180 }
181 FMCTRL1 = FMCTRL1_CLEARRFIFO;
182 return nand_send_cmd(NAND_CMD_READ);
183}
184
185uint32_t nand_transfer_data(uint32_t bank, uint32_t direction,
186 void* buffer, uint32_t size)
187{
188 uint32_t timeout = 0x40000;
189 nand_set_fmctrl0(bank, FMCTRL0_ENABLEDMA);
190 FMDNUM = size - 1;
191 FMCTRL1 = FMCTRL1_DOREADDATA << direction;
192 DMACON3 = (2 << DMACON_DEVICE_SHIFT)
193 | (direction << DMACON_DIRECTION_SHIFT)
194 | (2 << DMACON_DATA_SIZE_SHIFT)
195 | (3 << DMACON_BURST_LEN_SHIFT);
196 while ((DMAALLST & DMAALLST_CHAN3_MASK) != 0)
197 DMACOM3 = DMACOM_CLEARBOTHDONE;
198 DMABASE3 = (uint32_t)buffer;
199 DMATCNT3 = (size >> 4) - 1;
200 DMACOM3 = 4;
201 while ((DMAALLST & DMAALLST_DMABUSY3) != 0)
202 if (timeout-- == 0) return 1;
203 if (nand_wait_addrdone() != 0) return 1;
204 if (direction == 0) FMCTRL1 = FMCTRL1_CLEARRFIFO | FMCTRL1_CLEARWFIFO;
205 return 0;
206}
207
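/* The two helpers below drive the hardware ECC unit. The meaning of the size
   argument written to ECC_UNK1 is not documented here; judging from the call
   sites (3 for the 0x800-byte data buffer, 0 for the 0x200-byte control
   buffer) it is presumably the number of 512-byte chunks minus one. A set
   bit 0 in the value returned by ecc_decode() appears to indicate that the
   buffer could not be corrected (see its use in nand_read_page()). */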
208uint32_t ecc_decode(uint32_t size, void* databuffer, void* sparebuffer)
209{
210 uint32_t timeout = 0x40000;
211 ECC_INT_CLR = 1;
212 SRCPND = INTMSK_ECC;
213 ECC_UNK1 = size;
214 ECC_DATA_PTR = (uint32_t)databuffer;
215 ECC_SPARE_PTR = (uint32_t)sparebuffer;
216 ECC_CTRL = ECCCTRL_STARTDECODING;
217 while ((SRCPND & INTMSK_ECC) == 0) if (timeout-- == 0) return 1;
218 ECC_INT_CLR = 1;
219 SRCPND = INTMSK_ECC;
220 return ECC_RESULT;
221}
222
223uint32_t ecc_encode(uint32_t size, void* databuffer, void* sparebuffer)
224{
225 uint32_t timeout = 0x40000;
226 ECC_INT_CLR = 1;
227 SRCPND = INTMSK_ECC;
228 ECC_UNK1 = size;
229 ECC_DATA_PTR = (uint32_t)databuffer;
230 ECC_SPARE_PTR = (uint32_t)sparebuffer;
231 ECC_CTRL = ECCCTRL_STARTENCODING;
232 while ((SRCPND & INTMSK_ECC) == 0) if (timeout-- == 0) return 1;
233 ECC_INT_CLR = 1;
234 SRCPND = INTMSK_ECC;
235 return 0;
236}
237
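/* Used by nand_read_page(): a page is treated as empty (erased) when at most
   one of the 0x40 spare bytes differs from 0xFF, which presumably tolerates
   a single corrupted byte in an otherwise erased page. */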
238uint32_t nand_check_empty(uint8_t* buffer)
239{
240 uint32_t i, count;
241 count = 0;
242 for (i = 0; i < 0x40; i++) if (buffer[i] != 0xFF) count++;
243 if (count < 2) return 1;
244 return 0;
245}
246
247uint32_t nand_get_chip_type(uint32_t bank)
248{
249 uint32_t result;
250 if (nand_reset(bank) != 0) return 0xFFFFFFFF;
251 if (nand_send_cmd(0x90) != 0) return 0xFFFFFFFF;
252 FMANUM = 0;
253 FMADDR0 = 0;
254 FMCTRL1 = FMCTRL1_DOTRANSADDR;
255 if (nand_wait_cmddone() != 0) return 0xFFFFFFFF;
256 FMDNUM = 4;
257 FMCTRL1 = FMCTRL1_DOREADDATA;
258 if (nand_wait_addrdone() != 0) return 0xFFFFFFFF;
259 result = FMFIFO;
260 FMCTRL1 = FMCTRL1_CLEARRFIFO | FMCTRL1_CLEARWFIFO;
261 return result;
262}
263
264uint32_t nand_read_page(uint32_t bank, uint32_t page, void* databuffer,
265 void* sparebuffer, uint32_t doecc,
266 uint32_t checkempty)
267{
268 uint32_t rc, eccresult;
269 nand_set_fmctrl0(bank, FMCTRL0_ENABLEDMA);
270 if (nand_send_cmd(NAND_CMD_READ) != 0) return 1;
271 if (nand_send_address(page, (databuffer == 0) ? 0x800 : 0) != 0)
272 return 1;
273 if (nand_send_cmd(NAND_CMD_READ2) != 0) return 1;
274 if (nand_wait_status_ready(bank) != 0) return 1;
275 if (databuffer != 0)
276 if (nand_transfer_data(bank, 0, nand_uncached_data, 0x800) != 0)
277 return 1;
278 if (doecc == 0)
279 {
280        if (databuffer != 0) memcpy(databuffer, nand_uncached_data, 0x800);
281 if (sparebuffer != 0)
282 {
283 if (nand_transfer_data(bank, 0, nand_uncached_spare, 0x40) != 0)
284 return 1;
285            memcpy(sparebuffer, nand_uncached_spare, 0x40);
286 if (checkempty != 0)
287 return nand_check_empty((uint8_t*)sparebuffer) << 1;
288 }
289 return 0;
290 }
291 rc = 0;
292 if (nand_transfer_data(bank, 0, nand_uncached_spare, 0x40) != 0)
293 return 1;
294 memcpy(nand_uncached_ecc, &nand_uncached_spare[0xC], 0x28);
295 rc |= (ecc_decode(3, nand_uncached_data, nand_uncached_ecc) & 0xF) << 4;
296 if (databuffer != 0) memcpy(databuffer, nand_uncached_data, 0x800);
297 memset(nand_uncached_ctrl, 0xFF, 0x200);
298 memcpy(nand_uncached_ctrl, nand_uncached_spare, 0xC);
299 memcpy(nand_uncached_ecc, &nand_uncached_spare[0x34], 0xC);
300 eccresult = ecc_decode(0, nand_uncached_ctrl, nand_uncached_ecc);
301 rc |= (eccresult & 0xF) << 8;
302 if (sparebuffer != 0)
303 {
304 memcpy(sparebuffer, nand_uncached_spare, 0x40);
305 if ((eccresult & 1) != 0) memset(sparebuffer, 0xFF, 0xC);
306 else memcpy(sparebuffer, nand_uncached_ctrl, 0xC);
307 }
308 if (checkempty != 0) rc |= nand_check_empty(nand_uncached_spare) << 1;
309
310 return rc;
311}
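/* Spare-area layout used by nand_read_page() above and nand_write_page()
   below, as inferred from the offsets in the code:
     0x00..0x0B  12 bytes of metadata
     0x0C..0x33  0x28 bytes of ECC covering the 0x800-byte main data
     0x34..0x3F  0x0C bytes of ECC covering the metadata
   The value returned by nand_read_page() is a bit mask:
     bit 0       command or transfer failure
     bit 1       page appears to be empty (see nand_check_empty())
     bits 4..7   data ECC result nibble
     bits 8..11  metadata ECC result nibble
   which is why callers such as ftl_init() test masks like 0x11F against it. */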
312
313uint32_t nand_write_page(uint32_t bank, uint32_t page, void* databuffer,
314 void* sparebuffer, uint32_t doecc)
315{
316 if (sparebuffer != 0) memcpy(nand_uncached_spare, sparebuffer, 0x40);
317 else memset(nand_uncached_spare, 0xFF, 0x40);
318 if (doecc != 0)
319 {
320 memcpy(nand_uncached_data, databuffer, 0x800);
321 if (ecc_encode(3, nand_uncached_data, nand_uncached_ecc) != 0)
322 return 1;
323 memcpy(&nand_uncached_spare[0xC], nand_uncached_ecc, 0x28);
324 memset(nand_uncached_ctrl, 0xFF, 0x200);
325 memcpy(nand_uncached_ctrl, nand_uncached_spare, 0xC);
326 if (ecc_encode(0, nand_uncached_ctrl, nand_uncached_ecc) != 0)
327 return 1;
328 memcpy(&nand_uncached_spare[0x34], nand_uncached_ecc, 0xC);
329 }
330 nand_set_fmctrl0(bank, FMCTRL0_ENABLEDMA);
331 if (nand_send_cmd(NAND_CMD_PROGRAM) != 0)
332 return 1;
333 if (nand_send_address(page, (databuffer == 0) ? 0x800 : 0) != 0)
334 return 1;
335 if (databuffer != 0)
336 if (nand_transfer_data(bank, 1, nand_uncached_data, 0x800) != 0)
337 return 1;
338 if (sparebuffer != 0 || doecc != 0)
339 if (nand_transfer_data(bank, 1, nand_uncached_spare, 0x40) != 0)
340 return 1;
341 if (nand_send_cmd(NAND_CMD_PROGCNFRM) != 0) return 1;
342 return nand_wait_status_ready(bank);
343}
344
345uint32_t nand_block_erase(uint32_t bank, uint32_t page)
346{
347 nand_set_fmctrl0(bank, 0);
348 if (nand_send_cmd(NAND_CMD_BLOCKERASE) != 0) return 1;
349 FMANUM = 2;
350 FMADDR0 = page;
351 FMCTRL1 = FMCTRL1_DOTRANSADDR;
352 if (nand_wait_cmddone() != 0) return 1;
353 if (nand_send_cmd(NAND_CMD_ERASECNFRM) != 0) return 1;
354 return nand_wait_status_ready(bank);
355}
356
357const struct nand_device_info_type* nand_get_device_type(uint32_t bank)
358{
359 if (nand_type[bank] == 0xFFFFFFFF)
360 return (struct nand_device_info_type*)0;
361 return &nand_deviceinfotable[nand_type[bank]];
362}
363
364uint32_t nand_device_init(void)
365{
366 uint32_t type;
367 uint32_t i, j;
368 PCON2 = 0x33333333;
369 PDAT2 = 0;
370 PCON3 = 0x11113333;
371 PDAT3 = 0;
372 PCON4 = 0x33333333;
373 PDAT4 = 0;
374 for (i = 0; i < 4; i++)
375 {
376 nand_tunk1[i] = 7;
377 nand_twp[i] = 7;
378 nand_tunk2[i] = 7;
379 nand_tunk3[i] = 7;
380 type = nand_get_chip_type(i);
381 nand_type[i] = 0xFFFFFFFF;
382 if (type == 0xFFFFFFFF) continue;
383 for (j = 0; ; j++)
384 {
385 if (j == ARRAYLEN(nand_deviceinfotable)) break;
386 else if (nand_deviceinfotable[j].id == type)
387 {
388 nand_type[i] = j;
389 break;
390 }
391 }
392 nand_tunk1[i] = nand_deviceinfotable[nand_type[i]].tunk1;
393 nand_twp[i] = nand_deviceinfotable[nand_type[i]].twp;
394 nand_tunk2[i] = nand_deviceinfotable[nand_type[i]].tunk2;
395 nand_tunk3[i] = nand_deviceinfotable[nand_type[i]].tunk3;
396 }
397 if (nand_type[0] == 0xFFFFFFFF) return 1;
398 return 0;
399}
diff --git a/firmware/target/arm/s5l8700/ipodnano2g/nand-target.h b/firmware/target/arm/s5l8700/ipodnano2g/nand-target.h
new file mode 100644
index 0000000000..bed94ee243
--- /dev/null
+++ b/firmware/target/arm/s5l8700/ipodnano2g/nand-target.h
@@ -0,0 +1,54 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2009 by Michael Sparmann
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef __NAND_H__
23#define __NAND_H__
24
25#include "config.h"
26#include "inttypes.h"
27
28
29struct nand_device_info_type
30{
31 uint32_t id;
32 uint16_t blocks;
33 uint32_t userblocks;
34 uint16_t pagesperblock;
35 uint8_t blocksizeexponent;
36 uint8_t tunk1;
37 uint8_t twp;
38 uint8_t tunk2;
39 uint8_t tunk3;
40} __attribute__((packed));
41
42uint32_t nand_read_page(uint32_t bank, uint32_t page, void* databuffer,
43 void* sparebuffer, uint32_t doecc,
44 uint32_t checkempty);
45uint32_t nand_write_page(uint32_t bank, uint32_t page, void* databuffer,
46 void* sparebuffer, uint32_t doecc);
47uint32_t nand_block_erase(uint32_t bank, uint32_t page);
48
49const struct nand_device_info_type* nand_get_device_type(uint32_t bank);
50uint32_t nand_reset(uint32_t bank);
51uint32_t nand_device_init(void);
52
53
54#endif
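
For reference, a minimal bring-up sketch mirroring what ftl_init() does with this API; the helper name, buffers, and error handling are illustrative only:

    #include "nand-target.h"

    static uint8_t page_buf[0x800];   /* main data of one page */
    static uint8_t spare_buf[0x40];   /* spare area of one page */

    int example_nand_probe(void)
    {
        const struct nand_device_info_type* info;
        uint32_t rc;

        if (nand_device_init() != 0) return -1;  /* bank 0 chip not recognized */
        info = nand_get_device_type(0);
        if (info == 0) return -1;

        /* Read page 0 of bank 0 with ECC and empty-page detection enabled. */
        rc = nand_read_page(0, 0, page_buf, spare_buf, 1, 1);
        if ((rc & 0x11F) != 0) return -1;  /* I/O error, ECC failure, or empty page */
        return 0;
    }
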