//
//
//    Copyright 2010 TheSeven
//
//
//    This file is part of emBIOS.
//
//    emBIOS is free software: you can redistribute it and/or
//    modify it under the terms of the GNU General Public License as
//    published by the Free Software Foundation, either version 2 of the
//    License, or (at your option) any later version.
//
//    emBIOS is distributed in the hope that it will be useful,
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//    See the GNU General Public License for more details.
//
//    You should have received a copy of the GNU General Public License along
//    with emBIOS.  If not, see <http://www.gnu.org/licenses/>.
//
//
22
 

#include "global.h"
#include "nand.h"
#include "storage.h"
#include "util.h"


//#define FTL_FORCEMOUNT


/* Force-mounting implies that writing must be disabled */
#ifdef FTL_FORCEMOUNT
#ifndef FTL_READONLY
#define FTL_READONLY
#endif
#endif

#ifdef FTL_READONLY
44
uint32_t storage_write(uint32_t sector, uint32_t count, const void* buffer)
45
{
46
    (void)sector;
47
    (void)count;
48
    (void)buffer;
49
    return -1;
50
}
51
uint32_t storage_sync(void)
52
{
53
    return 0;
54
}
55
#endif
56
 
57
 
58
 
59
/* Keeps the state of a scattered page block.
   This structure is used in memory only, not on flash,
   but it equals the one the OFW uses. */
struct ftl_log_type
{

    /* The ftl_cxt.nextblockusn at the time the block was allocated,
       needed in order to be able to remove the oldest ones first. */
    uint32_t usn;

    /* The vBlock number at which the scattered pages are stored */
    uint16_t scatteredvblock;

    /* The lBlock number for which those pages are */
    uint16_t logicalvblock;

    /* Pointer to ftl_offsets, contains the mapping which lPage is
       currently stored at which scattered vPage. */
    uint16_t* pageoffsets;

    /* Pages used in the vBlock, i.e. next page number to be written */
    uint16_t pagesused;

    /* Pages that are still up to date in this block, i.e. need to be
       moved when this vBlock is deallocated. */
    uint16_t pagescurrent;

    /* A flag whether all pages are still sequential in this block.
       Initialized to 1 on allocation, zeroed as soon as anything is
       written out of sequence, so that the block will need copying
       when committing to get the pages back into the right order.
       This is used to halve the number of block erases needed when
       writing huge amounts of sequential data. */
    uint32_t issequential;

} __attribute__((packed));
95
 
96
 
97
/* Keeps the state of the FTL, both on flash and in memory */
struct ftl_cxt_type
{

    /* Update sequence number of the FTL context, decremented
       every time a new revision of FTL meta data is written. */
    uint32_t usn;

    /* Update sequence number for user data blocks. Incremented
       every time a portion of user pages is written, so that
       a consistency check can determine which copy of a user
       page is the most recent one. */
    uint32_t nextblockusn;

    /* Count of currently free pages in the block pool */
    uint16_t freecount;

    /* Index to the first free block in the blockpool ring buffer */
    uint16_t nextfreeidx;

    /* This is a counter that is used to better distribute block
       wear. It is incremented on every block erase, and if it
       gets too high (300 on writes, 20 on sync), the most and
       least worn block will be swapped (inferring an additional
       block write) and the counter will be decreased by 20. */
    uint16_t swapcounter;

    /* Ring buffer of currently free blocks. nextfreeidx is the
       index to freecount free ones, the other ones are currently
       allocated for scattered page blocks. */
    uint16_t blockpool[0x14];

    /* Alignment to 32 bits */
    uint16_t field_36;

    /* vPages where the block map is stored */
    uint32_t ftl_map_pages[8];

    /* Probably additional map page number space for bigger chips */
    uint8_t field_58[0x28];

    /* vPages where the erase counters are stored */
    uint32_t ftl_erasectr_pages[8];

    /* Seems to be padding */
    uint8_t field_A0[0x70];

    /* Pointer to ftl_map used by Whimory, not used by us */
    uint32_t ftl_map_ptr;

    /* Pointer to ftl_erasectr used by Whimory, not used by us */
    uint32_t ftl_erasectr_ptr;

    /* Pointer to ftl_log used by Whimory, not used by us */
    uint32_t ftl_log_ptr;

    /* Flag used to indicate that some erase counter pages should be committed
       as they were changed more than 100 times since the last commit. */
    uint32_t erasedirty;

    /* Seems to be unused */
    uint16_t field_120;

    /* vBlocks used to store the FTL context, map, and erase
       counter pages. This is also a ring buffer, and the oldest
       page gets swapped with the least used page from the block
       pool ring buffer when a new one is allocated. */
    uint16_t ftlctrlblocks[3];

    /* The last used vPage number from ftlctrlblocks */
    uint32_t ftlctrlpage;

    /* Set on context sync, reset on write, so obviously never
       zero in the context written to the flash */
    uint32_t clean_flag;

    /* Seems to be unused, but gets loaded from flash by Whimory. */
    uint8_t field_130[0x15C];

} __attribute__((packed));
177
 
178
 
179
/* Keeps the state of the bank's VFL, both on flash and in memory.
   There is one of these per bank. */
struct ftl_vfl_cxt_type
{

    /* Cross-bank update sequence number, incremented on every VFL
       context commit on any bank. */
    uint32_t usn;

    /* See ftl_cxt.ftlctrlblocks. This is stored to the VFL contexts
       in order to be able to find the most recent FTL context copy
       when mounting the FTL. The VFL context number this will be
       written to on an FTL context commit is chosen semi-randomly. */
    uint16_t ftlctrlblocks[3];

    /* Alignment to 32 bits */
    uint8_t field_A[2];

    /* Decrementing update counter for VFL context commits per bank */
    uint32_t updatecount;

    /* Number of the currently active VFL context block, it's an index
       into vflcxtblocks. */
    uint16_t activecxtblock;

    /* Number of the first free page in the active FTL context block */
    uint16_t nextcxtpage;

    /* Seems to be unused */
    uint8_t field_14[4];

    /* Incremented every time a block erase error leads to a remap,
       but doesn't seem to be read anywhere. */
    uint16_t field_18;

    /* Number of spare blocks used */
    uint16_t spareused;

    /* pBlock number of the first spare block */
    uint16_t firstspare;

    /* Total number of spare blocks */
    uint16_t sparecount;

    /* Block remap table. Contains the vBlock number the n-th spare
       block is used as a replacement for. 0 = unused, 0xFFFF = bad. */
    uint16_t remaptable[0x334];

    /* Bad block table. Each bit represents 8 blocks. 1 = OK, 0 = Bad.
       If the entry is zero, you should look at the remap table to see
       if the block is remapped, and if yes, where the replacement is. */
    uint8_t bbt[0x11A];

    /* pBlock numbers used to store the VFL context. This is a ring
       buffer. On a VFL context write, always 8 pages are written,
       and it passes if at least 4 of them can be read back. */
    uint16_t vflcxtblocks[4];

    /* Blocks scheduled for remapping are stored at the end of the
       remap table. This is the first index used for them. */
    uint16_t scheduledstart;

    /* Probably padding */
    uint8_t field_7AC[0x4C];

    /* First checksum (addition) */
    uint32_t checksum1;

    /* Second checksum (XOR), there is a bug in whimory regarding this. */
    uint32_t checksum2;

} __attribute__((packed));
251
 
252
 
253
/* Layout of the spare bytes of each page on the flash */
union ftl_spare_data_type
{

    /* The layout used for actual user data (types 0x40 and 0x41) */
    struct ftl_spare_data_user_type
    {

        /* The lPage, i.e. Sector, number */
        uint32_t lpn;

        /* The update sequence number of that page,
           copied from ftl_cxt.nextblockusn on write */
        uint32_t usn;

        /* Seems to be unused */
        uint8_t field_8;

        /* Type field, 0x40 (data page) or 0x41 (last data page of block) */
        uint8_t type;

        /* ECC mark, usually 0xFF. If an error occurred while reading the
           page during a copying operation earlier, this will be 0x55. */
        uint8_t eccmark;

        /* Seems to be unused */
        uint8_t field_B;

        /* ECC data for the user data */
        uint8_t dataecc[0x28];

        /* ECC data for the first 0xC bytes above */
        uint8_t spareecc[0xC];

    } __attribute__((packed)) user;

    /* The layout used for meta data (other types) */
    struct ftl_spare_data_meta_type
    {

        /* ftl_cxt.usn for FTL stuff, ftl_vfl_cxt.updatecount for VFL stuff */
        uint32_t usn;

        /* Index of the thing inside the page,
           for example number / index of the map or erase counter page */
        uint16_t idx;

        /* Seems to be unused */
        uint8_t field_6;

        /* Seems to be unused */
        uint8_t field_7;

        /* Seems to be unused */
        uint8_t field_8;

        /* Type field:
             0x43: FTL context page
             0x44: Block map page
             0x46: Erase counter page
             0x47: "FTL is currently mounted", i.e. unclean shutdown, mark
             0x80: VFL context page */
        uint8_t type;

        /* ECC mark, usually 0xFF. If an error occurred while reading the
           page during a copying operation earlier, this will be 0x55. */
        uint8_t eccmark;

        /* Seems to be unused */
        uint8_t field_B;

        /* ECC data for the user data */
        uint8_t dataecc[0x28];

        /* ECC data for the first 0xC bytes above */
        uint8_t spareecc[0xC];

    } __attribute__((packed)) meta;

};
333
 
334
 
335
/* Keeps track of troublesome blocks, only in memory, lost on unmount. */
struct ftl_trouble_type
{

    /* vBlock number of the block giving trouble */
    uint16_t block;

    /* Bank of the block giving trouble */
    uint8_t bank;

    /* Error counter, incremented by 3 on error, decremented by 1 on erase,
       remapping will be done when it reaches 6. */
    uint8_t errors;

} __attribute__((packed));
350
 
351
 
352
 
353
/* Pointer to an info structure regarding the flash type used */
const struct nand_device_info_type* ftl_nand_type;

/* Number of banks we detected a chip on */
uint32_t ftl_banks;

/* Block map, used for pBlock to vBlock mapping */
uint16_t ftl_map[0x2000];

/* VFL context for each bank */
struct ftl_vfl_cxt_type ftl_vfl_cxt[4];

/* FTL context */
struct ftl_cxt_type ftl_cxt;

/* Temporary data buffer for internal use by the FTL */
uint8_t ftl_buffer[0x800] __attribute__((aligned(16)));

/* Temporary spare byte buffer for internal use by the FTL */
union ftl_spare_data_type ftl_sparebuffer __attribute__((aligned(16)));

/* Presumably set once the FTL has been mounted successfully
   -- TODO(review): confirm against the mount code (not in view) */
uint32_t ftl_initialized;


#ifndef FTL_READONLY

/* Lowlevel BBT for each bank */
uint8_t ftl_bbt[4][0x410];

/* Erase counters for the vBlocks */
uint16_t ftl_erasectr[0x2000];

/* Used by ftl_log */
uint16_t ftl_offsets[0x11][0x200];

/* Structs keeping record of scattered page blocks */
struct ftl_log_type ftl_log[0x11];

/* Global cross-bank update sequence number of the VFL context */
uint32_t ftl_vfl_usn;

/* Keeps track (temporarily) of troublesome blocks */
struct ftl_trouble_type ftl_troublelog[5];

/* Counts erase counter page changes, after 100 of them the affected
   page will be committed to the flash. */
uint8_t ftl_erasectr_dirt[8];

/* Buffer needed for copying pages around while moving or committing blocks.
   This can't be shared with ftl_buffer, because this one could be overwritten
   during the copying operation in order to e.g. commit a CXT. */
uint8_t ftl_copybuffer[0x800] __attribute__((aligned(16)));

/* Needed to store the old scattered page offsets in order to be able to roll
   back if something fails while compacting a scattered page block. */
uint16_t ftl_offsets_backup[0x200] __attribute__((aligned(16)));

#endif
411
 
412
 
413
 
414
 
415
 
416
/* Finds a device info page for the specified bank and returns its number.
417
   Used to check if one is present, and to read the lowlevel BBT. */
418
uint32_t ftl_find_devinfo(uint32_t bank)
419
{
420
    /* Scan the last 10% of the flash for device info pages */
421
    uint32_t lowestBlock = (*ftl_nand_type).blocks
422
                         - ((*ftl_nand_type).blocks / 10);
423
    uint32_t block, page, pagenum;
424
    for (block = (*ftl_nand_type).blocks - 1; block >= lowestBlock; block--)
425
    {
426
        page = (*ftl_nand_type).pagesperblock - 8;
427
        for (; page < (*ftl_nand_type).pagesperblock; page++)
428
        {
429
            pagenum = block * (*ftl_nand_type).pagesperblock + page;
430
            if ((nand_read_page(bank, pagenum, ftl_buffer,
431
                                &ftl_sparebuffer, 1, 0) & 0x11F) != 0)
432
                continue;
433
            if (memcmp(ftl_buffer, "DEVICEINFOSIGN\0", 0x10) == 0)
434
                return pagenum;
435
        }
436
    }
437
    return 0;
438
}
439
 
440
 
441
/* Checks if all banks have proper device info pages */
442
uint32_t ftl_has_devinfo(void)
443
{
444
    uint32_t i;
445
    for (i = 0; i < ftl_banks; i++) if (ftl_find_devinfo(i) == 0) return 0;
446
    return 1;
447
}
448
 
449
 
450
/* Loads the lowlevel BBT for a bank to the specified buffer.
   This is based on some cryptic disassembly and not fully understood yet.
   Returns 0 on success, 1 on failure. */
uint32_t ftl_load_bbt(uint32_t bank, uint8_t* bbt)
{
    uint32_t i, j;
    uint32_t pagebase, page = ftl_find_devinfo(bank), page2;
    uint32_t unk1, unk2, unk3;
    if (page == 0) return 1;  /* no device info page on this bank */
    /* First page of the block containing the devinfo page
       (assumes pagesperblock is a power of two) */
    pagebase = page & ~((*ftl_nand_type).pagesperblock - 1);
    if ((nand_read_page(bank, page, ftl_buffer,
                        (uint32_t*)0, 1, 0) & 0x11F) != 0) return 1;
    if (memcmp(&ftl_buffer[0x18], "BBT", 4) != 0) return 1;
    /* BBT copy layout parameters read from the devinfo page;
       exact meaning unknown (count / offset / stride by the way
       they are used below -- TODO confirm) */
    unk1 = ((uint16_t*)ftl_buffer)[0x10];
    unk2 = ((uint16_t*)ftl_buffer)[0x11];
    unk3 = ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 0xC + 10]
         + ((uint16_t*)ftl_buffer)[((uint32_t*)ftl_buffer)[4] * 0xC + 11];
    /* Try the candidate BBT copies until one reads back cleanly */
    for (i = 0; i < unk1; i++)
    {
        for (j = 0; ; j++)
        {
            page2 = unk2 + i + unk3 * j;
            /* Stay below the devinfo pages at the end of the block */
            if (page2 >= (uint32_t)((*ftl_nand_type).pagesperblock - 8))
                break;
            if ((nand_read_page(bank, pagebase + page2, ftl_buffer,
                                (void*)0, 1, 0) & 0x11F) == 0)
            {
                memcpy(bbt, ftl_buffer, 0x410);
                return 0;
            }
        }
    }
    return 1;
}
483
 
484
 
485
/* Calculates the checksums for the VFL context page of the specified bank */
486
void ftl_vfl_calculate_checksum(uint32_t bank,
487
                                uint32_t* checksum1, uint32_t* checksum2)
488
{
489
    uint32_t i;
490
    *checksum1 = 0xAABBCCDD;
491
    *checksum2 = 0xAABBCCDD;
492
    for (i = 0; i < 0x1FE; i++)
493
    {
494
        *checksum1 += ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
495
        *checksum2 ^= ((uint32_t*)(&ftl_vfl_cxt[bank]))[i];
496
    }
497
}
498
 
499
 
500
/* Checks if the checksums of the VFL context
501
   of the specified bank are correct */
502
uint32_t ftl_vfl_verify_checksum(uint32_t bank)
503
{
504
    uint32_t checksum1, checksum2;
505
    ftl_vfl_calculate_checksum(bank, &checksum1, &checksum2);
506
    if (checksum1 == ftl_vfl_cxt[bank].checksum1) return 0;
507
    /* The following line is pretty obviously a bug in Whimory,
508
       but we do it the same way for compatibility. */
509
    if (checksum2 != ftl_vfl_cxt[bank].checksum2) return 0;
510
    return 1;
511
}
512
 
513
 
514
#ifndef FTL_READONLY
515
/* Updates the checksums of the VFL context of the specified bank */
516
void ftl_vfl_update_checksum(uint32_t bank)
517
{
518
    ftl_vfl_calculate_checksum(bank, &ftl_vfl_cxt[bank].checksum1,
519
                               &ftl_vfl_cxt[bank].checksum2);
520
}
521
#endif
522
 
523
 
524
#ifndef FTL_READONLY
/* Writes 8 copies of the VFL context of the specified bank to flash,
   and succeeds if at least 4 can be read back properly.
   Returns 0 on success, 1 on failure. */
uint32_t ftl_vfl_store_cxt(uint32_t bank)
{
    uint32_t i;
    ftl_vfl_cxt[bank].updatecount--;
    ftl_vfl_cxt[bank].usn = ++ftl_vfl_usn;
    ftl_vfl_cxt[bank].nextcxtpage += 8;  /* reserve 8 pages for the copies */
    ftl_vfl_update_checksum(bank);
    memset(&ftl_sparebuffer, 0xFF, 0x40);
    ftl_sparebuffer.meta.usn = ftl_vfl_cxt[bank].updatecount;
    ftl_sparebuffer.meta.field_8 = 0;
    ftl_sparebuffer.meta.type = 0x80;  /* VFL context page */
    /* Write the copies to pages nextcxtpage-8 .. nextcxtpage-1
       of the active context block */
    for (i = 1; i <= 8; i++)
    {
        uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
        uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
        uint32_t page = block * (*ftl_nand_type).pagesperblock;
        page += ftl_vfl_cxt[bank].nextcxtpage - i;
        nand_write_page(bank, page, &ftl_vfl_cxt[bank], &ftl_sparebuffer, 1);
    }
    /* Read back and count the intact copies.
       NOTE(review): this loop checks pages nextcxtpage-0 .. nextcxtpage-7
       while the write loop above wrote nextcxtpage-1 .. nextcxtpage-8 --
       looks like an off-by-one; confirm against Whimory before changing. */
    uint32_t good = 0;
    for (i = 0; i < 8; i++)
    {
        uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
        uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
        uint32_t page = block * (*ftl_nand_type).pagesperblock;
        page += ftl_vfl_cxt[bank].nextcxtpage - i;
        if ((nand_read_page(bank, page, ftl_buffer,
                            &ftl_sparebuffer, 1, 0) & 0x11F) != 0)
            continue;
        if (memcmp(ftl_buffer, &ftl_vfl_cxt[bank], 0x7AC) != 0)
            continue;
        if (ftl_sparebuffer.meta.usn != ftl_vfl_cxt[bank].updatecount)
            continue;
        if (ftl_sparebuffer.meta.field_8 == 0
         && ftl_sparebuffer.meta.type == 0x80) good++;
    }
    return good > 3 ? 0 : 1;  /* require at least 4 good copies */
}
#endif
566
 
567
 
568
#ifndef FTL_READONLY
/* Commits the VFL context of the specified bank to flash,
   retries until it works or all available pages have been tried.
   Returns 0 on success, 1 on failure. */
uint32_t ftl_vfl_commit_cxt(uint32_t bank)
{
    /* Fast path: enough room left in the currently active context block */
    if (ftl_vfl_cxt[bank].nextcxtpage + 8 <= (*ftl_nand_type).pagesperblock)
        if (ftl_vfl_store_cxt(bank) == 0) return 0;
    /* Otherwise cycle through the other context blocks in the ring,
       erase one and retry the store there. */
    uint32_t current = ftl_vfl_cxt[bank].activecxtblock;
    uint32_t i = current, j;
    while (1)
    {
        i = (i + 1) & 3;  /* 4 context blocks, ring-style */
        if (i == current) break;  /* wrapped around: all blocks tried */
        if (ftl_vfl_cxt[bank].vflcxtblocks[i] == 0xFFFF) continue;  /* bad */
        /* Erase the candidate block, retrying up to 4 times */
        for (j = 0; j < 4; j++)
            if (nand_block_erase(bank, ftl_vfl_cxt[bank].vflcxtblocks[i]
                                     * (*ftl_nand_type).pagesperblock) == 0)
                break;
        if (j == 4) continue;  /* erase kept failing, try the next block */
        ftl_vfl_cxt[bank].activecxtblock = i;
        ftl_vfl_cxt[bank].nextcxtpage = 0;
        if (ftl_vfl_store_cxt(bank) == 0) return 0;
    }
    return 1;
}
#endif
594
 
595
 
596
/* Returns a pointer to the most recently updated VFL context,
597
   used to find out the current FTL context vBlock numbers
598
   (planetbeing's "maxthing") */
599
struct ftl_vfl_cxt_type* ftl_vfl_get_newest_cxt(void)
600
{
601
    uint32_t i, maxusn;
602
    struct ftl_vfl_cxt_type* cxt = (struct ftl_vfl_cxt_type*)0;
603
    maxusn = 0;
604
    for (i = 0; i < ftl_banks; i++)
605
        if (ftl_vfl_cxt[i].usn >= maxusn)
606
        {
607
            cxt = &ftl_vfl_cxt[i];
608
            maxusn = ftl_vfl_cxt[i].usn;
609
        }
610
    return cxt;
611
}
612
 
613
 
614
/* Checks if the specified pBlock is marked bad in the supplied lowlevel BBT.
   One bit per block, LSB-first within each byte; a set bit means good.
   Only used while mounting the VFL. Returns 1 for good, 0 for bad. */
uint32_t ftl_is_good_block(uint8_t* bbt, uint32_t block)
{
    uint32_t byteidx = block >> 3;
    uint8_t mask = 1 << (block & 7);
    return (bbt[byteidx] & mask) != 0 ? 1 : 0;
}
621
 
622
 
623
/* Checks if the specified vBlock could be remapped */
624
uint32_t ftl_vfl_is_good_block(uint32_t bank, uint32_t block)
625
{
626
    uint8_t bbtentry = ftl_vfl_cxt[bank].bbt[block >> 6];
627
    if ((bbtentry & (1 << ((7 - (block >> 3)) & 7))) == 0) return 0;
628
    else return 1;
629
}
630
 
631
 
632
#ifndef FTL_READONLY
633
/* Sets or unsets the bad bit of the specified vBlock
634
   in the specified bank's VFL context */
635
void ftl_vfl_set_good_block(uint32_t bank, uint32_t block, uint32_t isgood)
636
{
637
    uint8_t bit = (1 << ((7 - (block >> 3)) & 7));
638
    if (isgood == 1) ftl_vfl_cxt[bank].bbt[block >> 6] |= bit;
639
    else ftl_vfl_cxt[bank].bbt[block >> 6] &= ~bit;
640
}
641
#endif
642
 
643
 
644
/* Tries to read a VFL context from the specified bank, pBlock and page */
645
uint32_t ftl_vfl_read_page(uint32_t bank, uint32_t block,
646
                           uint32_t startpage, void* databuffer,
647
                           union ftl_spare_data_type* sparebuffer)
648
{
649
    uint32_t i;
650
    for (i = 0; i < 8; i++)
651
    {
652
        uint32_t page = block * (*ftl_nand_type).pagesperblock
653
                      + startpage + i;
654
        if ((nand_read_page(bank, page, databuffer,
655
                            sparebuffer, 1, 1) & 0x11F) == 0)
656
            if ((*sparebuffer).meta.field_8 == 0
657
             && (*sparebuffer).meta.type == 0x80)
658
                return 0;
659
    }
660
    return 1;
661
}
662
 
663
 
664
/* Translates a bank and vBlock to a pBlock, following remaps */
665
uint32_t ftl_vfl_get_physical_block(uint32_t bank, uint32_t block)
666
{
667
    if (ftl_vfl_is_good_block(bank, block) == 1) return block;
668
 
669
    uint32_t spareindex;
670
    uint32_t spareused = ftl_vfl_cxt[bank].spareused;
671
    for (spareindex = 0; spareindex < spareused; spareindex++)
672
        if (ftl_vfl_cxt[bank].remaptable[spareindex] == block)
673
            return ftl_vfl_cxt[bank].firstspare + spareindex;
674
    return block;
675
}
676
 
677
 
678
#ifndef FTL_READONLY
679
/* Checks if remapping is scheduled for the specified bank and vBlock */
680
uint32_t ftl_vfl_check_remap_scheduled(uint32_t bank, uint32_t block)
681
{
682
    uint32_t i;
683
    for (i = 0x333; i > 0 && i > ftl_vfl_cxt[bank].scheduledstart; i--)
684
        if (ftl_vfl_cxt[bank].remaptable[i] == block) return 1;
685
    return 0;
686
}
687
#endif
688
 
689
 
690
#ifndef FTL_READONLY
691
/* Schedules remapping for the specified bank and vBlock */
692
void ftl_vfl_schedule_block_for_remap(uint32_t bank, uint32_t block)
693
{
694
    if (ftl_vfl_check_remap_scheduled(bank, block) == 1) return;
695
    if (ftl_vfl_cxt[bank].scheduledstart == ftl_vfl_cxt[bank].spareused)
696
        return;
697
    ftl_vfl_cxt[bank].remaptable[--ftl_vfl_cxt[bank].scheduledstart] = block;
698
    ftl_vfl_commit_cxt(bank);
699
}
700
#endif
701
 
702
 
703
#ifndef FTL_READONLY
/* Removes the specified bank and vBlock combination
   from the remap scheduled list */
void ftl_vfl_mark_remap_done(uint32_t bank, uint32_t block)
{
    uint32_t i;
    uint32_t start = ftl_vfl_cxt[bank].scheduledstart;
    /* Newest scheduled entry (lowest index of the list) */
    uint32_t lastscheduled = ftl_vfl_cxt[bank].remaptable[start];
    for (i = 0x333; i > 0 && i > start; i--)
        if (ftl_vfl_cxt[bank].remaptable[i] == block)
        {
            /* Fill the hole with the newest entry, then shrink the list
               by bumping scheduledstart (unless the match is at either
               boundary, where no move is needed). */
            if (i != start && i != 0x333)
                ftl_vfl_cxt[bank].remaptable[i] = lastscheduled;
            ftl_vfl_cxt[bank].scheduledstart++;
            return;
        }
}
#endif
721
 
722
 
723
#ifndef FTL_READONLY
724
/* Logs that there is trouble for the specified vBlock on the specified bank.
725
   The vBlock will be scheduled for remap
726
   if there is too much trouble with it. */
727
void ftl_vfl_log_trouble(uint32_t bank, uint32_t vblock)
728
{
729
    uint32_t i;
730
    for (i = 0; i < 5; i++)
731
        if (ftl_troublelog[i].block == vblock
732
         && ftl_troublelog[i].bank == bank)
733
        {
734
            ftl_troublelog[i].errors += 3;
735
            if (ftl_troublelog[i].errors > 5)
736
            {
737
                ftl_vfl_schedule_block_for_remap(bank, vblock);
738
                ftl_troublelog[i].block = 0xFFFF;
739
            }
740
            return;
741
        }
742
    for (i = 0; i < 5; i++)
743
        if (ftl_troublelog[i].block == 0xFFFF)
744
        {
745
            ftl_troublelog[i].block = vblock;
746
            ftl_troublelog[i].bank = bank;
747
            ftl_troublelog[i].errors = 3;
748
            return;
749
        }
750
}
751
#endif
752
 
753
 
754
#ifndef FTL_READONLY
755
/* Logs a successful erase for the specified vBlock on the specified bank */
756
void ftl_vfl_log_success(uint32_t bank, uint32_t vblock)
757
{
758
    uint32_t i;
759
    for (i = 0; i < 5; i++)
760
        if (ftl_troublelog[i].block == vblock
761
         && ftl_troublelog[i].bank == bank)
762
        {
763
            if (--ftl_troublelog[i].errors == 0)
764
                ftl_troublelog[i].block = 0xFFFF;
765
            return;
766
        }
767
}
768
#endif
769
 
770
 
771
#ifndef FTL_READONLY
772
/* Tries to remap the specified vBlock on the specified bank,
773
   not caring about data in there.
774
   If it worked, it will return the new pBlock number,
775
   if not (no more spare blocks available), it will return zero. */
776
uint32_t ftl_vfl_remap_block(uint32_t bank, uint32_t block)
777
{
778
    uint32_t i;
779
    uint32_t newblock = 0, newidx;
780
    if (bank >= ftl_banks || block >= (*ftl_nand_type).blocks) return 0;
781
    for (i = 0; i < ftl_vfl_cxt[bank].sparecount; i++)
782
        if (ftl_vfl_cxt[bank].remaptable[i] == 0)
783
        {
784
            newblock = ftl_vfl_cxt[bank].firstspare + i;
785
            newidx = i;
786
            break;
787
        }
788
    if (newblock == 0) return 0;
789
    for (i = 0; i < 9; i++)
790
        if (nand_block_erase(bank,
791
                             newblock * (*ftl_nand_type).pagesperblock) == 0)
792
            break;
793
    for (i = 0; i < newidx; i++)
794
        if (ftl_vfl_cxt[bank].remaptable[i] == block)
795
            ftl_vfl_cxt[bank].remaptable[i] = 0xFFFF;
796
    ftl_vfl_cxt[bank].remaptable[newidx] = block;
797
    ftl_vfl_cxt[bank].spareused++;
798
    ftl_vfl_set_good_block(bank, block, 0);
799
    return newblock;
800
}
801
#endif
802
 
803
 
804
// Reads the specified vPage, dealing with all kinds of trouble
uint32_t ftl_vfl_read(uint32_t vpage, void* buffer, void* sparebuffer,
                      uint32_t checkempty, uint32_t remaponfail)
{
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    /* Hyperblocks reserved for FTL/VFL metadata below the user area */
    uint32_t syshyperblocks = (*ftl_nand_type).blocks
                            - (*ftl_nand_type).userblocks - 0x17;
    uint32_t abspage = vpage + ppb * syshyperblocks;
    if (abspage >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
    {
        return 4;  /* vPage out of range */
    }

    /* Consecutive pages are interleaved across the banks */
    uint32_t bank = abspage % ftl_banks;
    uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
    uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
    uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
    uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;

    uint32_t ret = nand_read_page(bank, physpage, buffer,
                                  sparebuffer, 1, checkempty);

    /* Hard error (error bits set, but not the "page empty" bit 1):
       reset the chip and retry once */
    if ((ret & 0x11D) != 0 && (ret & 2) == 0)
    {
        nand_reset(bank);
        ret = nand_read_page(bank, physpage, buffer,
                             sparebuffer, 1, checkempty);
#ifdef FTL_READONLY
        (void)remaponfail;
#else
        /* Retry failed as well: schedule the block for remapping */
        if (remaponfail == 1 &&(ret & 0x11D) != 0 && (ret & 2) == 0)
        {
            ftl_vfl_schedule_block_for_remap(bank, block);
        }
#endif
        return ret;
    }

    return ret;
}
844
 
845
 
846
#ifndef FTL_READONLY
/* Writes the specified vPage, dealing with all kinds of trouble.
   Returns 0 on success, 1 on failure (after logging the trouble). */
uint32_t ftl_vfl_write(uint32_t vpage, void* buffer, void* sparebuffer)
{
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    /* Hyperblocks reserved for FTL/VFL metadata below the user area */
    uint32_t syshyperblocks = (*ftl_nand_type).blocks
                            - (*ftl_nand_type).userblocks - 0x17;
    uint32_t abspage = vpage + ppb * syshyperblocks;
    if (abspage >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
    {
        return 4;  /* vPage out of range */
    }

    /* Consecutive pages are interleaved across the banks */
    uint32_t bank = abspage % ftl_banks;
    uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
    uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
    uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
    uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;

    if (nand_write_page(bank, physpage, buffer, sparebuffer, 1) == 0)
        return 0;
    /* Write reported failure, but accept it if the page reads back OK */
    if ((nand_read_page(bank, physpage, ftl_buffer,
                        &ftl_sparebuffer, 1, 1) & 0x11F) == 0)
        return 0;
    ftl_vfl_log_trouble(bank, block);
    return 1;
}
#endif
874
 
875
 
876
/* Mounts the VFL on all banks */
877
uint32_t ftl_vfl_open(void)
{
    uint32_t i, j, k;
    uint32_t minusn, vflcxtidx, last;
    struct ftl_vfl_cxt_type* cxt;
    uint16_t vflcxtblock[4];
#ifndef FTL_READONLY
    ftl_vfl_usn = 0;
#else
    /* Temporary BBT buffer if we're readonly,
       as we won't need it again after mounting */
    uint8_t bbt[0x410];
#endif

    /* Number of hyperblocks reserved at the start of the flash for
       VFL/FTL bookkeeping; the VFL context lives somewhere in there. */
    uint32_t syshyperblocks = (*ftl_nand_type).blocks
                            - (*ftl_nand_type).userblocks - 0x18;

    for (i = 0; i < ftl_banks; i++)
#ifndef FTL_READONLY
        if (ftl_load_bbt(i, ftl_bbt[i]) == 0)
#else
        if (ftl_load_bbt(i, bbt) == 0)
#endif
        {
            /* Scan the sys area for the first readable good block; its
               page 0 names the four VFL context block candidates. */
            for (j = 1; j <= syshyperblocks; j++)
#ifndef FTL_READONLY
                if (ftl_is_good_block(ftl_bbt[i], j) != 0)
#else
                if (ftl_is_good_block(bbt, j) != 0)
#endif
                    if (ftl_vfl_read_page(i, j, 0, ftl_buffer,
                                          &ftl_sparebuffer) == 0)
                    {
                        struct ftl_vfl_cxt_type* cxt;
                        cxt = (struct ftl_vfl_cxt_type*)ftl_buffer;
                        memcpy(vflcxtblock, &(*cxt).vflcxtblocks, 8);
                        /* Of the four candidates, the one with the lowest
                           nonzero USN holds the current VFL context. */
                        minusn = 0xFFFFFFFF;
                        vflcxtidx = 4;
                        for (k = 0; k < 4; k++)
                            if (vflcxtblock[k] != 0xFFFF)
                                if (ftl_vfl_read_page(i, vflcxtblock[k], 0,
                                                      ftl_buffer,
                                                      &ftl_sparebuffer) == 0)
                                    if (ftl_sparebuffer.meta.usn > 0
                                     && ftl_sparebuffer.meta.usn <= minusn)
                                    {
                                        minusn = ftl_sparebuffer.meta.usn;
                                        vflcxtidx = k;
                                    }
                        if (vflcxtidx == 4) return 1;  /* no usable context */
                        /* Contexts are appended in strides of 8 pages;
                           walk forward to the last one that reads cleanly. */
                        last = 0;
                        uint32_t max = (*ftl_nand_type).pagesperblock;
                        for (k = 8; k < max; k += 8)
                        {
                            if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
                                                  k, ftl_buffer,
                                                  &ftl_sparebuffer) != 0)
                                break;
                            last = k;
                        }
                        /* Re-read the newest context and install it */
                        if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
                                              last, ftl_buffer,
                                              &ftl_sparebuffer) != 0)
                            return 1;
                        memcpy(&ftl_vfl_cxt[i], ftl_buffer, 0x800);
                        if (ftl_vfl_verify_checksum(i) != 0) return 1;
#ifndef FTL_READONLY
                        /* Track the highest USN across banks so future
                           commits are numbered correctly */
                        if (ftl_vfl_usn < ftl_vfl_cxt[i].usn)
                            ftl_vfl_usn = ftl_vfl_cxt[i].usn;
#endif
                        break;
                    }
        }
        else
		{
		    return 1;
		}
    /* Propagate the FTL control block list of the newest context to all
       banks so they agree about where the FTL metadata lives. */
    cxt = ftl_vfl_get_newest_cxt();
    for (i = 0; i < ftl_banks; i++)
        memcpy(ftl_vfl_cxt[i].ftlctrlblocks, (*cxt).ftlctrlblocks, 6);
    return 0;
}
959
 
960
 
961
/* Mounts the actual FTL */
962
uint32_t ftl_open(void)
{
    uint32_t i;
    uint32_t ret;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    struct ftl_vfl_cxt_type* cxt = ftl_vfl_get_newest_cxt();

    /* Of the three FTL control blocks, pick the one whose first page
       carries the lowest logical page number: that is the active one. */
    uint32_t ftlcxtblock = 0xffffffff;
    uint32_t minlpn = 0xffffffff;
    for (i = 0; i < 3; i++)
    {
        ret = ftl_vfl_read(ppb * (*cxt).ftlctrlblocks[i],
                           ftl_buffer, &ftl_sparebuffer, 1, 0);
        if ((ret &= 0x11F) != 0) continue;  /* unreadable: skip candidate */
        if (ftl_sparebuffer.user.type - 0x43 > 4) continue;  /* not a ctrl page */
        if (ftlcxtblock != 0xffffffff && ftl_sparebuffer.user.lpn >= minlpn)
            continue;
        minlpn = ftl_sparebuffer.user.lpn;
        ftlcxtblock = (*cxt).ftlctrlblocks[i];
    }

    if (ftlcxtblock == 0xffffffff) return 1;

    /* Scan the chosen block backwards for the newest FTL context page
       (type 0x43). Hitting any other readable page first means the FTL
       was not cleanly unmounted. */
    uint32_t ftlcxtfound = 0;
    for (i = (*ftl_nand_type).pagesperblock * ftl_banks - 1; i > 0; i--)
    {
        ret = ftl_vfl_read(ppb * ftlcxtblock + i,
                           ftl_buffer, &ftl_sparebuffer, 1, 0);
        if ((ret & 0x11F) != 0) continue;
        else if (ftl_sparebuffer.user.type == 0x43)
        {
            memcpy(&ftl_cxt, ftl_buffer, 0x28C);
            ftlcxtfound = 1;
            break;
        }
        else
        {
            // This will trip if there was an unclean unmount before.
#ifndef FTL_FORCEMOUNT
            break;
#endif
        }
    }

    if (ftlcxtfound == 0) return 1;

    /* Load the logical-to-virtual block map (1024 entries per page).
       NOTE(review): the rounding mask 0x1FF does not match the >> 10
       divisor (1024 entries/page would need 0x3FF) - confirm against
       the original firmware before changing. */
    uint32_t pagestoread = (*ftl_nand_type).userblocks >> 10;
    if (((*ftl_nand_type).userblocks & 0x1FF) != 0) pagestoread++;

    for (i = 0; i < pagestoread; i++)
    {
        if ((ftl_vfl_read(ftl_cxt.ftl_map_pages[i],
                          ftl_buffer, &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
            return 1;

        /* Last page may be partially filled */
        uint32_t toread = 2048;
        if (toread > ((*ftl_nand_type).userblocks << 1) - (i << 11))
            toread = ((*ftl_nand_type).userblocks << 1) - (i << 11);

        memcpy(&ftl_map[i << 10], ftl_buffer, toread);
    }

#ifndef FTL_READONLY
    /* Load the erase counters (userblocks + 23 entries); same mask
       caveat as above. */
    pagestoread = ((*ftl_nand_type).userblocks + 23) >> 10;
    if ((((*ftl_nand_type).userblocks + 23) & 0x1FF) != 0) pagestoread++;

    for (i = 0; i < pagestoread; i++)
    {
        if ((ftl_vfl_read(ftl_cxt.ftl_erasectr_pages[i],
                          ftl_buffer, &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
            return 1;

        uint32_t toread = 2048;
        if (toread > (((*ftl_nand_type).userblocks + 23) << 1) - (i << 11))
            toread = (((*ftl_nand_type).userblocks + 23) << 1) - (i << 11);

        memcpy(&ftl_erasectr[i << 10], ftl_buffer, toread);
    }

    /* Reset all 0x11 scattered page block slots to "unused" */
    for (i = 0; i < 0x11; i++)
    {
        ftl_log[i].scatteredvblock = 0xFFFF;
        ftl_log[i].logicalvblock = 0xFFFF;
        ftl_log[i].pageoffsets = ftl_offsets[i];
    }

    memset(ftl_troublelog, 0xFF, 20);
    memset(ftl_erasectr_dirt, 0, 8);
#endif

    return 0;
}
1054
 
1055
 
1056
#ifndef FTL_READONLY
1057
/* Returns a pointer to the ftl_log entry for the specified vBlock,
1058
   or null, if there is none */
1059
struct ftl_log_type* ftl_get_log_entry(uint32_t block)
1060
{
1061
    uint32_t i;
1062
    for (i = 0; i < 0x11; i++)
1063
    {
1064
        if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1065
        if (ftl_log[i].logicalvblock == block) return &ftl_log[i];
1066
    }
1067
    return (struct ftl_log_type*)0;
1068
}
1069
#endif
1070
 
1071
/* Exposed function: Read highlevel sectors */
1072
/* Reads `count` 2KB sectors starting at `sector` into `buffer`.
   Returns 0 on success, 1 on range error or unrecoverable read error
   (failed sectors are zero-filled). */
uint32_t storage_read(uint32_t sector, uint32_t count, void* buffer)
{
    uint32_t i;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t error = 0;

    if (sector + count > (*ftl_nand_type).userblocks * ppb)
        return 1;
    if (count == 0) return 0;

    for (i = 0; i < count; i++)
    {
        uint32_t block = (sector + i) / ppb;
        uint32_t page = (sector + i) % ppb;

        /* Default location: the mapped vBlock for this logical block */
        uint32_t abspage = ftl_map[block] * ppb + page;
#ifndef FTL_READONLY
        /* If a scattered page block holds a newer copy, read that one */
        struct ftl_log_type* logentry = ftl_get_log_entry(block);
        if (logentry != (struct ftl_log_type*)0)
        {
            if ((*logentry).scatteredvblock != 0xFFFF
             && (*logentry).pageoffsets[page] != 0xFFFF)
            {
                abspage = (*logentry).scatteredvblock * ppb
                        + (*logentry).pageoffsets[page];
            }
        }
#endif

        uint32_t ret = ftl_vfl_read(abspage, &((uint8_t*)buffer)[i << 11],
                                    &ftl_sparebuffer, 1, 1);
        /* Bit 1 set: page was blank - return zeroes, not an error */
        if ((ret & 2) != 0) memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
        else if ((ret & 0x11D) != 0 || ftl_sparebuffer.user.eccmark != 0xFF)
        {
            /* Uncorrectable read: zero-fill and flag, but keep going so
               the caller gets as much data as possible */
            error = 1;
            memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
        }
    }
    return error;
}
1112
 
1113
 
1114
#ifndef FTL_READONLY
1115
/* Performs a vBlock erase, dealing with hardware,
1116
   remapping and all kinds of trouble */
1117
uint32_t ftl_erase_block_internal(uint32_t block)
{
    uint32_t i, j;
    /* Translate the user-area vBlock number into an absolute hyperblock
       number (skipping the reserved sys area at the start of flash) */
    block = block + (*ftl_nand_type).blocks
          - (*ftl_nand_type).userblocks - 0x17;
    if (block == 0 || block >= (*ftl_nand_type).blocks) return 1;
    for (i = 0; i < ftl_banks; i++)
    {
        /* Apply any remap that a previous failure scheduled for later */
        if (ftl_vfl_check_remap_scheduled(i, block) == 1)
        {
            ftl_vfl_remap_block(i, block);
            ftl_vfl_mark_remap_done(i, block);
        }
        ftl_vfl_log_success(i, block);
        uint32_t pblock = ftl_vfl_get_physical_block(i, block);
        uint32_t rc;
        /* Give the erase up to three tries before declaring the block bad */
        for (j = 0; j < 3; j++)
        {
            rc = nand_block_erase(i, pblock * (*ftl_nand_type).pagesperblock);
            if (rc == 0) break;
        }
        if (rc != 0)
        {
            /* Erase failed for good: retire the block and remap */
            if (pblock != block)
            {
                /* Block was already a spare: drop it from the remap table */
                uint32_t spareindex = pblock - ftl_vfl_cxt[i].firstspare;
                ftl_vfl_cxt[i].remaptable[spareindex] = 0xFFFF;
            }
            ftl_vfl_cxt[i].field_18++;
            if (ftl_vfl_remap_block(i, block) == 0) return 1;
            if (ftl_vfl_commit_cxt(i) != 0) return 1;
            memset(&ftl_sparebuffer, 0, 0x40);
            /* NOTE(review): writes ftl_vfl_cxt[0] (not [i]) to page number
               pblock on bank i - looks suspicious but may mirror the OFW;
               confirm before changing. */
            nand_write_page(i, pblock, &ftl_vfl_cxt[0], &ftl_sparebuffer, 1);
        }
    }
    return 0;
}
1154
#endif
1155
 
1156
 
1157
#ifndef FTL_READONLY
1158
/* Highlevel vBlock erase, that increments the erase counter for the block */
1159
uint32_t ftl_erase_block(uint32_t block)
1160
{
1161
    ftl_erasectr[block]++;
1162
    if (ftl_erasectr_dirt[block >> 10] == 100) ftl_cxt.erasedirty = 1;
1163
    else ftl_erasectr_dirt[block >> 10]++;
1164
    return ftl_erase_block_internal(block);
1165
}
1166
#endif
1167
 
1168
 
1169
#ifndef FTL_READONLY
1170
/* Allocates a block from the pool,
1171
   returning its vBlock number, or 0xFFFFFFFF on error */
1172
uint32_t ftl_allocate_pool_block(void)
{
    uint32_t i;
    uint32_t erasectr = 0xFFFFFFFF, bestidx = 0xFFFFFFFF, block;
    /* Scan the free list for the least-worn block (lowest erase count) */
    for (i = 0; i < ftl_cxt.freecount; i++)
    {
        uint32_t idx = ftl_cxt.nextfreeidx + i;
        if (idx >= 0x14) idx -= 0x14;  /* blockpool is a 0x14-entry ring */
        if (!ftl_cxt.blockpool[idx]) continue;
        if (ftl_erasectr[ftl_cxt.blockpool[idx]] < erasectr)
        {
            erasectr = ftl_erasectr[ftl_cxt.blockpool[idx]];
            bestidx = idx;
        }
    }
    if (bestidx == 0xFFFFFFFF) return 0xFFFFFFFF;  /* pool exhausted */
    block = ftl_cxt.blockpool[bestidx];
    /* Swap the chosen block to the head of the ring so consuming it
       keeps the free list contiguous */
    if (bestidx != ftl_cxt.nextfreeidx)
    {
        ftl_cxt.blockpool[bestidx] = ftl_cxt.blockpool[ftl_cxt.nextfreeidx];
        ftl_cxt.blockpool[ftl_cxt.nextfreeidx] = block;
    }
    /* Sanity check: block number must be inside the user area + spares */
    if (block > (uint32_t)(*ftl_nand_type).userblocks + 0x17) return 0xFFFFFFFF;
    if (ftl_erase_block(block) != 0) return 0xFFFFFFFF;
    if (++ftl_cxt.nextfreeidx == 0x14) ftl_cxt.nextfreeidx = 0;
    ftl_cxt.freecount--;
    return block;
}
1200
#endif
1201
 
1202
 
1203
#ifndef FTL_READONLY
1204
/* Releases a vBlock back into the pool */
1205
void ftl_release_pool_block(uint32_t block)
1206
{
1207
    uint32_t idx = ftl_cxt.nextfreeidx + ftl_cxt.freecount++;
1208
    if (idx >= 0x14) idx -= 0x14;
1209
    ftl_cxt.blockpool[idx] = block;
1210
}
1211
#endif
1212
 
1213
 
1214
#ifndef FTL_READONLY
1215
/* Commits the location of the FTL context blocks
1216
   to a semi-randomly chosen VFL context */
1217
uint32_t ftl_store_ctrl_block_list(void)
1218
{
1219
    uint32_t i;
1220
    for (i = 0; i < ftl_banks; i++)
1221
        memcpy(ftl_vfl_cxt[i].ftlctrlblocks, ftl_cxt.ftlctrlblocks, 6);
1222
    return ftl_vfl_commit_cxt(ftl_vfl_usn % ftl_banks);
1223
}
1224
#endif
1225
 
1226
 
1227
#ifndef FTL_READONLY
1228
/* Saves the n-th erase counter page to the flash,
1229
   because it is too dirty or needs to be moved. */
1230
uint32_t ftl_save_erasectr_page(uint32_t index)
1231
{
1232
    memset(&ftl_sparebuffer, 0xFF, 0x40);
1233
    ftl_sparebuffer.meta.usn = ftl_cxt.usn;
1234
    ftl_sparebuffer.meta.idx = index;
1235
    ftl_sparebuffer.meta.type = 0x46;
1236
    if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_erasectr[index << 10],
1237
                      &ftl_sparebuffer) != 0)
1238
        return 1;
1239
    if ((ftl_vfl_read(ftl_cxt.ftlctrlpage, ftl_buffer,
1240
                      &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
1241
        return 1;
1242
    if (memcmp(ftl_buffer, &ftl_erasectr[index << 10], 0x800) != 0) return 1;
1243
    if (ftl_sparebuffer.meta.type != 0x46) return 1;
1244
    if (ftl_sparebuffer.meta.idx != index) return 1;
1245
    if (ftl_sparebuffer.meta.usn != ftl_cxt.usn) return 1;
1246
    ftl_cxt.ftl_erasectr_pages[index] = ftl_cxt.ftlctrlpage;
1247
    ftl_erasectr_dirt[index] = 0;
1248
    return 0;
1249
}
1250
#endif
1251
 
1252
 
1253
#ifndef FTL_READONLY
1254
/* Increments ftl_cxt.ftlctrlpage to the next available FTL context page,
1255
   allocating a new context block if neccessary. */
1256
uint32_t ftl_next_ctrl_pool_page(void)
1257
{
1258
    uint32_t i;
1259
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1260
    if (++ftl_cxt.ftlctrlpage % ppb != 0) return 0;
1261
    for (i = 0; i < 3; i++)
1262
        if ((ftl_cxt.ftlctrlblocks[i] + 1) * ppb == ftl_cxt.ftlctrlpage)
1263
            break;
1264
    i = (i + 1) % 3;
1265
    uint32_t oldblock = ftl_cxt.ftlctrlblocks[i];
1266
    uint32_t newblock = ftl_allocate_pool_block();
1267
    if (newblock == 0xFFFFFFFF) return 1;
1268
    ftl_cxt.ftlctrlblocks[i] = newblock;
1269
    ftl_cxt.ftlctrlpage = newblock * ppb;
1270
    uint32_t pagestoread = ((*ftl_nand_type).userblocks + 23) >> 10;
1271
    if ((((*ftl_nand_type).userblocks + 23) & 0x1FF) != 0) pagestoread++;
1272
    for (i = 0; i < pagestoread; i++)
1273
        if (oldblock * ppb <= ftl_cxt.ftl_erasectr_pages[i]
1274
         && (oldblock + 1) * ppb > ftl_cxt.ftl_erasectr_pages[i])
1275
         {
1276
            ftl_cxt.usn--;
1277
            if (ftl_save_erasectr_page(i) != 0)
1278
            {
1279
                ftl_cxt.ftlctrlblocks[i] = oldblock;
1280
                ftl_cxt.ftlctrlpage = oldblock * (ppb + 1) - 1;
1281
                ftl_release_pool_block(newblock);
1282
                return 1;
1283
            }
1284
            ftl_cxt.ftlctrlpage++;
1285
         }
1286
    ftl_release_pool_block(oldblock);
1287
    return ftl_store_ctrl_block_list();
1288
}
1289
#endif
1290
 
1291
 
1292
#ifndef FTL_READONLY
1293
/* Copies a vPage from one location to another */
1294
uint32_t ftl_copy_page(uint32_t source, uint32_t destination,
1295
                       uint32_t lpn, uint32_t type)
1296
{
1297
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1298
    uint32_t rc = ftl_vfl_read(source, ftl_copybuffer,
1299
                               &ftl_sparebuffer, 1, 1) & 0x11F;
1300
    memset(&ftl_sparebuffer, 0xFF, 0x40);
1301
    ftl_sparebuffer.user.lpn = lpn;
1302
    ftl_sparebuffer.user.usn = ++ftl_cxt.nextblockusn;
1303
    ftl_sparebuffer.user.type = 0x40;
1304
    if ((rc & 2) != 0) memset(ftl_copybuffer, 0, 0x800);
1305
    else if (rc != 0) ftl_sparebuffer.user.eccmark = 0x55;
1306
    if (type == 1 && destination % ppb == ppb - 1)
1307
        ftl_sparebuffer.user.type = 0x41;
1308
    return ftl_vfl_write(destination, ftl_copybuffer, &ftl_sparebuffer);
1309
}
1310
#endif
1311
 
1312
 
1313
#ifndef FTL_READONLY
1314
/* Copies a pBlock to a vBlock */
1315
uint32_t ftl_copy_block(uint32_t source, uint32_t destination)
1316
{
1317
    uint32_t i;
1318
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1319
    uint32_t error = 0;
1320
    ftl_cxt.nextblockusn++;
1321
    for (i = 0; i < ppb; i++)
1322
    {
1323
        uint32_t rc = storage_read(source * ppb + i, 1, ftl_copybuffer);
1324
        memset(&ftl_sparebuffer, 0xFF, 0x40);
1325
        ftl_sparebuffer.user.lpn = source * ppb + i;
1326
        ftl_sparebuffer.user.usn = ftl_cxt.nextblockusn;
1327
        ftl_sparebuffer.user.type = 0x40;
1328
        if (rc != 0) ftl_sparebuffer.user.eccmark = 0x55;
1329
        if (i == ppb - 1) ftl_sparebuffer.user.type = 0x41;
1330
        if (ftl_vfl_write(destination * ppb + i,
1331
                          ftl_copybuffer, &ftl_sparebuffer) != 0)
1332
        {
1333
            error = 1;
1334
            break;
1335
        }
1336
    }
1337
    if (error != 0)
1338
    {
1339
        ftl_erase_block(destination);
1340
        return 1;
1341
    }
1342
    return 0;
1343
}
1344
#endif
1345
 
1346
 
1347
#ifndef FTL_READONLY
1348
/* Clears ftl_log.issequential, if something violating that is written. */
1349
void ftl_check_still_sequential(struct ftl_log_type* entry, uint32_t page)
1350
{
1351
    if ((*entry).pagesused != (*entry).pagescurrent
1352
     || (*entry).pageoffsets[page] != page)
1353
        (*entry).issequential = 0;
1354
}
1355
#endif
1356
 
1357
 
1358
#ifndef FTL_READONLY
1359
/* Copies all pages that are currently used from the scattered page block in
1360
   use by the supplied ftl_log entry to a newly-allocated one, and releases
1361
   the old one.
1362
   In other words: It kicks the pages containing old garbage out of it to make
1363
   space again. This is usually done when a scattered page block is being
1364
   removed because it is full, but less than half of the pages in there are
1365
   still in use and rest is just filled with old crap. */
1366
uint32_t ftl_compact_scattered(struct ftl_log_type* entry)
{
    uint32_t i, j;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t error;
    struct ftl_log_type backup;
    /* Nothing current in the block: just hand it back to the pool */
    if ((*entry).pagescurrent == 0)
    {
        ftl_release_pool_block((*entry).scatteredvblock);
        (*entry).scatteredvblock = 0xFFFF;
        return 0;
    }
    /* Snapshot the entry (including its page offset table) so a failed
       attempt can be rolled back losslessly */
    backup = *entry;
    memcpy(ftl_offsets_backup, (*entry).pageoffsets, 0x400);
    /* Up to four attempts, each into a freshly allocated block */
    for (i = 0; i < 4; i++)
    {
        uint32_t block = ftl_allocate_pool_block();
        if (block == 0xFFFFFFFF) return 1;
        (*entry).pagesused = 0;
        (*entry).pagescurrent = 0;
        (*entry).issequential = 1;
        (*entry).scatteredvblock = block;
        error = 0;
        /* Copy only the pages that are still current, packing them
           densely into the new block */
        for (j = 0; j < ppb; j++)
            if ((*entry).pageoffsets[j] != 0xFFFF)
            {
                uint32_t lpn = (*entry).logicalvblock * ppb + j;
                uint32_t newpage = block * ppb + (*entry).pagesused;
                uint32_t oldpage = backup.scatteredvblock * ppb
                                 + (*entry).pageoffsets[j];
                if (ftl_copy_page(oldpage, newpage, lpn,
                                  (*entry).issequential) != 0)
                {
                    error = 1;
                    break;
                }
                (*entry).pageoffsets[j] = (*entry).pagesused++;
                (*entry).pagescurrent++;
                ftl_check_still_sequential(entry, j);
            }
        /* Paranoia: the copy must not have changed the number of
           current pages */
        if (backup.pagescurrent != (*entry).pagescurrent) error = 1;
        if (error == 0)
        {
            ftl_release_pool_block(backup.scatteredvblock);
            break;
        }
        /* Attempt failed: restore the snapshot and retry */
        *entry = backup;
        memcpy((*entry).pageoffsets, ftl_offsets_backup, 0x400);
    }
    return error;
}
1417
#endif
1418
 
1419
 
1420
#ifndef FTL_READONLY
1421
/* Commits an ftl_log entry to proper blocks, no matter what's in there. */
1422
uint32_t ftl_commit_scattered(struct ftl_log_type* entry)
1423
{
1424
    uint32_t i;
1425
    uint32_t error;
1426
    uint32_t block;
1427
    for (i = 0; i < 4; i++)
1428
    {
1429
        block = ftl_allocate_pool_block();
1430
        if (block == 0xFFFFFFFF) return 1;
1431
        error = ftl_copy_block((*entry).logicalvblock, block);
1432
        if (error == 0) break;
1433
        ftl_release_pool_block(block);
1434
    }
1435
    if (error != 0) return 1;
1436
    ftl_release_pool_block((*entry).scatteredvblock);
1437
    (*entry).scatteredvblock = 0xFFFF;
1438
    ftl_release_pool_block(ftl_map[(*entry).logicalvblock]);
1439
    ftl_map[(*entry).logicalvblock] = block;
1440
    return 0;
1441
}
1442
#endif
1443
 
1444
 
1445
#ifndef FTL_READONLY
1446
/* Fills the rest of a scattered page block that was actually written
1447
   sequentially until now, in order to be able to save a block erase by
1448
   committing it without needing to copy it again.
1449
   If this fails for whichever reason, it will be committed the usual way. */
1450
uint32_t ftl_commit_sequential(struct ftl_log_type* entry)
{
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;

    /* Only applicable while the block is still strictly sequential */
    if ((*entry).issequential != 1
     || (*entry).pagescurrent != (*entry).pagesused)
        return 1;

    /* Fill the unwritten tail with the still-valid pages from the
       currently mapped block */
    for (; (*entry).pagesused < ppb; (*entry).pagesused++)
    {
        uint32_t lpn = (*entry).logicalvblock * ppb + (*entry).pagesused;
        uint32_t newpage = (*entry).scatteredvblock * ppb
                         + (*entry).pagesused;
        uint32_t oldpage = ftl_map[(*entry).logicalvblock] * ppb
                         + (*entry).pagesused;
        /* A tail page that was already written out of order, or a copy
           failure, breaks sequentiality: fall back to the slow path */
        if ((*entry).pageoffsets[(*entry).pagesused] != 0xFFFF
         || ftl_copy_page(oldpage, newpage, lpn, 1) != 0)
            return ftl_commit_scattered(entry);
    }
    /* The scattered block is now a complete image: swap it into the map
       and release the old block */
    ftl_release_pool_block(ftl_map[(*entry).logicalvblock]);
    ftl_map[(*entry).logicalvblock] = (*entry).scatteredvblock;
    (*entry).scatteredvblock = 0xFFFF;
    return 0;
}
1474
#endif
1475
 
1476
 
1477
#ifndef FTL_READONLY
1478
/* If a log entry is supplied, its scattered page block will be removed in
1479
   whatever way seems most appropriate. Else, the oldest scattered page block
1480
   will be freed by committing it. */
1481
uint32_t ftl_remove_scattered_block(struct ftl_log_type* entry)
{
    uint32_t i;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    uint32_t age = 0xFFFFFFFF, used = 0;
    if (entry == (struct ftl_log_type*)0)
    {
        /* No entry supplied: pick the oldest one (lowest USN),
           preferring more current pages on a tie */
        for (i = 0; i < 0x11; i++)
        {
            if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
            /* An open entry with zero pages should not exist here */
            if (ftl_log[i].pagesused == 0 || ftl_log[i].pagescurrent == 0)
                return 1;
            if (ftl_log[i].usn < age
             || (ftl_log[i].usn == age && ftl_log[i].pagescurrent > used))
            {
                age = ftl_log[i].usn;
                used = ftl_log[i].pagescurrent;
                entry = &ftl_log[i];
            }
        }
        if (entry == (struct ftl_log_type*)0) return 1;
    }
    else if ((*entry).pagescurrent < ppb / 2)
    {
        /* Less than half the pages are current: compacting the block is
           cheaper than committing it */
        ftl_cxt.swapcounter++;
        return ftl_compact_scattered(entry);
    }
    ftl_cxt.swapcounter++;
    /* Commit via the cheap sequential path when possible */
    if ((*entry).issequential == 1) return ftl_commit_sequential(entry);
    else return ftl_commit_scattered(entry);
}
1512
#endif
1513
 
1514
 
1515
#ifndef FTL_READONLY
1516
/* Initialize a log entry to the values for an empty scattered page block */
1517
void ftl_init_log_entry(struct ftl_log_type* entry)
1518
{
1519
    (*entry).issequential = 1;
1520
    (*entry).pagescurrent = 0;
1521
    (*entry).pagesused = 0;
1522
    memset((*entry).pageoffsets, 0xFF, 0x400);
1523
}
1524
#endif
1525
 
1526
 
1527
#ifndef FTL_READONLY
1528
/* Allocates a log entry for the specified vBlock,
1529
   first making space, if neccessary. */
1530
struct ftl_log_type* ftl_allocate_log_entry(uint32_t block)
1531
{
1532
    uint32_t i;
1533
    struct ftl_log_type* entry = ftl_get_log_entry(block);
1534
    if (entry != (struct ftl_log_type*)0) return entry;
1535
 
1536
    for (i = 0; i < 0x11; i++)
1537
    {
1538
        if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1539
        if (ftl_log[i].pagesused == 0)
1540
        {
1541
            entry = &ftl_log[i];
1542
            break;
1543
        }
1544
    }
1545
 
1546
    if (entry == (struct ftl_log_type*)0)
1547
    {
1548
        if (ftl_cxt.freecount <= 3)
1549
            if (ftl_remove_scattered_block((struct ftl_log_type*)0) != 0)
1550
                return (struct ftl_log_type*)0;
1551
        entry = ftl_log;
1552
        while ((*entry).scatteredvblock != 0xFFFF) entry = &entry[1];
1553
        (*entry).scatteredvblock = ftl_allocate_pool_block();
1554
        if ((*entry).scatteredvblock == 0xFFFF)
1555
        {
1556
            (*entry).scatteredvblock = 0xFFFF;
1557
            return (struct ftl_log_type*)0;
1558
        }
1559
    }
1560
 
1561
    ftl_init_log_entry(entry);
1562
    (*entry).logicalvblock = block;
1563
    (*entry).usn = ftl_cxt.nextblockusn - 1;
1564
 
1565
    return entry;
1566
}
1567
#endif
1568
 
1569
 
1570
#ifndef FTL_READONLY
1571
/* Commits the FTL block map, erase counters, and context to flash */
1572
uint32_t ftl_commit_cxt(void)
{
    uint32_t i;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
    /* ceil(userblocks / 1024) map pages, ceil((userblocks+23) / 1024)
       erase counter pages, plus one page for the context itself */
    uint32_t mappages = ((*ftl_nand_type).userblocks + 0x3ff) >> 10;
    uint32_t ctrpages = ((*ftl_nand_type).userblocks + 23 + 0x3ff) >> 10;
    uint32_t endpage = ftl_cxt.ftlctrlpage + mappages + ctrpages + 1;
    /* NOTE(review): x % ppb can never exceed ppb - 1, so this condition
       never holds; it looks intended to detect the commit crossing a
       block boundary (skip to the block's last page so a fresh block is
       allocated). Confirm the intended check before changing. */
    if (endpage % ppb > ppb - 1)
        ftl_cxt.ftlctrlpage |= ppb - 1;
    /* Write out all erase counter pages */
    for (i = 0; i < ctrpages; i++)
    {
        if (ftl_next_ctrl_pool_page() != 0) return 1;
        if (ftl_save_erasectr_page(i) != 0) return 1;
    }
    /* Write out all block map pages (tagged type 0x44) */
    for (i = 0; i < mappages; i++)
    {
        if (ftl_next_ctrl_pool_page() != 0) return 1;
        memset(&ftl_sparebuffer, 0xFF, 0x40);
        ftl_sparebuffer.meta.usn = ftl_cxt.usn;
        ftl_sparebuffer.meta.idx = i;
        ftl_sparebuffer.meta.type = 0x44;
        if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_map[i << 10],
                          &ftl_sparebuffer) != 0)
            return 1;
        ftl_cxt.ftl_map_pages[i] = ftl_cxt.ftlctrlpage;
    }
    /* Finally write the context page itself (type 0x43) with the clean
       flag set, marking the FTL as cleanly unmounted */
    if (ftl_next_ctrl_pool_page() != 0) return 1;
    ftl_cxt.clean_flag = 1;
    memset(&ftl_sparebuffer, 0xFF, 0x40);
    ftl_sparebuffer.meta.usn = ftl_cxt.usn;
    ftl_sparebuffer.meta.type = 0x43;
    if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_cxt, &ftl_sparebuffer) != 0)
        return 1;
    return 0;
}
1607
#endif
1608
 
1609
 
1610
#ifndef FTL_READONLY
1611
/* Swaps the most and least worn block on the flash,
1612
   to better distribute wear. It will refuse to do anything
1613
   if the wear spread is lower than 5 erases. */
1614
uint32_t ftl_swap_blocks(void)
1615
{
1616
    uint32_t i;
1617
    uint32_t min = 0xFFFFFFFF, max = 0, maxidx = 0x14;
1618
    uint32_t minidx = 0, minvb = 0, maxvb = 0;
1619
    for (i = 0; i < ftl_cxt.freecount; i++)
1620
    {
1621
        uint32_t idx = ftl_cxt.nextfreeidx + i;
1622
        if (idx >= 0x14) idx -= 0x14;
1623
        if (ftl_erasectr[ftl_cxt.blockpool[idx]] > max)
1624
        {
1625
            maxidx = idx;
1626
            maxvb = ftl_cxt.blockpool[idx];
1627
            max = ftl_erasectr[maxidx];
1628
        }
1629
    }
1630
    if (maxidx == 0x14) return 0;
1631
    for (i = 0; i < (*ftl_nand_type).userblocks; i++)
1632
    {
1633
        if (ftl_erasectr[ftl_map[i]] > max) max = ftl_erasectr[ftl_map[i]];
1634
        if (ftl_get_log_entry(i) != (struct ftl_log_type*)0) continue;
1635
        if (ftl_erasectr[ftl_map[i]] < min)
1636
        {
1637
            minidx = i;
1638
            minvb = ftl_map[i];
1639
            min = ftl_erasectr[minidx];
1640
        }
1641
    }
1642
    if (max - min < 5) return 0;
1643
    if (minvb == maxvb) return 0;
1644
    if (ftl_erase_block(maxvb) != 0) return 1;
1645
    if (ftl_copy_block(minidx, maxvb) != 0) return 1;
1646
    ftl_cxt.blockpool[maxidx] = minvb;
1647
    ftl_map[minidx] = maxvb;
1648
    return 0;
1649
}
1650
#endif
1651
 
1652
 
1653
#ifndef FTL_READONLY
1654
/* Exposed function: Write highlevel sectors */
1655
uint32_t storage_write(uint32_t sector, uint32_t count, const void* buffer)
{
    uint32_t i, j;
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;

    if (sector + count > (*ftl_nand_type).userblocks * ppb)
        return 1;

    if (count == 0) return 0;

    if (ftl_cxt.clean_flag == 1)
    {
        /* First write since mount/sync: put an "unclean" marker page
           (type 0x47) on flash so an unexpected shutdown is detectable */
        for (i = 0; i < 3; i++)
        {
            if (ftl_next_ctrl_pool_page() != 0) return 1;
            memset(ftl_buffer, 0xFF, 0x800);
            memset(&ftl_sparebuffer, 0xFF, 0x40);
            ftl_sparebuffer.meta.usn = ftl_cxt.usn;
            ftl_sparebuffer.meta.type = 0x47;
            if (ftl_vfl_write(ftl_cxt.ftlctrlpage, ftl_buffer,
                              &ftl_sparebuffer) == 0)
                break;
        }
        if (i == 3) return 1;   /* all three attempts failed */
        ftl_cxt.clean_flag = 0;
    }

    for (i = 0; i < count; )
    {
        uint32_t block = (sector + i) / ppb;
        uint32_t page = (sector + i) % ppb;

        struct ftl_log_type* logentry = ftl_allocate_log_entry(block);
        if (logentry == (struct ftl_log_type*)0) return 1;
        if (page == 0 && count - i >= ppb)
        {
            /* Fast path: a whole aligned hyperblock is being written,
               so bypass the scattered page mechanism entirely */
            uint32_t vblock = (*logentry).scatteredvblock;
            (*logentry).scatteredvblock = 0xFFFF;
            if ((*logentry).pagesused != 0)
            {
                /* Scattered block already has data: trade it for a
                   fresh, erased one */
                ftl_release_pool_block(vblock);
                vblock = ftl_allocate_pool_block();
                if (vblock == 0xFFFFFFFF) return 1;
            }
            ftl_cxt.nextblockusn++;
            for (j = 0; j < ppb; j++)
            {
                memset(&ftl_sparebuffer, 0xFF, 0x40);
                ftl_sparebuffer.user.lpn = sector + i + j;
                ftl_sparebuffer.user.usn = ftl_cxt.nextblockusn;
                ftl_sparebuffer.user.type = 0x40;
                if (j == ppb - 1) ftl_sparebuffer.user.type = 0x41;
                /* NOTE(review): retries forever on a persistent write
                   failure - confirm whether a bounded retry with error
                   return would be safe here */
                while (ftl_vfl_write(vblock * ppb + j,
                                     &((uint8_t*)buffer)[(i + j) << 11],
                                     &ftl_sparebuffer) != 0);
            }
            /* Swap the freshly written block into the map */
            ftl_release_pool_block(ftl_map[block]);
            ftl_map[block] = vblock;
            i += ppb;
        }
        else
        {
            /* Slow path: single page through a scattered page block */
            if ((*logentry).pagesused == ppb)
            {
                /* Scattered block is full: commit it and start anew */
                ftl_remove_scattered_block(logentry);
                logentry = ftl_allocate_log_entry(block);
                if (logentry == (struct ftl_log_type*)0) return 1;
            }
            memset(&ftl_sparebuffer, 0xFF, 0x40);
            ftl_sparebuffer.user.lpn = sector + i;
            ftl_sparebuffer.user.usn = ++ftl_cxt.nextblockusn;
            ftl_sparebuffer.user.type = 0x40;
            uint32_t abspage = (*logentry).scatteredvblock * ppb
                             + (*logentry).pagesused++;
            /* On failure the page slot is burned but i is not advanced,
               so the same sector is retried in the next slot */
            if (ftl_vfl_write(abspage, &((uint8_t*)buffer)[i << 11],
                              &ftl_sparebuffer) == 0)
            {
                if ((*logentry).pageoffsets[page] == 0xFFFF)
                    (*logentry).pagescurrent++;
                (*logentry).pageoffsets[page] = (*logentry).pagesused - 1;
                ftl_check_still_sequential(logentry, page);
                i++;
            }
        }
    }
    /* Opportunistic maintenance: wear leveling swaps */
    if (ftl_cxt.swapcounter >= 300)
    {
        ftl_cxt.swapcounter -= 20;
        for (i = 0; i < 4; i++) if (ftl_swap_blocks() == 0) break;
    }
    /* Flush erase counter pages that accumulated too much dirt */
    if (ftl_cxt.erasedirty == 1)
    {
        ftl_cxt.erasedirty = 0;
        for (i = 0; i < 8; i++)
            if (ftl_erasectr_dirt[i] >= 100)
            {
                ftl_next_ctrl_pool_page();
                ftl_save_erasectr_page(i);
            }
    }
    return 0;
}
1757
#endif
1758
 
1759
 
1760
#ifndef FTL_READONLY
1761
// Exposed function: Performes a sync / unmount, i.e. commits all scattered page blocks,
1762
// distributes wear, and commits the FTL context.
1763
uint32_t storage_sync()
1764
{
1765
    uint32_t i;
1766
    uint32_t rc = 0;
1767
    uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
1768
    if (ftl_cxt.clean_flag == 1) return 0;
1769
 
1770
    if (ftl_cxt.swapcounter >= 20)
1771
        for (i = 0; i < 4; i++)
1772
            if (ftl_swap_blocks() == 0)
1773
            {
1774
                ftl_cxt.swapcounter -= 20;
1775
                break;
1776
            }
1777
    for (i = 0; i < 0x11; i++)
1778
    {
1779
        if (ftl_log[i].scatteredvblock == 0xFFFF) continue;
1780
        ftl_cxt.nextblockusn++;
1781
        if (ftl_log[i].issequential == 1)
1782
            rc |= ftl_commit_sequential(&ftl_log[i]);
1783
        else rc |= ftl_commit_scattered(&ftl_log[i]);
1784
    }
1785
    if (rc == 0)
1786
        for (i = 0; i < 5; i++)
1787
            if (ftl_commit_cxt() == 0) return 0;
1788
            else ftl_cxt.ftlctrlpage |= ppb - 1;
1789
    return 1;
1790
}
1791
#endif
1792
 
1793
 
1794
uint32_t storage_get_sector_count()
1795
{
1796
    return (*ftl_nand_type).pagesperblock * ftl_banks * (*ftl_nand_type).userblocks;
1797
}
1798
 
1799
 
1800
/* Initializes and mounts the FTL.
1801
   As long as nothing was written, you won't need to unmount it.
1802
   Before shutting down after writing something, call storage_sync(),
1803
   which will just do nothing if everything was already clean. */
1804
uint32_t storage_init(void)
1805
{
1806
    uint32_t i;
1807
    uint32_t result = 0;
1808
    uint32_t foundsignature, founddevinfo, blockwiped, repaired, skip;
1809
 
1810
    if (ftl_initialized) return 0;
1811
 
1812
    if (nand_init() != 0) return 1;
1813
    ftl_banks = 0;
1814
    for (i = 0; i < 4; i++)
1815
        if (nand_get_device_type(i) != 0) ftl_banks = i + 1;
1816
    ftl_nand_type = nand_get_device_type(0);
1817
    foundsignature = 0;
1818
    blockwiped = 1;
1819
    for (i = 0; i < (*ftl_nand_type).pagesperblock; i++)
1820
    {
1821
        result = nand_read_page(0, i, ftl_buffer, (uint32_t*)0, 1, 1);
1822
        if ((result & 0x11F) == 0)
1823
        {
1824
            blockwiped = 0;
1825
            if (((uint32_t*)ftl_buffer)[0] != 0x41303034) continue;
1826
            foundsignature = 1;
1827
            break;
1828
        }
1829
        else if ((result & 2) != 2) blockwiped = 0;
1830
    }
1831
    founddevinfo = ftl_has_devinfo();
1832
 
1833
    repaired = 0;
1834
    skip = 0;
1835
 
1836
    if (founddevinfo == 0) return 1;
1837
 
1838
    if (foundsignature != 0 && (result & 0x11F) != 0) return 1;
1839
 
1840
    if (ftl_vfl_open() == 0)
1841
        if (ftl_open() == 0)
1842
        {
1843
            ftl_initialized = 1;
1844
            return 0;
1845
        }
1846
 
1847
    return 1;
1848
}