Revert r25099, r25101, r25109 and r25137 for now. This doesn't seem to be quite stable on some NAND types yet.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@25139 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Michael Sparmann 2010-03-13 00:43:34 +00:00
parent a4caff91f9
commit 131bb698ad
3 changed files with 157 additions and 541 deletions

View file

@ -31,8 +31,6 @@
#define FTL_COPYBUF_SIZE 32
#define FTL_WRITESPARE_SIZE 32
//#define FTL_FORCEMOUNT
@ -374,7 +372,7 @@ struct ftl_cxt_type ftl_cxt;
uint8_t ftl_buffer[0x800] __attribute__((aligned(16)));
/* Temporary spare byte buffer for internal use by the FTL */
union ftl_spare_data_type ftl_sparebuffer[FTL_WRITESPARE_SIZE] __attribute__((aligned(16)));
union ftl_spare_data_type ftl_sparebuffer __attribute__((aligned(16)));
#ifndef FTL_READONLY
@ -404,8 +402,7 @@ uint8_t ftl_erasectr_dirt[8];
/* Buffer needed for copying pages around while moving or committing blocks.
This can't be shared with ftl_buffer, because this one could be overwritten
during the copying operation in order to e.g. commit a CXT. */
uint8_t ftl_copybuffer[FTL_COPYBUF_SIZE][0x800] __attribute__((aligned(16)));
union ftl_spare_data_type ftl_copyspare[FTL_COPYBUF_SIZE] __attribute__((aligned(16)));
uint8_t ftl_copybuffer[0x800] __attribute__((aligned(16)));
/* Needed to store the old scattered page offsets in order to be able to roll
back if something fails while compacting a scattered page block. */
@ -433,7 +430,7 @@ uint32_t ftl_find_devinfo(uint32_t bank)
{
pagenum = block * (*ftl_nand_type).pagesperblock + page;
if ((nand_read_page(bank, pagenum, ftl_buffer,
&ftl_sparebuffer[0], 1, 0) & 0x11F) != 0)
&ftl_sparebuffer, 1, 0) & 0x11F) != 0)
continue;
if (memcmp(ftl_buffer, "DEVICEINFOSIGN\0", 0x10) == 0)
return pagenum;
@ -537,34 +534,34 @@ uint32_t ftl_vfl_store_cxt(uint32_t bank)
ftl_vfl_cxt[bank].usn = ++ftl_vfl_usn;
ftl_vfl_cxt[bank].nextcxtpage += 8;
ftl_vfl_update_checksum(bank);
memset(&ftl_sparebuffer[0], 0xFF, 0x40);
ftl_sparebuffer[0].meta.usn = ftl_vfl_cxt[bank].updatecount;
ftl_sparebuffer[0].meta.field_8 = 0;
ftl_sparebuffer[0].meta.type = 0x80;
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.meta.usn = ftl_vfl_cxt[bank].updatecount;
ftl_sparebuffer.meta.field_8 = 0;
ftl_sparebuffer.meta.type = 0x80;
for (i = 1; i <= 8; i++)
{
uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
uint32_t page = block * (*ftl_nand_type).pagesperblock;
page += ftl_vfl_cxt[bank].nextcxtpage - i;
nand_write_page(bank, page, &ftl_vfl_cxt[bank], &ftl_sparebuffer[0], 1);
nand_write_page(bank, page, &ftl_vfl_cxt[bank], &ftl_sparebuffer, 1);
}
uint32_t good = 0;
for (i = 1; i <= 8; i++)
for (i = 0; i < 8; i++)
{
uint32_t index = ftl_vfl_cxt[bank].activecxtblock;
uint32_t block = ftl_vfl_cxt[bank].vflcxtblocks[index];
uint32_t page = block * (*ftl_nand_type).pagesperblock;
page += ftl_vfl_cxt[bank].nextcxtpage - i;
if ((nand_read_page(bank, page, ftl_buffer,
&ftl_sparebuffer[0], 1, 0) & 0x11F) != 0)
&ftl_sparebuffer, 1, 0) & 0x11F) != 0)
continue;
if (memcmp(ftl_buffer, &ftl_vfl_cxt[bank], 0x7AC) != 0)
continue;
if (ftl_sparebuffer[0].meta.usn != ftl_vfl_cxt[bank].updatecount)
if (ftl_sparebuffer.meta.usn != ftl_vfl_cxt[bank].updatecount)
continue;
if (ftl_sparebuffer[0].meta.field_8 == 0
&& ftl_sparebuffer[0].meta.type == 0x80) good++;
if (ftl_sparebuffer.meta.field_8 == 0
&& ftl_sparebuffer.meta.type == 0x80) good++;
}
return good > 3 ? 0 : 1;
}
@ -850,140 +847,36 @@ uint32_t ftl_vfl_read(uint32_t vpage, void* buffer, void* sparebuffer,
}
/* Multi-bank version of ftl_vfl_read, will read ftl_banks pages in parallel */
uint32_t ftl_vfl_read_fast(uint32_t vpage, void* buffer, void* sparebuffer,
uint32_t checkempty, uint32_t remaponfail)
#ifndef FTL_READONLY
/* Writes the specified vPage, dealing with all kinds of trouble */
uint32_t ftl_vfl_write(uint32_t vpage, void* buffer, void* sparebuffer)
{
uint32_t i, rc = 0;
uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
uint32_t syshyperblocks = (*ftl_nand_type).blocks
- (*ftl_nand_type).userblocks - 0x17;
uint32_t abspage = vpage + ppb * syshyperblocks;
if (abspage + ftl_banks - 1 >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
panicf("FTL: Trying to read out-of-bounds vPage %u", (unsigned)vpage);
if (abspage >= (*ftl_nand_type).blocks * ppb || abspage < ppb)
panicf("FTL: Trying to write out-of-bounds vPage %u",
(unsigned)vpage);
//return 4;
uint32_t bank = abspage % ftl_banks;
uint32_t block = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
if (bank)
{
for (i = 0; i < ftl_banks; i++)
{
void* databuf = (void*)0;
void* sparebuf = (void*)0;
if (buffer) databuf = (void*)((uint32_t)buffer + 0x800 * i);
if (sparebuffer) sparebuf = (void*)((uint32_t)sparebuffer + 0x40 * i);
uint32_t ret = ftl_vfl_read(vpage + i, databuf, sparebuf, checkempty, remaponfail);
if (ret & 1) rc |= 1 << (i << 2);
if (ret & 2) rc |= 2 << (i << 2);
if (ret & 0x10) rc |= 4 << (i << 2);
if (ret & 0x100) rc |= 8 << (i << 2);
}
return rc;
}
uint32_t physblock = ftl_vfl_get_physical_block(bank, block);
uint32_t physpage = physblock * (*ftl_nand_type).pagesperblock + page;
rc = nand_read_page_fast(physpage, buffer, sparebuffer, 1, checkempty);
if (!(rc & 0xdddd)) return rc;
if (nand_write_page(bank, physpage, buffer, sparebuffer, 1) == 0)
return 0;
for (i = 0; i < ftl_banks; i++)
{
if ((rc >> (i << 2)) & 0x2) continue;
if ((rc >> (i << 2)) & 0xf)
{
rc &= ~(0xf << (i << 2));
nand_reset(i);
uint32_t ret = nand_read_page(i, physpage,
(void*)((uint32_t)buffer + 0x800 * i),
(void*)((uint32_t)sparebuffer + 0x40 * i),
1, checkempty);
#ifdef FTL_READONLY
(void)remaponfail;
#else
if (remaponfail == 1 && (ret & 0x11D) != 0 && (ret & 2) == 0)
ftl_vfl_schedule_block_for_remap(i, block);
#endif
if (ret & 1) rc |= 1 << (i << 2);
if (ret & 2) rc |= 2 << (i << 2);
if (ret & 0x10) rc |= 4 << (i << 2);
if (ret & 0x100) rc |= 8 << (i << 2);
}
}
if ((nand_read_page(bank, physpage, ftl_buffer,
&ftl_sparebuffer, 1, 1) & 0x11F) == 0)
return 0;
return rc;
}
#ifndef FTL_READONLY
/* Writes the specified vPage, dealing with all kinds of trouble */
uint32_t ftl_vfl_write(uint32_t vpage, uint32_t count,
void* buffer, void* sparebuffer)
{
uint32_t i, j;
uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
uint32_t syshyperblocks = (*ftl_nand_type).blocks
- (*ftl_nand_type).userblocks - 0x17;
uint32_t abspage = vpage + ppb * syshyperblocks;
if (abspage + count > (*ftl_nand_type).blocks * ppb || abspage < ppb)
panicf("FTL: Trying to write out-of-bounds vPage %u",
(unsigned)vpage);
//return 4;
uint32_t bank[5];
uint32_t block[5];
uint32_t physpage[5];
for (i = 0; i < count; i++, abspage++)
{
for (j = ftl_banks; j > 0; j--)
{
bank[j] = bank[j - 1];
block[j] = block[j - 1];
physpage[j] = physpage[j - 1];
}
bank[0] = abspage % ftl_banks;
block[0] = abspage / ((*ftl_nand_type).pagesperblock * ftl_banks);
uint32_t page = (abspage / ftl_banks) % (*ftl_nand_type).pagesperblock;
uint32_t physblock = ftl_vfl_get_physical_block(bank[0], block[0]);
physpage[0] = physblock * (*ftl_nand_type).pagesperblock + page;
if (i >= ftl_banks)
if (nand_write_page_collect(bank[ftl_banks]))
if (nand_read_page(bank[ftl_banks], physpage[ftl_banks],
ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F)
{
panicf("FTL: write error (2) on vPage %u, bank %u, pPage %u",
(unsigned)(vpage + i - ftl_banks),
(unsigned)bank[ftl_banks],
(unsigned)physpage[ftl_banks]);
ftl_vfl_log_trouble(bank[ftl_banks], block[ftl_banks]);
}
if (nand_write_page_start(bank[0], physpage[0],
(void*)((uint32_t)buffer + 0x800 * i),
(void*)((uint32_t)sparebuffer + 0x40 * i), 1))
if (nand_read_page(bank[0], physpage[0], ftl_buffer,
&ftl_sparebuffer[0], 1, 1) & 0x11F)
{
panicf("FTL: write error (1) on vPage %u, bank %u, pPage %u",
(unsigned)(vpage + i), (unsigned)bank[0], (unsigned)physpage[0]);
ftl_vfl_log_trouble(bank[0], block[0]);
}
}
for (i = count < ftl_banks ? count : ftl_banks; i > 0; i--)
if (nand_write_page_collect(bank[i - 1]))
if (nand_read_page(bank[i - 1], physpage[i - 1],
ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F)
{
panicf("FTL: write error (2) on vPage %u, bank %u, pPage %u",
(unsigned)(vpage + count - i),
(unsigned)bank[i - 1], (unsigned)physpage[i - 1]);
ftl_vfl_log_trouble(bank[i - 1], block[i - 1]);
}
return 0;
panicf("FTL: write error on vPage %u, bank %u, pPage %u",
(unsigned)vpage, (unsigned)bank, (unsigned)physpage);
ftl_vfl_log_trouble(bank, block);
return 1;
}
#endif
@ -1020,7 +913,7 @@ uint32_t ftl_vfl_open(void)
if (ftl_is_good_block(bbt, j) != 0)
#endif
if (ftl_vfl_read_page(i, j, 0, ftl_buffer,
&ftl_sparebuffer[0]) == 0)
&ftl_sparebuffer) == 0)
{
struct ftl_vfl_cxt_type* cxt;
cxt = (struct ftl_vfl_cxt_type*)ftl_buffer;
@ -1031,11 +924,11 @@ uint32_t ftl_vfl_open(void)
if (vflcxtblock[k] != 0xFFFF)
if (ftl_vfl_read_page(i, vflcxtblock[k], 0,
ftl_buffer,
&ftl_sparebuffer[0]) == 0)
if (ftl_sparebuffer[0].meta.usn > 0
&& ftl_sparebuffer[0].meta.usn <= minusn)
&ftl_sparebuffer) == 0)
if (ftl_sparebuffer.meta.usn > 0
&& ftl_sparebuffer.meta.usn <= minusn)
{
minusn = ftl_sparebuffer[0].meta.usn;
minusn = ftl_sparebuffer.meta.usn;
vflcxtidx = k;
}
if (vflcxtidx == 4) //return 1;
@ -1047,13 +940,13 @@ uint32_t ftl_vfl_open(void)
{
if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
k, ftl_buffer,
&ftl_sparebuffer[0]) != 0)
&ftl_sparebuffer) != 0)
break;
last = k;
}
if (ftl_vfl_read_page(i, vflcxtblock[vflcxtidx],
last, ftl_buffer,
&ftl_sparebuffer[0]) != 0)
&ftl_sparebuffer) != 0)
panicf("FTL: Re-reading VFL CXT block "
"on bank %u failed!?", (unsigned)i);
//return 1;
@ -1088,12 +981,12 @@ uint32_t ftl_open(void)
for (i = 0; i < 3; i++)
{
ret = ftl_vfl_read(ppb * (*cxt).ftlctrlblocks[i],
ftl_buffer, &ftl_sparebuffer[0], 1, 0);
ftl_buffer, &ftl_sparebuffer, 1, 0);
if ((ret &= 0x11F) != 0) continue;
if (ftl_sparebuffer[0].meta.type - 0x43 > 4) continue;
if (ftlcxtblock != 0xffffffff && ftl_sparebuffer[0].meta.usn >= minusn)
if (ftl_sparebuffer.meta.type - 0x43 > 4) continue;
if (ftlcxtblock != 0xffffffff && ftl_sparebuffer.meta.usn >= minusn)
continue;
minusn = ftl_sparebuffer[0].meta.usn;
minusn = ftl_sparebuffer.meta.usn;
ftlcxtblock = (*cxt).ftlctrlblocks[i];
}
@ -1104,9 +997,9 @@ uint32_t ftl_open(void)
for (i = (*ftl_nand_type).pagesperblock * ftl_banks - 1; i > 0; i--)
{
ret = ftl_vfl_read(ppb * ftlcxtblock + i,
ftl_buffer, &ftl_sparebuffer[0], 1, 0);
ftl_buffer, &ftl_sparebuffer, 1, 0);
if ((ret & 0x11F) != 0) continue;
else if (ftl_sparebuffer[0].meta.type == 0x43)
else if (ftl_sparebuffer.meta.type == 0x43)
{
memcpy(&ftl_cxt, ftl_buffer, 0x28C);
ftlcxtfound = 1;
@ -1131,7 +1024,7 @@ uint32_t ftl_open(void)
for (i = 0; i < pagestoread; i++)
{
if ((ftl_vfl_read(ftl_cxt.ftl_map_pages[i],
ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
ftl_buffer, &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
panicf("FTL: Failed to read block map page %u", (unsigned)i);
//return 1;
@ -1149,7 +1042,7 @@ uint32_t ftl_open(void)
for (i = 0; i < pagestoread; i++)
{
if ((ftl_vfl_read(ftl_cxt.ftl_erasectr_pages[i],
ftl_buffer, &ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
ftl_buffer, &ftl_sparebuffer, 1, 1) & 0x11F) != 0)
panicf("FTL: Failed to read erase counter page %u", (unsigned)i);
//return 1;
@ -1193,7 +1086,7 @@ struct ftl_log_type* ftl_get_log_entry(uint32_t block)
/* Exposed function: Read highlevel sectors */
uint32_t ftl_read(uint32_t sector, uint32_t count, void* buffer)
{
uint32_t i, j;
uint32_t i;
uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
uint32_t error = 0;
@ -1219,35 +1112,13 @@ uint32_t ftl_read(uint32_t sector, uint32_t count, void* buffer)
+ (*logentry).pageoffsets[page];
#endif
#ifndef FTL_READONLY
if (count >= i + ftl_banks && !(page & (ftl_banks - 1))
&& logentry == (struct ftl_log_type*)0)
#else
if (count >= i + ftl_banks && !(page & (ftl_banks - 1)))
#endif
uint32_t ret = ftl_vfl_read(abspage, &((uint8_t*)buffer)[i << 11],
&ftl_sparebuffer, 1, 1);
if ((ret & 2) != 0) memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
else if ((ret & 0x11D) != 0 || ftl_sparebuffer.user.eccmark != 0xFF)
{
uint32_t ret = ftl_vfl_read_fast(abspage, &((uint8_t*)buffer)[i << 11],
&ftl_sparebuffer[0], 1, 1);
for (j = 0; j < ftl_banks; j++)
if (ret & (2 << (j << 2)))
memset(&((uint8_t*)buffer)[(i + j) << 11], 0, 0x800);
else if ((ret & (0xd << (j << 2))) || ftl_sparebuffer[j].user.eccmark != 0xFF)
{
error = 1;
memset(&((uint8_t*)buffer)[(i + j) << 11], 0, 0x800);
}
i += ftl_banks - 1;
}
else
{
uint32_t ret = ftl_vfl_read(abspage, &((uint8_t*)buffer)[i << 11],
&ftl_sparebuffer[0], 1, 1);
if (ret & 2) memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
else if ((ret & 0x11D) != 0 || ftl_sparebuffer[0].user.eccmark != 0xFF)
{
error = 1;
memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
}
error = 1;
memset(&((uint8_t*)buffer)[i << 11], 0, 0x800);
}
}
@ -1380,20 +1251,20 @@ uint32_t ftl_store_ctrl_block_list(void)
because it is too dirty or needs to be moved. */
uint32_t ftl_save_erasectr_page(uint32_t index)
{
memset(&ftl_sparebuffer[0], 0xFF, 0x40);
ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
ftl_sparebuffer[0].meta.idx = index;
ftl_sparebuffer[0].meta.type = 0x46;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_erasectr[index << 10],
&ftl_sparebuffer[0]) != 0)
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.meta.usn = ftl_cxt.usn;
ftl_sparebuffer.meta.idx = index;
ftl_sparebuffer.meta.type = 0x46;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_erasectr[index << 10],
&ftl_sparebuffer) != 0)
return 1;
if ((ftl_vfl_read(ftl_cxt.ftlctrlpage, ftl_buffer,
&ftl_sparebuffer[0], 1, 1) & 0x11F) != 0)
&ftl_sparebuffer, 1, 1) & 0x11F) != 0)
return 1;
if (memcmp(ftl_buffer, &ftl_erasectr[index << 10], 0x800) != 0) return 1;
if (ftl_sparebuffer[0].meta.type != 0x46) return 1;
if (ftl_sparebuffer[0].meta.idx != index) return 1;
if (ftl_sparebuffer[0].meta.usn != ftl_cxt.usn) return 1;
if (ftl_sparebuffer.meta.type != 0x46) return 1;
if (ftl_sparebuffer.meta.idx != index) return 1;
if (ftl_sparebuffer.meta.usn != ftl_cxt.usn) return 1;
ftl_cxt.ftl_erasectr_pages[index] = ftl_cxt.ftlctrlpage;
ftl_erasectr_dirt[index] = 0;
return 0;
@ -1446,17 +1317,17 @@ uint32_t ftl_copy_page(uint32_t source, uint32_t destination,
uint32_t lpn, uint32_t type)
{
uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
uint32_t rc = ftl_vfl_read(source, ftl_copybuffer[0],
&ftl_copyspare[0], 1, 1) & 0x11F;
memset(&ftl_copyspare[0], 0xFF, 0x40);
ftl_copyspare[0].user.lpn = lpn;
ftl_copyspare[0].user.usn = ++ftl_cxt.nextblockusn;
ftl_copyspare[0].user.type = 0x40;
if ((rc & 2) != 0) memset(ftl_copybuffer[0], 0, 0x800);
else if (rc != 0) ftl_copyspare[0].user.eccmark = 0x55;
uint32_t rc = ftl_vfl_read(source, ftl_copybuffer,
&ftl_sparebuffer, 1, 1) & 0x11F;
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.user.lpn = lpn;
ftl_sparebuffer.user.usn = ++ftl_cxt.nextblockusn;
ftl_sparebuffer.user.type = 0x40;
if ((rc & 2) != 0) memset(ftl_copybuffer, 0, 0x800);
else if (rc != 0) ftl_sparebuffer.user.eccmark = 0x55;
if (type == 1 && destination % ppb == ppb - 1)
ftl_copyspare[0].user.type = 0x41;
return ftl_vfl_write(destination, 1, ftl_copybuffer[0], &ftl_copyspare[0]);
ftl_sparebuffer.user.type = 0x41;
return ftl_vfl_write(destination, ftl_copybuffer, &ftl_sparebuffer);
}
#endif
@ -1465,29 +1336,21 @@ uint32_t ftl_copy_page(uint32_t source, uint32_t destination,
/* Copies a pBlock to a vBlock */
uint32_t ftl_copy_block(uint32_t source, uint32_t destination)
{
uint32_t i, j;
uint32_t i;
uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
uint32_t error = 0;
ftl_cxt.nextblockusn++;
for (i = 0; i < ppb; i += FTL_COPYBUF_SIZE)
for (i = 0; i < ppb; i++)
{
uint32_t rc = ftl_read(source * ppb + i,
FTL_COPYBUF_SIZE, ftl_copybuffer[0]);
memset(&ftl_copyspare[0], 0xFF, 0x40 * FTL_COPYBUF_SIZE);
for (j = 0; j < FTL_COPYBUF_SIZE; j++)
{
ftl_copyspare[j].user.lpn = source * ppb + i + j;
ftl_copyspare[j].user.usn = ftl_cxt.nextblockusn;
ftl_copyspare[j].user.type = 0x40;
if (rc)
{
if (ftl_read(source * ppb + i + j, 1, ftl_copybuffer[j]))
ftl_copyspare[j].user.eccmark = 0x55;
}
if (i + j == ppb - 1) ftl_copyspare[j].user.type = 0x41;
}
if (ftl_vfl_write(destination * ppb + i, FTL_COPYBUF_SIZE,
ftl_copybuffer[0], &ftl_copyspare[0]))
uint32_t rc = ftl_read(source * ppb + i, 1, ftl_copybuffer);
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.user.lpn = source * ppb + i;
ftl_sparebuffer.user.usn = ftl_cxt.nextblockusn;
ftl_sparebuffer.user.type = 0x40;
if (rc != 0) ftl_sparebuffer.user.eccmark = 0x55;
if (i == ppb - 1) ftl_sparebuffer.user.type = 0x41;
if (ftl_vfl_write(destination * ppb + i,
ftl_copybuffer, &ftl_sparebuffer) != 0)
{
error = 1;
break;
@ -1608,37 +1471,22 @@ uint32_t ftl_commit_scattered(struct ftl_log_type* entry)
If this fails for whichever reason, it will be committed the usual way. */
uint32_t ftl_commit_sequential(struct ftl_log_type* entry)
{
uint32_t i;
uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
if ((*entry).issequential != 1
|| (*entry).pagescurrent != (*entry).pagesused)
return 1;
for (; (*entry).pagesused < ppb; )
for (; (*entry).pagesused < ppb; (*entry).pagesused++)
{
uint32_t lpn = (*entry).logicalvblock * ppb + (*entry).pagesused;
uint32_t newpage = (*entry).scatteredvblock * ppb
+ (*entry).pagesused;
uint32_t count = FTL_COPYBUF_SIZE < ppb - (*entry).pagesused
? FTL_COPYBUF_SIZE : ppb - (*entry).pagesused;
for (i = 0; i < count; i++)
if ((*entry).pageoffsets[(*entry).pagesused + i] != 0xFFFF)
return ftl_commit_scattered(entry);
uint32_t rc = ftl_read(lpn, count, ftl_copybuffer[0]);
memset(&ftl_copyspare[0], 0xFF, 0x40 * FTL_COPYBUF_SIZE);
for (i = 0; i < count; i++)
{
ftl_copyspare[i].user.lpn = lpn + i;
ftl_copyspare[i].user.usn = ++ftl_cxt.nextblockusn;
ftl_copyspare[i].user.type = 0x40;
if (rc) ftl_copyspare[i].user.eccmark = 0x55;
if ((*entry).pagesused + i == ppb - 1)
ftl_copyspare[i].user.type = 0x41;
}
if (ftl_vfl_write(newpage, count, ftl_copybuffer[0], &ftl_copyspare[0]))
uint32_t oldpage = ftl_map[(*entry).logicalvblock] * ppb
+ (*entry).pagesused;
if ((*entry).pageoffsets[(*entry).pagesused] != 0xFFFF
|| ftl_copy_page(oldpage, newpage, lpn, 1) != 0)
return ftl_commit_scattered(entry);
(*entry).pagesused += count;
}
ftl_release_pool_block(ftl_map[(*entry).logicalvblock]);
ftl_map[(*entry).logicalvblock] = (*entry).scatteredvblock;
@ -1705,7 +1553,6 @@ struct ftl_log_type* ftl_allocate_log_entry(uint32_t block)
{
uint32_t i;
struct ftl_log_type* entry = ftl_get_log_entry(block);
(*entry).usn = ftl_cxt.nextblockusn - 1;
if (entry != (struct ftl_log_type*)0) return entry;
for (i = 0; i < 0x11; i++)
@ -1749,7 +1596,7 @@ uint32_t ftl_commit_cxt(void)
uint32_t mappages = ((*ftl_nand_type).userblocks + 0x3ff) >> 10;
uint32_t ctrpages = ((*ftl_nand_type).userblocks + 23 + 0x3ff) >> 10;
uint32_t endpage = ftl_cxt.ftlctrlpage + mappages + ctrpages + 1;
if (endpage >= (ftl_cxt.ftlctrlpage / ppb + 1) * ppb)
if (endpage % ppb > ppb - 1)
ftl_cxt.ftlctrlpage |= ppb - 1;
for (i = 0; i < ctrpages; i++)
{
@ -1759,21 +1606,21 @@ uint32_t ftl_commit_cxt(void)
for (i = 0; i < mappages; i++)
{
if (ftl_next_ctrl_pool_page() != 0) return 1;
memset(&ftl_sparebuffer[0], 0xFF, 0x40);
ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
ftl_sparebuffer[0].meta.idx = i;
ftl_sparebuffer[0].meta.type = 0x44;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_map[i << 10],
&ftl_sparebuffer[0]) != 0)
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.meta.usn = ftl_cxt.usn;
ftl_sparebuffer.meta.idx = i;
ftl_sparebuffer.meta.type = 0x44;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_map[i << 10],
&ftl_sparebuffer) != 0)
return 1;
ftl_cxt.ftl_map_pages[i] = ftl_cxt.ftlctrlpage;
}
if (ftl_next_ctrl_pool_page() != 0) return 1;
ftl_cxt.clean_flag = 1;
memset(&ftl_sparebuffer[0], 0xFF, 0x40);
ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
ftl_sparebuffer[0].meta.type = 0x43;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, &ftl_cxt, &ftl_sparebuffer[0]) != 0)
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.meta.usn = ftl_cxt.usn;
ftl_sparebuffer.meta.type = 0x43;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, &ftl_cxt, &ftl_sparebuffer) != 0)
return 1;
return 0;
}
@ -1827,7 +1674,7 @@ uint32_t ftl_swap_blocks(void)
/* Exposed function: Write highlevel sectors */
uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
{
uint32_t i, j, k;
uint32_t i, j;
uint32_t ppb = (*ftl_nand_type).pagesperblock * ftl_banks;
if (sector + count > (*ftl_nand_type).userblocks * ppb)
@ -1847,11 +1694,11 @@ uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
return 1;
}
memset(ftl_buffer, 0xFF, 0x800);
memset(&ftl_sparebuffer[0], 0xFF, 0x40);
ftl_sparebuffer[0].meta.usn = ftl_cxt.usn;
ftl_sparebuffer[0].meta.type = 0x47;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, 1, ftl_buffer,
&ftl_sparebuffer[0]) == 0)
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.meta.usn = ftl_cxt.usn;
ftl_sparebuffer.meta.type = 0x47;
if (ftl_vfl_write(ftl_cxt.ftlctrlpage, ftl_buffer,
&ftl_sparebuffer) == 0)
break;
}
if (i == 3)
@ -1888,27 +1735,16 @@ uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
}
}
ftl_cxt.nextblockusn++;
for (j = 0; j < ppb; j += FTL_WRITESPARE_SIZE)
for (j = 0; j < ppb; j++)
{
memset(&ftl_sparebuffer[0], 0xFF, 0x40 * FTL_WRITESPARE_SIZE);
for (k = 0; k < FTL_WRITESPARE_SIZE; k++)
{
ftl_sparebuffer[k].user.lpn = sector + i + j + k;
ftl_sparebuffer[k].user.usn = ftl_cxt.nextblockusn;
ftl_sparebuffer[k].user.type = 0x40;
if (j == ppb - 1) ftl_sparebuffer[k].user.type = 0x41;
}
uint32_t rc = ftl_vfl_write(vblock * ppb + j, FTL_WRITESPARE_SIZE,
&((uint8_t*)buffer)[(i + j) << 11],
&ftl_sparebuffer[0]);
if (rc)
for (k = 0; k < ftl_banks; k++)
if (rc & (1 << k))
{
while (ftl_vfl_write(vblock * ppb + j + k, 1,
&((uint8_t*)buffer)[(i + j + k) << 11],
&ftl_sparebuffer[k]));
}
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.user.lpn = sector + i + j;
ftl_sparebuffer.user.usn = ftl_cxt.nextblockusn;
ftl_sparebuffer.user.type = 0x40;
if (j == ppb - 1) ftl_sparebuffer.user.type = 0x41;
while (ftl_vfl_write(vblock * ppb + j,
&((uint8_t*)buffer)[(i + j) << 11],
&ftl_sparebuffer) != 0);
}
ftl_release_pool_block(ftl_map[block]);
ftl_map[block] = vblock;
@ -1926,40 +1762,22 @@ uint32_t ftl_write(uint32_t sector, uint32_t count, const void* buffer)
return 1;
}
}
uint32_t cnt = FTL_WRITESPARE_SIZE;
if (cnt > count - i) cnt = count - i;
if (cnt > ppb - (*logentry).pagesused) cnt = ppb - (*logentry).pagesused;
if (cnt > ppb - page) cnt = ppb - page;
memset(&ftl_sparebuffer[0], 0xFF, 0x40 * cnt);
for (j = 0; j < cnt; j++)
{
ftl_sparebuffer[j].user.lpn = sector + i + j;
ftl_sparebuffer[j].user.usn = ++ftl_cxt.nextblockusn;
ftl_sparebuffer[j].user.type = 0x40;
if ((*logentry).pagesused + j == ppb - 1 && (*logentry).issequential)
ftl_sparebuffer[j].user.type = 0x41;
}
memset(&ftl_sparebuffer, 0xFF, 0x40);
ftl_sparebuffer.user.lpn = sector + i;
ftl_sparebuffer.user.usn = ++ftl_cxt.nextblockusn;
ftl_sparebuffer.user.type = 0x40;
uint32_t abspage = (*logentry).scatteredvblock * ppb
+ (*logentry).pagesused;
(*logentry).pagesused += cnt;
if (ftl_vfl_write(abspage, cnt, &((uint8_t*)buffer)[i << 11],
&ftl_sparebuffer[0]) == 0)
+ (*logentry).pagesused++;
if (ftl_vfl_write(abspage, &((uint8_t*)buffer)[i << 11],
&ftl_sparebuffer) == 0)
{
for (j = 0; j < cnt; j++)
{
if ((*logentry).pageoffsets[page + j] == 0xFFFF)
(*logentry).pagescurrent++;
(*logentry).pageoffsets[page + j] = (*logentry).pagesused - cnt + j;
if ((*logentry).pagesused - cnt + j + 1 != (*logentry).pagescurrent
|| (*logentry).pageoffsets[page + j] != page + j)
(*logentry).issequential = 0;
}
i += cnt;
if ((*logentry).pageoffsets[page] == 0xFFFF)
(*logentry).pagescurrent++;
(*logentry).pageoffsets[page] = (*logentry).pagesused - 1;
ftl_check_still_sequential(logentry, page);
i++;
}
else panicf("FTL: Write error: %u %u %u!",
(unsigned)sector, (unsigned)count, (unsigned)i);
}
if ((*logentry).pagesused == ppb) ftl_remove_scattered_block(logentry);
}
if (ftl_cxt.swapcounter >= 300)
{

View file

@ -89,7 +89,6 @@ uint8_t nand_tunk2[4];
uint8_t nand_tunk3[4];
uint32_t nand_type[4];
int nand_powered = 0;
int nand_sequential = 0;
long nand_last_activity_value = -1;
static long nand_stack[32];
@ -100,14 +99,13 @@ static struct wakeup ecc_wakeup;
static uint8_t nand_data[0x800] __attribute__((aligned(16)));
static uint8_t nand_ctrl[0x200] __attribute__((aligned(16)));
static uint8_t nand_spare[4][0x40] __attribute__((aligned(16)));
static uint8_t nand_spare[0x40] __attribute__((aligned(16)));
static uint8_t nand_ecc[0x30] __attribute__((aligned(16)));
uint32_t nand_unlock(uint32_t rc)
{
led(false);
nand_last_activity_value = current_tick;
mutex_unlock(&nand_mtx);
return rc;
}
@ -216,9 +214,10 @@ uint32_t nand_wait_status_ready(uint32_t bank)
return nand_send_cmd(NAND_CMD_READ);
}
void nand_transfer_data_start(uint32_t bank, uint32_t direction,
void* buffer, uint32_t size)
uint32_t nand_transfer_data(uint32_t bank, uint32_t direction,
void* buffer, uint32_t size)
{
long timeout = current_tick + HZ / 50;
nand_set_fmctrl0(bank, FMCTRL0_ENABLEDMA);
FMDNUM = size - 1;
FMCTRL1 = FMCTRL1_DOREADDATA << direction;
@ -232,11 +231,6 @@ void nand_transfer_data_start(uint32_t bank, uint32_t direction,
DMATCNT3 = (size >> 4) - 1;
clean_dcache();
DMACOM3 = 4;
}
uint32_t nand_transfer_data_collect(uint32_t direction)
{
long timeout = current_tick + HZ / 50;
while ((DMAALLST & DMAALLST_DMABUSY3))
if (nand_timeout(timeout)) return 1;
if (!direction) invalidate_dcache();
@ -246,29 +240,17 @@ uint32_t nand_transfer_data_collect(uint32_t direction)
return 0;
}
uint32_t nand_transfer_data(uint32_t bank, uint32_t direction,
void* buffer, uint32_t size)
{
nand_transfer_data_start(bank, direction, buffer, size);
uint32_t rc = nand_transfer_data_collect(direction);
return rc;
}
void ecc_start(uint32_t size, void* databuffer, void* sparebuffer, uint32_t type)
uint32_t ecc_decode(uint32_t size, void* databuffer, void* sparebuffer)
{
mutex_lock(&ecc_mtx);
long timeout = current_tick + HZ / 50;
ECC_INT_CLR = 1;
SRCPND = INTMSK_ECC;
ECC_UNK1 = size;
ECC_DATA_PTR = (uint32_t)databuffer;
ECC_SPARE_PTR = (uint32_t)sparebuffer;
clean_dcache();
ECC_CTRL = type;
}
uint32_t ecc_collect(void)
{
long timeout = current_tick + HZ / 50;
ECC_CTRL = ECCCTRL_STARTDECODING;
while (!(SRCPND & INTMSK_ECC))
if (nand_timeout(timeout)) return ecc_unlock(1);
invalidate_dcache();
@ -277,18 +259,23 @@ uint32_t ecc_collect(void)
return ecc_unlock(ECC_RESULT);
}
uint32_t ecc_decode(uint32_t size, void* databuffer, void* sparebuffer)
{
ecc_start(size, databuffer, sparebuffer, ECCCTRL_STARTDECODING);
uint32_t rc = ecc_collect();
return rc;
}
uint32_t ecc_encode(uint32_t size, void* databuffer, void* sparebuffer)
{
ecc_start(size, databuffer, sparebuffer, ECCCTRL_STARTENCODING);
ecc_collect();
return 0;
mutex_lock(&ecc_mtx);
long timeout = current_tick + HZ / 50;
ECC_INT_CLR = 1;
SRCPND = INTMSK_ECC;
ECC_UNK1 = size;
ECC_DATA_PTR = (uint32_t)databuffer;
ECC_SPARE_PTR = (uint32_t)sparebuffer;
clean_dcache();
ECC_CTRL = ECCCTRL_STARTENCODING;
while (!(SRCPND & INTMSK_ECC))
if (nand_timeout(timeout)) return ecc_unlock(1);
invalidate_dcache();
ECC_INT_CLR = 1;
SRCPND = INTMSK_ECC;
return ecc_unlock(0);
}
uint32_t nand_check_empty(uint8_t* buffer)
@ -384,7 +371,7 @@ uint32_t nand_read_page(uint32_t bank, uint32_t page, void* databuffer,
uint32_t checkempty)
{
uint8_t* data = nand_data;
uint8_t* spare = nand_spare[0];
uint8_t* spare = nand_spare;
if (databuffer && !((uint32_t)databuffer & 0xf))
data = (uint8_t*)databuffer;
if (sparebuffer && !((uint32_t)sparebuffer & 0xf))
@ -441,11 +428,11 @@ uint32_t nand_read_page(uint32_t bank, uint32_t page, void* databuffer,
return nand_unlock(rc);
}
uint32_t nand_write_page_int(uint32_t bank, uint32_t page, void* databuffer,
void* sparebuffer, uint32_t doecc, uint32_t wait)
uint32_t nand_write_page(uint32_t bank, uint32_t page, void* databuffer,
void* sparebuffer, uint32_t doecc)
{
uint8_t* data = nand_data;
uint8_t* spare = nand_spare[0];
uint8_t* spare = nand_spare;
if (databuffer && !((uint32_t)databuffer & 0xf))
data = (uint8_t*)databuffer;
if (sparebuffer && !((uint32_t)sparebuffer & 0xf))
@ -459,14 +446,9 @@ uint32_t nand_write_page_int(uint32_t bank, uint32_t page, void* databuffer,
if (spare != sparebuffer) memcpy(spare, sparebuffer, 0x40);
}
else memset(spare, 0xFF, 0x40);
nand_set_fmctrl0(bank, FMCTRL0_ENABLEDMA);
if (nand_send_cmd(NAND_CMD_PROGRAM)) return nand_unlock(1);
if (nand_send_address(page, databuffer ? 0 : 0x800))
return nand_unlock(1);
if (databuffer && data != databuffer) memcpy(data, databuffer, 0x800);
if (databuffer) nand_transfer_data_start(bank, 1, data, 0x800);
if (doecc)
{
if (databuffer && data != databuffer) memcpy(data, databuffer, 0x800);
if (ecc_encode(3, data, nand_ecc)) return nand_unlock(1);
memcpy(&spare[0xC], nand_ecc, 0x28);
memset(nand_ctrl, 0xFF, 0x200);
@ -474,15 +456,18 @@ uint32_t nand_write_page_int(uint32_t bank, uint32_t page, void* databuffer,
if (ecc_encode(0, nand_ctrl, nand_ecc)) return nand_unlock(1);
memcpy(&spare[0x34], nand_ecc, 0xC);
}
nand_set_fmctrl0(bank, FMCTRL0_ENABLEDMA);
if (nand_send_cmd(NAND_CMD_PROGRAM)) return nand_unlock(1);
if (nand_send_address(page, databuffer ? 0 : 0x800))
return nand_unlock(1);
if (databuffer)
if (nand_transfer_data_collect(1))
if (nand_transfer_data(bank, 1, data, 0x800))
return nand_unlock(1);
if (sparebuffer || doecc)
if (nand_transfer_data(bank, 1, spare, 0x40))
return nand_unlock(1);
if (nand_send_cmd(NAND_CMD_PROGCNFRM)) return nand_unlock(1);
if (wait) if (nand_wait_status_ready(bank)) return nand_unlock(1);
return nand_unlock(0);
return nand_unlock(nand_wait_status_ready(bank));
}
uint32_t nand_block_erase(uint32_t bank, uint32_t page)
@ -502,185 +487,6 @@ uint32_t nand_block_erase(uint32_t bank, uint32_t page)
return nand_unlock(0);
}
/* Read the same page number from all (up to 4) NAND banks at once,
   pipelining the DMA transfer of bank i with the software ECC decode of
   bank i-1 to hide ECC latency.
   databuffer:  destination for 4 x 0x800 bytes of main data (bank-contiguous)
   sparebuffer: destination for 4 x 0x40 bytes of spare data (bank-contiguous)
   doecc:       must be nonzero to use the fast path
   checkempty:  if nonzero, also test each bank's spare area for "erased page"
   Returns a packed status word: one nibble per bank (bank i in bits i*4..i*4+3):
     bit 0: transfer/command error, bit 1: page empty,
     bit 2: uncorrectable main-data ECC, bit 3: spare-metadata ECC failure. */
uint32_t nand_read_page_fast(uint32_t page, void* databuffer,
                             void* sparebuffer, uint32_t doecc,
                             uint32_t checkempty)
{
    uint32_t i, rc = 0;
    /* Fast path needs 16-byte-aligned, non-NULL buffers and ECC enabled;
       otherwise fall back to one plain nand_read_page() call per bank and
       repack each bank's result bits into the per-bank nibble format. */
    if (((uint32_t)databuffer & 0xf) || ((uint32_t)sparebuffer & 0xf)
     || !databuffer || !sparebuffer || !doecc)
    {
        for (i = 0; i < 4; i++)
        {
            if (nand_type[i] == 0xFFFFFFFF) continue;  /* bank not present */
            void* databuf = (void*)0;
            void* sparebuf = (void*)0;
            if (databuffer) databuf = (void*)((uint32_t)databuffer + 0x800 * i);
            if (sparebuffer) sparebuf = (void*)((uint32_t)sparebuffer + 0x40 * i);
            uint32_t ret = nand_read_page(i, page, databuf, sparebuf, doecc, checkempty);
            /* Remap nand_read_page()'s status bits (1/2/0x10/0x100) into
               this function's 4-bit-per-bank encoding. */
            if (ret & 1) rc |= 1 << (i << 2);
            if (ret & 2) rc |= 2 << (i << 2);
            if (ret & 0x10) rc |= 4 << (i << 2);
            if (ret & 0x100) rc |= 8 << (i << 2);
        }
        return rc;
    }
    mutex_lock(&nand_mtx);
    nand_last_activity_value = current_tick;
    led(true);
    if (!nand_powered) nand_power_up();
    /* Per-bank status; pre-mark absent banks as failed so they are skipped. */
    uint8_t status[4];
    for (i = 0; i < 4; i++) status[i] = (nand_type[i] == 0xFFFFFFFF);
    /* Bank 0 is read synchronously (nothing to overlap with yet): issue the
       READ command/address pair and pull in main data and spare via
       blocking transfers. */
    if (!status[0])
    {
        nand_set_fmctrl0(0, FMCTRL0_ENABLEDMA);
        if (nand_send_cmd(NAND_CMD_READ))
            status[0] = 1;
    }
    if (!status[0])
        if (nand_send_address(page, 0))
            status[0] = 1;
    if (!status[0])
        if (nand_send_cmd(NAND_CMD_READ2))
            status[0] = 1;
    if (!status[0])
        if (nand_wait_status_ready(0))
            status[0] = 1;
    if (!status[0])
        if (nand_transfer_data(0, 0, databuffer, 0x800))
            status[0] = 1;
    if (!status[0])
        if (nand_transfer_data(0, 0, sparebuffer, 0x40))
            status[0] = 1;
    /* Pipelined loop: while bank i's data is being DMAed in, run the ECC
       decode of bank i-1's already-received data/spare in parallel. */
    for (i = 1; i < 4; i++)
    {
        if (!status[i])
        {
            nand_set_fmctrl0(i, FMCTRL0_ENABLEDMA);
            if (nand_send_cmd(NAND_CMD_READ))
                status[i] = 1;
        }
        if (!status[i])
            if (nand_send_address(page, 0))
                status[i] = 1;
        if (!status[i])
            if (nand_send_cmd(NAND_CMD_READ2))
                status[i] = 1;
        if (!status[i])
            if (nand_wait_status_ready(i))
                status[i] = 1;
        /* Kick off the (non-blocking) main-data DMA for bank i... */
        if (!status[i])
            nand_transfer_data_start(i, 0, (void*)((uint32_t)databuffer
                                                 + 0x800 * i), 0x800);
        /* ...and meanwhile start ECC decode of bank i-1's main data, using
           the 0x28 ECC bytes stored at spare offset 0xC. */
        if (!status[i - 1])
        {
            memcpy(nand_ecc, (void*)((uint32_t)sparebuffer + 0x40 * (i - 1) + 0xC), 0x28);
            ecc_start(3, (void*)((uint32_t)databuffer
                                + 0x800 * (i - 1)), nand_ecc, ECCCTRL_STARTDECODING);
        }
        if (!status[i])
            if (nand_transfer_data_collect(0))
                status[i] = 1;
        /* Start the spare-area DMA for bank i while finishing i-1's ECC. */
        if (!status[i])
            nand_transfer_data_start(i, 0, (void*)((uint32_t)sparebuffer
                                                 + 0x40 * i), 0x40);
        if (!status[i - 1])
            if (ecc_collect() & 1)
                status[i - 1] = 4;  /* uncorrectable main-data ECC */
        /* Decode bank i-1's spare metadata: first 0xC spare bytes, padded to
           0x200 with 0xFF, checked against the ECC at spare offset 0x34. */
        if (!status[i - 1])
        {
            memset(nand_ctrl, 0xFF, 0x200);
            memcpy(nand_ctrl, (void*)((uint32_t)sparebuffer + 0x40 * (i - 1)), 0xC);
            memcpy(nand_ecc, (void*)((uint32_t)sparebuffer + 0x40 * (i - 1) + 0x34), 0xC);
            ecc_start(0, nand_ctrl, nand_ecc, ECCCTRL_STARTDECODING);
        }
        if (!status[i])
            if (nand_transfer_data_collect(0))
                status[i] = 1;
        if (!status[i - 1])
        {
            if (ecc_collect() & 1)
            {
                /* Spare metadata unreliable: flag it and blank the bytes so
                   callers don't consume corrupted metadata. */
                status[i - 1] |= 8;
                memset((void*)((uint32_t)sparebuffer + 0x40 * (i - 1)), 0xFF, 0xC);
            }
            else memcpy((void*)((uint32_t)sparebuffer + 0x40 * (i - 1)), nand_ctrl, 0xC);
            if (checkempty)
                status[i - 1] |= nand_check_empty((void*)((uint32_t)sparebuffer
                                                        + 0x40 * (i - 1))) << 1;
        }
    }
    /* Loop epilogue: the last bank (i == 4 here, so i-1 == 3) still has its
       ECC pending; decode it synchronously now. */
    if (!status[i - 1])
    {
        memcpy(nand_ecc,(void*)((uint32_t)sparebuffer + 0x40 * (i - 1) + 0xC), 0x28);
        if (ecc_decode(3, (void*)((uint32_t)databuffer
                                + 0x800 * (i - 1)), nand_ecc) & 1)
            status[i - 1] = 4;
    }
    if (!status[i - 1])
    {
        memset(nand_ctrl, 0xFF, 0x200);
        memcpy(nand_ctrl, (void*)((uint32_t)sparebuffer + 0x40 * (i - 1)), 0xC);
        memcpy(nand_ecc, (void*)((uint32_t)sparebuffer + 0x40 * (i - 1) + 0x34), 0xC);
        if (ecc_decode(0, nand_ctrl, nand_ecc) & 1)
        {
            status[i - 1] |= 8;
            memset((void*)((uint32_t)sparebuffer + 0x40 * (i - 1)), 0xFF, 0xC);
        }
        else memcpy((void*)((uint32_t)sparebuffer + 0x40 * (i - 1)), nand_ctrl, 0xC);
        if (checkempty)
            status[i - 1] |= nand_check_empty((void*)((uint32_t)sparebuffer
                                                    + 0x40 * (i - 1))) << 1;
    }
    /* Pack the per-bank statuses into the combined return value. */
    for (i = 0; i < 4; i++)
        if (nand_type[i] != 0xFFFFFFFF)
            rc |= status[i] << (i << 2);
    return nand_unlock(rc);
}
/* Public synchronous page write: delegates to the internal implementation,
   requesting that it block until the program operation has completed. */
uint32_t nand_write_page(uint32_t bank, uint32_t page, void* databuffer,
                         void* sparebuffer, uint32_t doecc)
{
    /* Final argument (wait = 1) makes the internal writer poll for
       completion before returning. */
    uint32_t result = nand_write_page_int(bank, page, databuffer,
                                          sparebuffer, doecc, 1);
    return result;
}
/* Begin an asynchronous page write on the given bank: issues the program
   command, starts the main-data DMA, computes the data and spare-metadata
   ECC while that DMA runs, transfers the spare area, and issues the program
   confirm WITHOUT waiting for completion. The caller must later call
   nand_write_page_collect() to wait for the operation to finish.
   Returns nonzero (via nand_unlock) on any setup/transfer failure. */
uint32_t nand_write_page_start(uint32_t bank, uint32_t page, void* databuffer,
                               void* sparebuffer, uint32_t doecc)
{
    /* Async fast path requires 16-byte-aligned, non-NULL buffers, ECC, and a
       chip that supports interleaving; otherwise fall back to the internal
       writer (blocking only if nand_sequential says the chip requires it). */
    if (((uint32_t)databuffer & 0xf) || ((uint32_t)sparebuffer & 0xf)
     || !databuffer || !sparebuffer || !doecc || nand_sequential)
        return nand_write_page_int(bank, page, databuffer, sparebuffer, doecc, nand_sequential);
    mutex_lock(&nand_mtx);
    nand_last_activity_value = current_tick;
    led(true);
    if (!nand_powered) nand_power_up();
    nand_set_fmctrl0(bank, FMCTRL0_ENABLEDMA);
    if (nand_send_cmd(NAND_CMD_PROGRAM))
        return nand_unlock(1);
    if (nand_send_address(page, 0))
        return nand_unlock(1);
    /* Start the 0x800-byte main-data DMA (direction 1 = write) and overlap
       the ECC computation with it. */
    nand_transfer_data_start(bank, 1, databuffer, 0x800);
    if (ecc_encode(3, databuffer, nand_ecc))
        return nand_unlock(1);
    /* Data ECC (0x28 bytes) goes at spare offset 0xC. */
    memcpy((void*)((uint32_t)sparebuffer + 0xC), nand_ecc, 0x28);
    /* Spare metadata (first 0xC bytes) is padded to 0x200 with 0xFF and gets
       its own 0xC-byte ECC at spare offset 0x34. */
    memset(nand_ctrl, 0xFF, 0x200);
    memcpy(nand_ctrl, sparebuffer, 0xC);
    if (ecc_encode(0, nand_ctrl, nand_ecc))
        return nand_unlock(1);
    memcpy((void*)((uint32_t)sparebuffer + 0x34), nand_ecc, 0xC);
    /* NOTE(review): the data DMA above was started with direction 1, but is
       collected here with argument 0, whereas the blocking write path uses
       nand_transfer_data_collect(1) — verify the argument semantics; this
       mismatch may relate to the instability that prompted this revert. */
    if (nand_transfer_data_collect(0))
        return nand_unlock(1);
    if (nand_transfer_data(bank, 1, sparebuffer, 0x40))
        return nand_unlock(1);
    /* Fire the program-confirm and return immediately; completion is checked
       later by nand_write_page_collect(). */
    return nand_unlock(nand_send_cmd(NAND_CMD_PROGCNFRM));
}
/* Complete a write started with nand_write_page_start(): block until the
   given bank reports ready and propagate the resulting status. */
uint32_t nand_write_page_collect(uint32_t bank)
{
    uint32_t rc = nand_wait_status_ready(bank);
    return rc;
}
const struct nand_device_info_type* nand_get_device_type(uint32_t bank)
{
if (nand_type[bank] == 0xFFFFFFFF)
@ -739,7 +545,6 @@ uint32_t nand_device_init(void)
nand_tunk3[i] = nand_deviceinfotable[nand_type[i]].tunk3;
}
if (nand_type[0] == 0xFFFFFFFF) return 1;
nand_sequential = !((nand_type[0] >> 22) & 1);
nand_last_activity_value = current_tick;
create_thread(nand_thread, nand_stack,

View file

@ -46,13 +46,6 @@ uint32_t nand_write_page(uint32_t bank, uint32_t page, void* databuffer,
void* sparebuffer, uint32_t doecc);
uint32_t nand_block_erase(uint32_t bank, uint32_t page);
uint32_t nand_read_page_fast(uint32_t page, void* databuffer,
void* sparebuffer, uint32_t doecc,
uint32_t checkempty);
uint32_t nand_write_page_start(uint32_t bank, uint32_t page, void* databuffer,
void* sparebuffer, uint32_t doecc);
uint32_t nand_write_page_collect(uint32_t bank);
const struct nand_device_info_type* nand_get_device_type(uint32_t bank);
uint32_t nand_reset(uint32_t bank);
uint32_t nand_device_init(void);