/* drivers/staging/spectra/flash.c */
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/fs.h>
21#include <linux/slab.h>
22
23#include "flash.h"
24#include "ffsdefs.h"
25#include "lld.h"
26#include "lld_nand.h"
27#if CMD_DMA
28#include "lld_cdma.h"
29#endif
30
/* Address arithmetic: convert a 64-bit byte address into a block number,
 * and into a page offset within that block. */
#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
				DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))

/* Block-table entry classification: pbt[] entries carry status flag bits
 * (BAD_BLOCK / SPARE_BLOCK / DISCARD_BLOCK) on top of the block number.
 * All of these expect a local `pbt` pointing at the block table. */
#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
	BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))

#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))

#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
	BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))

#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
44
#if DEBUG_BNDRY
/*
 * Boundary-check helper: log an error when an index reaches its limit.
 * Despite the "Aborting" text it only prints; execution continues.
 */
void debug_boundary_lineno_error(int chnl, int limit, int no,
					int lineno, char *filename)
{
	if (chnl < limit)
		return;

	printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
		"at %s:%d. Other info:%d. Aborting...\n",
		chnl, limit, filename, lineno, no);
}
/* static int globalmemsize; */
#endif
56
/* L1 cache internals */
static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
static int FTL_Cache_Read(u64 dwPageAddr);
static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
				u16 cache_blk);
static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
				u8 cache_blk, u16 flag);
static int FTL_Cache_Write(void);
static void FTL_Calculate_LRU(void);
static u32 FTL_Get_Block_Index(u32 wBlockNum);

/* Block-table persistence (read/write/verify on flash) */
static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
						u8 BT_Tag, u16 *Page);
static int FTL_Read_Block_Table(void);
static int FTL_Write_Block_Table(int wForce);
static int FTL_Write_Block_Table_Data(void);
static int FTL_Check_Block_Table(int wOldTable);
static int FTL_Static_Wear_Leveling(void);
static u32 FTL_Replace_Block_Table(void);
static int FTL_Write_IN_Progress_Block_Table_Page(void);

/* Address helpers */
static u32 FTL_Get_Page_Num(u64 length);
static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);

/* Block replacement / wear leveling */
static u32 FTL_Replace_OneBlock(u32 wBlockNum,
				u32 wReplaceNum);
static u32 FTL_Replace_LWBlock(u32 wBlockNum,
				int *pGarbageCollect);
static u32 FTL_Replace_MWBlock(void);
static int FTL_Replace_Block(u64 blk_addr);
static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
87
struct device_info_tag DeviceInfo;	/* NAND geometry, filled by the LLD */
struct flash_cache_tag Cache;		/* L1 page cache */
static struct spectra_l2_cache_info cache_l2;	/* L2 (block) cache */

/* Scratch buffers for merging data during an L2 cache flush */
static u8 *cache_l2_page_buf;
static u8 *cache_l2_blk_buf;

/* One allocation holds all three tables; see allocate_memory(). */
u8 *g_pBlockTable;		/* LBA->PBA table (u32 entries) */
u8 *g_pWearCounter;		/* per-block wear counters (u8) */
u16 *g_pReadCounter;		/* per-block read counters (MLC only) */
u32 *g_pBTBlocks;		/* physical blocks holding the block table */
static u16 g_wBlockTableOffset;
static u32 g_wBlockTableIndex;
static u8 g_cBlockTableStatus;

/* Per-function scratch buffers, pre-allocated by allocate_memory() */
static u8 *g_pTempBuf;
static u8 *flag_check_blk_table;
static u8 *tmp_buf_search_bt_in_block;
static u8 *spare_buf_search_bt_in_block;
static u8 *spare_buf_bt_search_bt_in_block;
static u8 *tmp_buf1_read_blk_table;
static u8 *tmp_buf2_read_blk_table;
static u8 *flags_static_wear_leveling;
static u8 *tmp_buf_write_blk_table_data;
static u8 *tmp_buf_read_disturbance;

/* Buffers used by lld_nand.c page read/write helpers */
u8 *buf_read_page_main_spare;
u8 *buf_write_page_main_spare;
u8 *buf_read_page_spare;
u8 *buf_get_bad_block;

#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
/* Shadow cache state used to roll back after a failed CDMA chain */
struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
struct flash_cache_tag cache_start_copy;
#endif

int g_wNumFreeBlocks;
u8 g_SBDCmdIndex;

static u8 *g_pIPF;			/* in-progress-flag page buffer */
static u8 bt_flag = FIRST_BT_ID;	/* current block-table tag */
static u8 bt_block_changed;

static u16 cache_block_to_write;
static u8 last_erased = FIRST_BT_ID;

static u8 GC_Called;
static u8 BT_GC_Called;

#if CMD_DMA
#define COPY_BACK_BUF_NUM 10

static u8 ftl_cmd_cnt;	/* Init value is 0 */
u8 *g_pBTDelta;
u8 *g_pBTDelta_Free;
u8 *g_pBTStartingCopy;
u8 *g_pWearCounterCopy;
u16 *g_pReadCounterCopy;
u8 *g_pBlockTableCopies;
u8 *g_pNextBlockTable;
static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
static int cp_back_buf_idx;

static u8 *g_temp_buf;

/* One queued block-table change; ValidFields is a bitmask selecting
 * which members are live (0x01 offset, 0x03 offset+index, 0x0C BT entry,
 * 0x30 wear counter, 0xC0 read counter).  Packed because the deltas are
 * stored back-to-back in g_pBTDelta. */
#pragma pack(push, 1)
#pragma pack(1)
struct BTableChangesDelta {
	u8 ftl_cmd_cnt;
	u8 ValidFields;
	u16 g_wBlockTableOffset;
	u32 g_wBlockTableIndex;
	u32 BT_Index;
	u32 BT_Entry_Value;
	u32 WC_Index;
	u8 WC_Entry_Value;
	u32 RC_Index;
	u16 RC_Entry_Value;
};

#pragma pack(pop)

struct BTableChangesDelta *p_BTableChangesDelta;
#endif
172
173
/* Flag manipulation on a block-table entry (lvalue macros). */
#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)

/* In-RAM table sizes: one u32 LBA->PBA entry, one u8 wear counter and
 * one u16 read counter per data block. */
#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u32))
#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u8))
#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u16))
/* On flash, LBA->PBA entries are packed into 3 bytes (large block-number
 * builds) or 2 bytes each; counters are stored at their RAM size. */
#if SUPPORT_LARGE_BLOCKNUM
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u8) * 3)
#else
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u16))
#endif
#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
	FTL_Get_WearCounter_Table_Mem_Size_Bytes
#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
	FTL_Get_ReadCounter_Table_Mem_Size_Bytes
194
195static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
196{
197 u32 byte_num;
198
199 if (DeviceInfo.MLCDevice) {
200 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
201 DeviceInfo.wDataBlockNum * sizeof(u8) +
202 DeviceInfo.wDataBlockNum * sizeof(u16);
203 } else {
204 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
205 DeviceInfo.wDataBlockNum * sizeof(u8);
206 }
207
208 byte_num += 4 * sizeof(u8);
209
210 return byte_num;
211}
212
213static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
214{
215 return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
216}
217
/*
 * Serialize one chunk of the in-RAM block table into @flashBuf.
 * @flashBuf: destination buffer for this chunk
 * @sizeToTx: number of bytes to produce in this call
 * @sizeTxed: bytes already produced by earlier calls (overall progress)
 * Returns the number of bytes written to flashBuf.
 *
 * On-flash layout: packed LBA->PBA entries, then wear counters, then
 * (MLC only) 16-bit read counters stored high byte first.
 */
static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
			u32 sizeTxed)
{
	u32 wBytesCopied, blk_tbl_size, wBytes;
	u32 *pbt = (u32 *)g_pBlockTable;

	blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
	for (wBytes = 0;
	(wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
	wBytes++) {
#if SUPPORT_LARGE_BLOCKNUM
		/* 3 bytes per u32 entry: offsets %3 == 0/1/2 emit
		 * bits 23..16, 15..8, 7..0 respectively. */
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
		>> (((wBytes + sizeTxed) % 3) ?
		((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
#else
		/* 2 bytes per entry, high byte first. */
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
		>> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
#endif
	}

	/* Rebase sizeTxed to be relative to the wear-counter region. */
	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
	blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
	wBytesCopied = wBytes;
	wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
		(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
	memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);

	/* Rebase again, now relative to the read-counter region. */
	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;

	if (DeviceInfo.MLCDevice) {
		blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
		wBytesCopied += wBytes;
		for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
			((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
			flashBuf[wBytes + wBytesCopied] =
				(g_pReadCounter[(wBytes + sizeTxed) / 2] >>
				(((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
	}

	return wBytesCopied + wBytes;
}
259
260static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
261 u32 sizeToTx, u32 sizeTxed)
262{
263 u32 wBytesCopied, blk_tbl_size, wBytes;
264 u32 *pbt = (u32 *)g_pBlockTable;
265
266 blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
267 for (wBytes = 0; (wBytes < sizeToTx) &&
268 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
269#if SUPPORT_LARGE_BLOCKNUM
270 if (!((wBytes + sizeTxed) % 3))
271 pbt[(wBytes + sizeTxed) / 3] = 0;
272 pbt[(wBytes + sizeTxed) / 3] |=
273 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
274 ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
275#else
276 if (!((wBytes + sizeTxed) % 2))
277 pbt[(wBytes + sizeTxed) / 2] = 0;
278 pbt[(wBytes + sizeTxed) / 2] |=
279 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
280 0 : 8));
281#endif
282 }
283
284 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
285 blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
286 wBytesCopied = wBytes;
287 wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
288 (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
289 memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
290 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
291
292 if (DeviceInfo.MLCDevice) {
293 wBytesCopied += wBytes;
294 blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
295 for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
296 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
297 if (((wBytes + sizeTxed) % 2))
298 g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
299 g_pReadCounter[(wBytes + sizeTxed) / 2] |=
300 (flashBuf[wBytes] <<
301 (((wBytes + sizeTxed) % 2) ? 0 : 8));
302 }
303 }
304
305 return wBytesCopied+wBytes;
306}
307
308static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
309{
310 int i;
311
312 for (i = 0; i < BTSIG_BYTES; i++)
313 buf[BTSIG_OFFSET + i] =
314 ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
315 (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
316
317 return PASS;
318}
319
/*
 * Recover candidate block-table tags from the signature bytes written by
 * FTL_Insert_Block_Table_Signature().  Signature bytes ascend by
 * BTSIG_DELTA (mod the tag range), so any pair of bytes whose difference
 * is a non-zero multiple of BTSIG_DELTA votes for a candidate tag.
 * @buf: page buffer containing the signature at BTSIG_OFFSET
 * @tagarray: out - set to the candidate array (static storage, so this
 *            function is not reentrant)
 * Returns the number of distinct candidates found (at most BTSIG_BYTES/2).
 */
static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
{
	static u8 tag[BTSIG_BYTES >> 1];
	int i, j, k, tagi, tagtemp, status;

	*tagarray = (u8 *)tag;
	tagi = 0;

	for (i = 0; i < (BTSIG_BYTES - 1); i++) {
		for (j = i + 1; (j < BTSIG_BYTES) &&
			(tagi < (BTSIG_BYTES >> 1)); j++) {
			tagtemp = buf[BTSIG_OFFSET + j] -
				buf[BTSIG_OFFSET + i];
			if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
				/* Undo position i's delta offset to get the
				 * base tag implied by this pair. */
				tagtemp = (buf[BTSIG_OFFSET + i] +
					(1 + LAST_BT_ID - FIRST_BT_ID) -
					(i * BTSIG_DELTA)) %
					(1 + LAST_BT_ID - FIRST_BT_ID);
				status = FAIL;
				for (k = 0; k < tagi; k++) {
					if (tagtemp == tag[k])
						status = PASS;
				}

				if (status == FAIL) {
					tag[tagi++] = tagtemp;
					/* NOTE(review): appears intended to
					 * advance the scan past an adjacent
					 * match; the second line reads the
					 * already-updated i, so verify exact
					 * behavior before relying on it. */
					i = (j == (i + 1)) ? i + 1 : i;
					j = (j == (i + 1)) ? i + 1 : i;
				}
			}
		}
	}

	return tagi;
}
355
356
357static int FTL_Execute_SPL_Recovery(void)
358{
359 u32 j, block, blks;
360 u32 *pbt = (u32 *)g_pBlockTable;
361 int ret;
362
363 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
364 __FILE__, __LINE__, __func__);
365
366 blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
367 for (j = 0; j <= blks; j++) {
368 block = (pbt[j]);
369 if (((block & BAD_BLOCK) != BAD_BLOCK) &&
370 ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
371 ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
372 if (FAIL == ret) {
373 nand_dbg_print(NAND_DBG_WARN,
374 "NAND Program fail in %s, Line %d, "
375 "Function: %s, new Bad Block %d "
376 "generated!\n",
377 __FILE__, __LINE__, __func__,
378 (int)(block & ~BAD_BLOCK));
379 MARK_BLOCK_AS_BAD(pbt[j]);
380 }
381 }
382 }
383
384 return PASS;
385}
386
387/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
388* Function: GLOB_FTL_IdentifyDevice
389* Inputs: pointer to identify data structure
390* Outputs: PASS / FAIL
391* Description: the identify data structure is filled in with
392* information for the block driver.
393*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
394int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
395{
396 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
397 __FILE__, __LINE__, __func__);
398
399 dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
400 dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
401 dev_data->PageDataSize = DeviceInfo.wPageDataSize;
402 dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
403 dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
404
405 return PASS;
406}
407
/*
 * Allocate every FTL-layer buffer in one pass.
 *
 * The wear-counter and (MLC) read-counter tables are carved out of the
 * single g_pBlockTable allocation rather than allocated separately, so
 * free_memory() only needs to kfree(g_pBlockTable) for all three.
 *
 * On any failure the goto ladder unwinds exactly the allocations made so
 * far and returns -ENOMEM; on success returns PASS.
 *
 * NOTE(review): most allocations use GFP_ATOMIC even though this runs
 * from GLOB_FTL_Init(), while the CDMA descriptor buffers use
 * GFP_KERNEL -- confirm whether atomic context is actually required.
 */
static int allocate_memory(void)
{
	u32 block_table_size, page_size, block_size, mem_size;
	u32 total_bytes = 0;	/* running total, reported at the end */
	int i;
#if CMD_DMA
	int j;
#endif

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	page_size = DeviceInfo.wPageSize;
	block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	/* LBA->PBA map + wear counters + read counters, rounded up to a
	 * whole number of data pages. */
	block_table_size = DeviceInfo.wDataBlockNum *
		(sizeof(u32) + sizeof(u8) + sizeof(u16));
	block_table_size += (DeviceInfo.wPageDataSize -
		(block_table_size % DeviceInfo.wPageDataSize)) %
		DeviceInfo.wPageDataSize;

	/* Malloc memory for block tables */
	g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBlockTable)
		goto block_table_fail;
	memset(g_pBlockTable, 0, block_table_size);
	total_bytes += block_table_size;

	/* Wear/read counter tables live inside the same allocation. */
	g_pWearCounter = (u8 *)(g_pBlockTable +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounter = (u16 *)(g_pBlockTable +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory and init for cache items */
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		Cache.array[i].address = NAND_CACHE_INIT_ADDR;
		Cache.array[i].use_cnt = 0;
		Cache.array[i].changed = CLEAR;
		Cache.array[i].buf = kmalloc(Cache.cache_item_size,
			GFP_ATOMIC);
		if (!Cache.array[i].buf)
			goto cache_item_fail;
		memset(Cache.array[i].buf, 0, Cache.cache_item_size);
		total_bytes += Cache.cache_item_size;
	}

	/* Malloc memory for IPF */
	g_pIPF = kmalloc(page_size, GFP_ATOMIC);
	if (!g_pIPF)
		goto ipf_fail;
	memset(g_pIPF, 0, page_size);
	total_bytes += page_size;

	/* Malloc memory for data merging during Level2 Cache flush */
	cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
	if (!cache_l2_page_buf)
		goto cache_l2_page_buf_fail;
	memset(cache_l2_page_buf, 0xff, page_size);
	total_bytes += page_size;

	cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!cache_l2_blk_buf)
		goto cache_l2_blk_buf_fail;
	memset(cache_l2_blk_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for temp buffer */
	g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
	if (!g_pTempBuf)
		goto Temp_buf_fail;
	memset(g_pTempBuf, 0, Cache.cache_item_size);
	total_bytes += Cache.cache_item_size;

	/* Malloc memory for block table blocks */
	mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
	g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTBlocks)
		goto bt_blocks_fail;
	memset(g_pBTBlocks, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Check_Block_Table */
	flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
	if (!flag_check_blk_table)
		goto flag_check_blk_table_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Search_Block_Table_IN_Block */
	tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf_search_bt_in_block)
		goto tmp_buf_search_bt_in_block_fail;
	memset(tmp_buf_search_bt_in_block, 0xff, page_size);
	total_bytes += page_size;

	/* Spare-area-sized buffers (page size minus data size). */
	mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
	spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_search_bt_in_block)
		goto spare_buf_search_bt_in_block_fail;
	memset(spare_buf_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_bt_search_bt_in_block)
		goto spare_buf_bt_search_bt_in_block_fail;
	memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Block_Table */
	tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf1_read_blk_table)
		goto tmp_buf1_read_blk_table_fail;
	memset(tmp_buf1_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf2_read_blk_table)
		goto tmp_buf2_read_blk_table_fail;
	memset(tmp_buf2_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	/* Malloc memory for function FTL_Static_Wear_Leveling */
	flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
					GFP_ATOMIC);
	if (!flags_static_wear_leveling)
		goto flags_static_wear_leveling_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Write_Block_Table_Data */
	if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
		mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
			2 * DeviceInfo.wPageSize;
	else
		mem_size = DeviceInfo.wPageSize;
	tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
	if (!tmp_buf_write_blk_table_data)
		goto tmp_buf_write_blk_table_data_fail;
	memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Disturbance */
	tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
	if (!tmp_buf_read_disturbance)
		goto tmp_buf_read_disturbance_fail;
	memset(tmp_buf_read_disturbance, 0xff, block_size);
	total_bytes += block_size;

	/* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
	buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_read_page_main_spare)
		goto buf_read_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
	buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_write_page_main_spare)
		goto buf_write_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
	buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_read_page_spare)
		goto buf_read_page_spare_fail;
	memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

	/* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
	buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_get_bad_block)
		goto buf_get_bad_block_fail;
	memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

#if CMD_DMA
	g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!g_temp_buf)
		goto temp_buf_fail;
	memset(g_temp_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for copy of block table used in CDMA mode */
	g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBTStartingCopy)
		goto bt_starting_copy;
	memset(g_pBTStartingCopy, 0, block_table_size);
	total_bytes += block_table_size;

	/* Counter copies are carved out of g_pBTStartingCopy, mirroring
	 * the layout of the primary table above. */
	g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory for block table copies */
	mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
			5 * DeviceInfo.wDataBlockNum * sizeof(u8);
	if (DeviceInfo.MLCDevice)
		mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
	g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBlockTableCopies)
		goto blk_table_copies_fail;
	memset(g_pBlockTableCopies, 0, mem_size);
	total_bytes += mem_size;
	g_pNextBlockTable = g_pBlockTableCopies;

	/* Malloc memory for Block Table Delta */
	mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
	g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTDelta)
		goto bt_delta_fail;
	memset(g_pBTDelta, 0, mem_size);
	total_bytes += mem_size;
	g_pBTDelta_Free = g_pBTDelta;

	/* Malloc memory for Copy Back Buffers */
	for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
		cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
		if (!cp_back_buf_copies[j])
			goto cp_back_buf_copies_fail;
		memset(cp_back_buf_copies[j], 0, block_size);
		total_bytes += block_size;
	}
	cp_back_buf_idx = 0;

	/* Malloc memory for pending commands list */
	mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
	info.pcmds = kzalloc(mem_size, GFP_KERNEL);
	if (!info.pcmds)
		goto pending_cmds_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for CDMA descripter table */
	mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
	info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.cdma_desc_buf)
		goto cdma_desc_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for Memcpy descripter table */
	mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
	info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.memcp_desc_buf)
		goto memcp_desc_buf_fail;
	total_bytes += mem_size;
#endif

	nand_dbg_print(NAND_DBG_WARN,
		"Total memory allocated in FTL layer: %d\n", total_bytes);

	return PASS;

	/* Error unwind: each label frees everything allocated before the
	 * failing allocation, in reverse order. */
#if CMD_DMA
memcp_desc_buf_fail:
	kfree(info.cdma_desc_buf);
cdma_desc_buf_fail:
	kfree(info.pcmds);
pending_cmds_buf_fail:
cp_back_buf_copies_fail:
	/* j is either the failing index or COPY_BACK_BUF_NUM when the
	 * loop completed; either way free [0, j-1]. */
	j--;
	for (; j >= 0; j--)
		kfree(cp_back_buf_copies[j]);
	kfree(g_pBTDelta);
bt_delta_fail:
	kfree(g_pBlockTableCopies);
blk_table_copies_fail:
	kfree(g_pBTStartingCopy);
bt_starting_copy:
	kfree(g_temp_buf);
temp_buf_fail:
	kfree(buf_get_bad_block);
#endif

buf_get_bad_block_fail:
	kfree(buf_read_page_spare);
buf_read_page_spare_fail:
	kfree(buf_write_page_main_spare);
buf_write_page_main_spare_fail:
	kfree(buf_read_page_main_spare);
buf_read_page_main_spare_fail:
	kfree(tmp_buf_read_disturbance);
tmp_buf_read_disturbance_fail:
	kfree(tmp_buf_write_blk_table_data);
tmp_buf_write_blk_table_data_fail:
	kfree(flags_static_wear_leveling);
flags_static_wear_leveling_fail:
	kfree(tmp_buf2_read_blk_table);
tmp_buf2_read_blk_table_fail:
	kfree(tmp_buf1_read_blk_table);
tmp_buf1_read_blk_table_fail:
	kfree(spare_buf_bt_search_bt_in_block);
spare_buf_bt_search_bt_in_block_fail:
	kfree(spare_buf_search_bt_in_block);
spare_buf_search_bt_in_block_fail:
	kfree(tmp_buf_search_bt_in_block);
tmp_buf_search_bt_in_block_fail:
	kfree(flag_check_blk_table);
flag_check_blk_table_fail:
	kfree(g_pBTBlocks);
bt_blocks_fail:
	kfree(g_pTempBuf);
Temp_buf_fail:
	kfree(cache_l2_blk_buf);
cache_l2_blk_buf_fail:
	kfree(cache_l2_page_buf);
cache_l2_page_buf_fail:
	kfree(g_pIPF);
ipf_fail:
cache_item_fail:
	i--;
	for (; i >= 0; i--)
		kfree(Cache.array[i].buf);
	kfree(g_pBlockTable);
block_table_fail:
	printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
		__FILE__, __LINE__);

	return -ENOMEM;
}
731
732/* .... */
733static int free_memory(void)
734{
735 int i;
736
737#if CMD_DMA
738 kfree(info.memcp_desc_buf);
739 kfree(info.cdma_desc_buf);
740 kfree(info.pcmds);
741 for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
742 kfree(cp_back_buf_copies[i]);
743 kfree(g_pBTDelta);
744 kfree(g_pBlockTableCopies);
745 kfree(g_pBTStartingCopy);
746 kfree(g_temp_buf);
747 kfree(buf_get_bad_block);
748#endif
749 kfree(buf_read_page_spare);
750 kfree(buf_write_page_main_spare);
751 kfree(buf_read_page_main_spare);
752 kfree(tmp_buf_read_disturbance);
753 kfree(tmp_buf_write_blk_table_data);
754 kfree(flags_static_wear_leveling);
755 kfree(tmp_buf2_read_blk_table);
756 kfree(tmp_buf1_read_blk_table);
757 kfree(spare_buf_bt_search_bt_in_block);
758 kfree(spare_buf_search_bt_in_block);
759 kfree(tmp_buf_search_bt_in_block);
760 kfree(flag_check_blk_table);
761 kfree(g_pBTBlocks);
762 kfree(g_pTempBuf);
763 kfree(g_pIPF);
764 for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
765 kfree(Cache.array[i].buf);
766 kfree(g_pBlockTable);
767
768 return 0;
769}
770
771static void dump_cache_l2_table(void)
772{
773 struct list_head *p;
774 struct spectra_l2_cache_list *pnd;
676cecaa 775 int n;
494a43bb
AO
776
777 n = 0;
778 list_for_each(p, &cache_l2.table.list) {
779 pnd = list_entry(p, struct spectra_l2_cache_list, list);
780 nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
781/*
782 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
783 if (pnd->pages_array[i] != MAX_U32_VALUE)
784 nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
785 }
786*/
787 n++;
788 }
789}
790
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_FTL_Init
* Inputs:       none
* Outputs:      PASS=0 / FAIL=1
* Description:  allocates the memory for cache array,
*               important data structures
*               clears the cache array
*               reads the block table from flash into array
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Init(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* One data page per L1 cache item. */
	Cache.pages_per_item = 1;
	Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;

	if (allocate_memory() != PASS)
		return FAIL;

#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Snapshot the cache and reset the shadow entries so state can
	 * be rolled back if a CDMA chain fails part-way. */
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_CHANS + MAX_DESCS));
#endif
	ftl_cmd_cnt = 0;
#endif

	if (FTL_Read_Block_Table() != PASS)
		return FAIL;

	/* Init the Level2 Cache data structure */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	dump_cache_l2_table();

	return 0;
}
839
840
841#if CMD_DMA
842#if 0
/*
 * Commit the queued block-table deltas of a successfully completed CDMA
 * command (PendingCMD[idx]) into the committed copies
 * (g_pBTStartingCopy / g_pWearCounterCopy / g_pReadCounterCopy).
 * ValidFields is a bitmask selecting which delta members are live.
 * NOTE(review): compiled out (#if 0) with the other CDMA event helpers.
 */
static void save_blk_table_changes(u16 idx)
{
	u8 ftl_cmd;
	u32 *pbt = (u32 *)g_pBTStartingCopy;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u16 id;
	u8 cache_blks;

	/* Fold the command's shadow cache entry into the committed copy. */
	id = idx - MAX_CHANS;
	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		cache_start_copy.array[cache_blks].address =
			int_cache[id].cache.address;
		cache_start_copy.array[cache_blks].changed =
			int_cache[id].cache.changed;
	}
#endif

	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;

	/* Apply every delta queued up to this command's tag. */
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		if (p_BTableChangesDelta->ValidFields == 0x01) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
		} else if (p_BTableChangesDelta->ValidFields == 0x0C) {
			pbt[p_BTableChangesDelta->BT_Index] =
				p_BTableChangesDelta->BT_Entry_Value;
			debug_boundary_error(((
				p_BTableChangesDelta->BT_Index)),
				DeviceInfo.wDataBlockNum, 0);
		} else if (p_BTableChangesDelta->ValidFields == 0x03) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
			g_wBlockTableIndex =
				p_BTableChangesDelta->g_wBlockTableIndex;
		} else if (p_BTableChangesDelta->ValidFields == 0x30) {
			g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
				p_BTableChangesDelta->WC_Entry_Value;
		} else if ((DeviceInfo.MLCDevice) &&
			(p_BTableChangesDelta->ValidFields == 0xC0)) {
			g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
				p_BTableChangesDelta->RC_Entry_Value;
			nand_dbg_print(NAND_DBG_DEBUG,
				"In event status setting read counter "
				"GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
				ftl_cmd,
				p_BTableChangesDelta->RC_Entry_Value,
				(unsigned int)p_BTableChangesDelta->RC_Index);
		} else {
			nand_dbg_print(NAND_DBG_DEBUG,
				"This should never occur \n");
		}
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}
}
900
/*
 * Discard the effects of command PendingCMD[n] after an earlier command
 * in the CDMA chain failed: mark blocks it wrote as DISCARD, skip past
 * its queued block-table deltas without applying them, and restore the
 * shadowed cache entry state.
 * NOTE(review): compiled out (#if 0) with the other CDMA event helpers.
 */
static void discard_cmds(u16 n)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long k;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* Blocks this write touched can no longer be trusted. */
	if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
		(PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
		for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
			if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
				MARK_BLK_AS_DISCARD(pbt[k]);
		}
	}

	/* Skip (do not apply) this command's deltas. */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[n].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = n - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if (PendingCMD[n].CMD == MEMCOPY_CMD) {
			/* A memcopy into a cache buffer invalidates that
			 * cache entry. */
			if ((cache_start_copy.array[cache_blks].buf <=
				PendingCMD[n].DataDestAddr) &&
				((cache_start_copy.array[cache_blks].buf +
				Cache.cache_item_size) >
				PendingCMD[n].DataDestAddr)) {
				cache_start_copy.array[cache_blks].address =
						NAND_CACHE_INIT_ADDR;
				cache_start_copy.array[cache_blks].use_cnt =
								0;
				cache_start_copy.array[cache_blks].changed =
								CLEAR;
			}
		} else {
			cache_start_copy.array[cache_blks].address =
					int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed =
					int_cache[id].cache.changed;
		}
	}
#endif
}
952
953static void process_cmd_pass(int *first_failed_cmd, u16 idx)
954{
955 if (0 == *first_failed_cmd)
956 save_blk_table_changes(idx);
957 else
958 discard_cmds(idx);
959}
960
/*
 * Handle a CDMA command that finished with CMD_FAIL or CMD_ABORT:
 * record the first failure, skip the command's queued deltas, restore
 * the shadowed cache state, and on erase/program failure mark every
 * block-table entry pointing at the failed physical block as bad.
 * NOTE(review): compiled out (#if 0) with the other CDMA event helpers.
 */
static void process_cmd_fail_abort(int *first_failed_cmd,
				u16 idx, int event)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long i;
	int erase_fail, program_fail;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* Remember only the first failure in the chain. */
	if (0 == *first_failed_cmd)
		*first_failed_cmd = PendingCMD[idx].SBDCmdIndex;

	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
		"while executing %u Command %u accesing Block %u\n",
		(unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
		PendingCMD[idx].CMD,
		(unsigned int)PendingCMD[idx].Block);

	/* Skip (do not apply) this command's deltas. */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = idx - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
			/* Failed write: keep the entry but force it dirty
			 * so the data is written again. */
			cache_start_copy.array[cache_blks].address =
					int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed = SET;
		} else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
			/* Failed read: invalidate the cache entry. */
			cache_start_copy.array[cache_blks].address =
				NAND_CACHE_INIT_ADDR;
			cache_start_copy.array[cache_blks].use_cnt = 0;
			cache_start_copy.array[cache_blks].changed =
							CLEAR;
		} else if (PendingCMD[idx].CMD == ERASE_CMD) {
			/* ? */
		} else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
			/* ? */
		}
	}
#endif

	erase_fail = (event == EVENT_ERASE_FAILURE) &&
			(PendingCMD[idx].CMD == ERASE_CMD);

	program_fail = (event == EVENT_PROGRAM_FAILURE) &&
			((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
			(PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));

	if (erase_fail || program_fail) {
		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
			if (PendingCMD[idx].Block ==
				(pbt[i] & (~BAD_BLOCK)))
				MARK_BLOCK_AS_BAD(pbt[i]);
		}
	}
}
1026
1027static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1028{
1029 u8 ftl_cmd;
1030 int cmd_match = 0;
1031
1032 if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
1033 cmd_match = 1;
1034
1035 if (PendingCMD[idx].Status == CMD_PASS) {
1036 process_cmd_pass(first_failed_cmd, idx);
1037 } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
1038 (PendingCMD[idx].Status == CMD_ABORT)) {
1039 process_cmd_fail_abort(first_failed_cmd, idx, event);
1040 } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
1041 PendingCMD[idx].Tag) {
1042 nand_dbg_print(NAND_DBG_DEBUG,
1043 " Command no. %hu is not executed\n",
1044 (unsigned int)PendingCMD[idx].Tag);
1045 ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1046 while (ftl_cmd <= PendingCMD[idx].Tag) {
1047 p_BTableChangesDelta += 1;
1048 ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1049 }
1050 }
1051}
1052#endif
1053
1054static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1055{
1056 printk(KERN_ERR "temporary workaround function. "
1057 "Should not be called! \n");
1058}
1059
1060/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1061* Function: GLOB_FTL_Event_Status
1062* Inputs: none
1063* Outputs: Event Code
1064* Description: It is called by SBD after hardware interrupt signalling
1065* completion of commands chain
1066* It does following things
1067* get event status from LLD
1068* analyze command chain status
1069* determine last command executed
1070* analyze results
1071* rebuild the block table in case of uncorrectable error
1072* return event code
1073*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Event_Status(int *first_failed_cmd)
{
	int event_code = PASS;
	u16 i_P;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	*first_failed_cmd = 0;

	/* Overall completion status of the command chain from the LLD */
	event_code = GLOB_LLD_Event_Status();

	switch (event_code) {
	case EVENT_PASS:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
		break;
	case EVENT_UNCORRECTABLE_DATA_ERROR:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
		break;
	case EVENT_PROGRAM_FAILURE:
	case EVENT_ERASE_FAILURE:
		nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
			"Event code: 0x%x\n", event_code);
		/* Walk every pending command of the failed chain and
		 * commit or roll back its block table deltas */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta;
		for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
				i_P++)
			process_cmd(first_failed_cmd, i_P, event_code);
		/* Restore the live tables from the pre-chain copies */
		memcpy(g_pBlockTable, g_pBTStartingCopy,
			DeviceInfo.wDataBlockNum * sizeof(u32));
		memcpy(g_pWearCounter, g_pWearCounterCopy,
			DeviceInfo.wDataBlockNum * sizeof(u8));
		if (DeviceInfo.MLCDevice)
			memcpy(g_pReadCounter, g_pReadCounterCopy,
				DeviceInfo.wDataBlockNum * sizeof(u16));

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
		/* Restore the cache snapshot taken before the chain and
		 * clear the per-command cache delta list */
		memcpy((void *)&Cache, (void *)&cache_start_copy,
			sizeof(struct flash_cache_tag));
		memset((void *)&int_cache, -1,
			sizeof(struct flash_cache_delta_list_tag) *
			(MAX_DESCS + MAX_CHANS));
#endif
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Handling unexpected event code - 0x%x\n",
			event_code);
		event_code = ERR;
		break;
	}

	/* Re-snapshot the (possibly restored) tables as the starting
	 * copies for the next command chain */
	memcpy(g_pBTStartingCopy, g_pBlockTable,
		DeviceInfo.wDataBlockNum * sizeof(u32));
	memcpy(g_pWearCounterCopy, g_pWearCounter,
		DeviceInfo.wDataBlockNum * sizeof(u8));
	if (DeviceInfo.MLCDevice)
		memcpy(g_pReadCounterCopy, g_pReadCounter,
			DeviceInfo.wDataBlockNum * sizeof(u16));

	/* Reset the delta area and per-chain counters */
	g_pBTDelta_Free = g_pBTDelta;
	ftl_cmd_cnt = 0;
	g_pNextBlockTable = g_pBlockTableCopies;
	cp_back_buf_idx = 0;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* New cache snapshot for the next chain */
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_DESCS + MAX_CHANS));
#endif

	return event_code;
}
1149
1150/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1151* Function: glob_ftl_execute_cmds
1152* Inputs: none
1153* Outputs: none
1154* Description: pass thru to LLD
1155***************************************************************/
1156u16 glob_ftl_execute_cmds(void)
1157{
1158 nand_dbg_print(NAND_DBG_TRACE,
1159 "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
1160 (unsigned int)ftl_cmd_cnt);
1161 g_SBDCmdIndex = 0;
1162 return glob_lld_execute_cmds();
1163}
1164
1165#endif
1166
1167#if !CMD_DMA
1168/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1169* Function: GLOB_FTL_Read Immediate
1170* Inputs: pointer to data
1171* address of data
1172* Outputs: PASS / FAIL
1173* Description: Reads one page of data into RAM directly from flash without
1174* using or disturbing cache.It is assumed this function is called
1175* with CMD-DMA disabled.
1176*****************************************************************/
1177int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
1178{
1179 int wResult = FAIL;
1180 u32 Block;
1181 u16 Page;
1182 u32 phy_blk;
1183 u32 *pbt = (u32 *)g_pBlockTable;
1184
1185 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1186 __FILE__, __LINE__, __func__);
1187
1188 Block = BLK_FROM_ADDR(addr);
1189 Page = PAGE_FROM_ADDR(addr, Block);
1190
1191 if (!IS_SPARE_BLOCK(Block))
1192 return FAIL;
1193
1194 phy_blk = pbt[Block];
1195 wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
1196
1197 if (DeviceInfo.MLCDevice) {
1198 g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
1199 if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
1200 >= MAX_READ_COUNTER)
1201 FTL_Read_Disturbance(phy_blk);
1202 if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1203 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1204 FTL_Write_IN_Progress_Block_Table_Page();
1205 }
1206 }
1207
1208 return wResult;
1209}
1210#endif
1211
1212#ifdef SUPPORT_BIG_ENDIAN
1213/*********************************************************************
1214* Function: FTL_Invert_Block_Table
1215* Inputs: none
1216* Outputs: none
1217* Description: Re-format the block table in ram based on BIG_ENDIAN and
1218* LARGE_BLOCKNUM if necessary
1219**********************************************************************/
static void FTL_Invert_Block_Table(void)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

#ifdef SUPPORT_LARGE_BLOCKNUM
	/* Byte-swap each block table entry in place (32-bit entries).
	 * NOTE(review): g_pWearCounter is copied elsewhere in this file
	 * with sizeof(u8), i.e. it looks like a byte array; applying a
	 * 32-bit (or 16-bit below) swap to it seems suspicious -- confirm
	 * the intended entry width before relying on this path. */
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT32(pbt[i]);
		g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
	}
#else
	/* 16-bit block table entries when large block numbers are off */
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT16(pbt[i]);
		g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
	}
#endif
}
1240#endif
1241
1242/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1243* Function: GLOB_FTL_Flash_Init
1244* Inputs: none
1245* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1246* Description: The flash controller is initialized
1247* The flash device is reset
1248* Perform a flash READ ID command to confirm that a
1249* valid device is attached and active.
1250* The DeviceInfo structure gets filled in
1251*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1252int GLOB_FTL_Flash_Init(void)
1253{
1254 int status = FAIL;
1255
1256 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1257 __FILE__, __LINE__, __func__);
1258
1259 g_SBDCmdIndex = 0;
1260
1261 GLOB_LLD_Flash_Init();
1262
1263 status = GLOB_LLD_Read_Device_ID();
1264
1265 return status;
1266}
1267
1268/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1269* Inputs: none
1270* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1271* Description: The flash controller is released
1272*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1273int GLOB_FTL_Flash_Release(void)
1274{
1275 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1276 __FILE__, __LINE__, __func__);
1277
1278 return GLOB_LLD_Flash_Release();
1279}
1280
1281
1282/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1283* Function: GLOB_FTL_Cache_Release
1284* Inputs: none
1285* Outputs: none
1286* Description: release all allocated memory in GLOB_FTL_Init
1287* (allocated in GLOB_FTL_Init)
1288*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1289void GLOB_FTL_Cache_Release(void)
1290{
1291 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1292 __FILE__, __LINE__, __func__);
1293
1294 free_memory();
1295}
1296
1297/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1298* Function: FTL_Cache_If_Hit
1299* Inputs: Page Address
1300* Outputs: Block number/UNHIT BLOCK
1301* Description: Determines if the addressed page is in cache
1302*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1303static u16 FTL_Cache_If_Hit(u64 page_addr)
1304{
1305 u16 item;
1306 u64 addr;
1307 int i;
1308
1309 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1310 __FILE__, __LINE__, __func__);
1311
1312 item = UNHIT_CACHE_ITEM;
1313 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1314 addr = Cache.array[i].address;
1315 if ((page_addr >= addr) &&
1316 (page_addr < (addr + Cache.cache_item_size))) {
1317 item = i;
1318 break;
1319 }
1320 }
1321
1322 return item;
1323}
1324
1325/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1326* Function: FTL_Calculate_LRU
1327* Inputs: None
1328* Outputs: None
1329* Description: Calculate the least recently block in a cache and record its
1330* index in LRU field.
1331*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1332static void FTL_Calculate_LRU(void)
1333{
1334 u16 i, bCurrentLRU, bTempCount;
1335
1336 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1337 __FILE__, __LINE__, __func__);
1338
1339 bCurrentLRU = 0;
1340 bTempCount = MAX_WORD_VALUE;
1341
1342 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1343 if (Cache.array[i].use_cnt < bTempCount) {
1344 bCurrentLRU = i;
1345 bTempCount = Cache.array[i].use_cnt;
1346 }
1347 }
1348
1349 Cache.LRU = bCurrentLRU;
1350}
1351
1352/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1353* Function: FTL_Cache_Read_Page
1354* Inputs: pointer to read buffer, logical address and cache item number
1355* Outputs: None
1356* Description: Read the page from the cached block addressed by blocknumber
1357*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1358static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
1359{
1360 u8 *start_addr;
1361
1362 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1363 __FILE__, __LINE__, __func__);
1364
1365 start_addr = Cache.array[cache_item].buf;
1366 start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
1367 DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
1368
1369#if CMD_DMA
1370 GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
1371 DeviceInfo.wPageDataSize, 0);
1372 ftl_cmd_cnt++;
1373#else
1374 memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
1375#endif
1376
1377 if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
1378 Cache.array[cache_item].use_cnt++;
1379}
1380
1381/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1382* Function: FTL_Cache_Read_All
1383* Inputs: pointer to read buffer,block address
1384* Outputs: PASS=0 / FAIL =1
1385* Description: It reads pages in cache
1386*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
{
	int wResult = PASS;
	u32 Block;
	u32 lba;
	u16 Page;
	u16 PageCount;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 i;

	Block = BLK_FROM_ADDR(phy_addr);
	Page = PAGE_FROM_ADDR(phy_addr, Block);
	PageCount = Cache.pages_per_item;

	nand_dbg_print(NAND_DBG_DEBUG,
			"%s, Line %d, Function: %s, Block: 0x%x\n",
			__FILE__, __LINE__, __func__, Block);

	/* Reverse-map the physical block to its logical entry */
	lba = 0xffffffff;
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if ((pbt[i] & (~BAD_BLOCK)) == Block) {
			lba = i;
			if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
				IS_DISCARDED_BLOCK(i)) {
				/* Block holds no valid user data: hand
				 * back an erased (all-0xFF) pattern
				 * instead of reading the flash. */
				/* Add by yunpeng -2008.12.3 */
#if CMD_DMA
				GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
					PageCount * DeviceInfo.wPageDataSize, 0);
				ftl_cmd_cnt++;
#else
				memset(pData, 0xFF,
					PageCount * DeviceInfo.wPageDataSize);
#endif
				return wResult;
			} else {
				continue; /* break ?? */
			}
		}
	}

	if (0xffffffff == lba)
		printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");

#if CMD_DMA
	wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
			PageCount, LLD_CMD_FLAG_MODE_CDMA);
	if (DeviceInfo.MLCDevice) {
		/* Track read disturb on MLC parts and record the read
		 * counter change in the delta list so a failed chain can
		 * be rolled back. */
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Read Counter modified in ftl_cmd_cnt %u"
			" Block %u Counter%u\n",
			ftl_cmd_cnt, (unsigned int)Block,
			g_pReadCounter[Block -
			DeviceInfo.wSpectraStartBlock]);

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			Block - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
		/* 0xC0: only the read-counter fields of this delta are valid */
		p_BTableChangesDelta->ValidFields = 0xC0;

		ftl_cmd_cnt++;

		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	} else {
		ftl_cmd_cnt++;
	}
#else
	wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
	if (wResult == FAIL)
		return wResult;

	if (DeviceInfo.MLCDevice) {
		/* Same read-disturb bookkeeping as above, without the
		 * CDMA delta recording. */
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}
#endif
	return wResult;
}
1482
1483/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1484* Function: FTL_Cache_Write_All
1485* Inputs: pointer to cache in sys memory
1486* address of free block in flash
1487* Outputs: PASS=0 / FAIL=1
1488* Description: writes all the pages of the block in cache to flash
1489*
1490* NOTE:need to make sure this works ok when cache is limited
1491* to a partial block. This is where copy-back would be
1492* activated. This would require knowing which pages in the
1493* cached block are clean/dirty.Right now we only know if
1494* the whole block is clean/dirty.
1495*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
{
	u16 wResult = PASS;
	u32 Block;
	u16 Page;
	u16 PageCount;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
		"on %d\n", cache_block_to_write,
		(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));

	Block = BLK_FROM_ADDR(blk_addr);
	Page = PAGE_FROM_ADDR(blk_addr, Block);
	/* One cache item's worth of pages is written per call */
	PageCount = Cache.pages_per_item;

#if CMD_DMA
	/* Queue the program through the CDMA chain; the actual failure,
	 * if any, is reported when the chain completes. */
	if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
			Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated! "
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
	ftl_cmd_cnt++;
#else
	if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
			" Line %d, Function %s, new Bad Block %d generated!"
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
#endif
	return wResult;
}
1536
494a43bb
AO
1537/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1538* Function: FTL_Copy_Block
1539* Inputs: source block address
1540* Destination block address
1541* Outputs: PASS=0 / FAIL=1
1542* Description: used only for static wear leveling to move the block
1543* containing static data to new blocks(more worn)
1544*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
{
	int i, r1, r2, wResult = PASS;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Copy the block one cache item (pages_per_item pages) at a time */
	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
		r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
					i * DeviceInfo.wPageDataSize);
		r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
					i * DeviceInfo.wPageDataSize);
		/* NOTE(review): r1 is only compared against ERR, but
		 * FTL_Cache_Read_All returns the PASS/FAIL result of the
		 * LLD read, so a FAIL read would go unnoticed here --
		 * confirm whether that is intended. */
		if ((ERR == r1) || (FAIL == r2)) {
			wResult = FAIL;
			break;
		}
	}

	return wResult;
}
1565
1566/* Search the block table to find out the least wear block and then return it */
1567static u32 find_least_worn_blk_for_l2_cache(void)
1568{
1569 int i;
1570 u32 *pbt = (u32 *)g_pBlockTable;
1571 u8 least_wear_cnt = MAX_BYTE_VALUE;
1572 u32 least_wear_blk_idx = MAX_U32_VALUE;
1573 u32 phy_idx;
1574
1575 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1576 if (IS_SPARE_BLOCK(i)) {
1577 phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
1578 if (phy_idx > DeviceInfo.wSpectraEndBlock)
1579 printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
1580 "Too big phy block num (%d)\n", phy_idx);
1581 if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
1582 least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
1583 least_wear_blk_idx = i;
1584 }
1585 }
1586 }
1587
1588 nand_dbg_print(NAND_DBG_WARN,
1589 "find_least_worn_blk_for_l2_cache: "
1590 "find block %d with least worn counter (%d)\n",
1591 least_wear_blk_idx, least_wear_cnt);
1592
1593 return least_wear_blk_idx;
1594}
1595
1596
1597
1598/* Get blocks for Level2 Cache */
1599static int get_l2_cache_blks(void)
1600{
1601 int n;
1602 u32 blk;
1603 u32 *pbt = (u32 *)g_pBlockTable;
1604
1605 for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
1606 blk = find_least_worn_blk_for_l2_cache();
1607 if (blk > DeviceInfo.wDataBlockNum) {
1608 nand_dbg_print(NAND_DBG_WARN,
1609 "find_least_worn_blk_for_l2_cache: "
1610 "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
1611 return FAIL;
1612 }
1613 /* Tag the free block as discard in block table */
1614 pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
1615 /* Add the free block to the L2 Cache block array */
1616 cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
1617 }
1618
1619 return PASS;
1620}
1621
1622static int erase_l2_cache_blocks(void)
1623{
1624 int i, ret = PASS;
fd484b86 1625 u32 pblk, lblk = BAD_BLOCK;
494a43bb
AO
1626 u64 addr;
1627 u32 *pbt = (u32 *)g_pBlockTable;
1628
1629 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1630 __FILE__, __LINE__, __func__);
1631
1632 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
1633 pblk = cache_l2.blk_array[i];
1634
1635 /* If the L2 cache block is invalid, then just skip it */
1636 if (MAX_U32_VALUE == pblk)
1637 continue;
1638
1639 BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
1640
1641 addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
1642 if (PASS == GLOB_FTL_Block_Erase(addr)) {
1643 /* Get logical block number of the erased block */
1644 lblk = FTL_Get_Block_Index(pblk);
1645 BUG_ON(BAD_BLOCK == lblk);
1646 /* Tag it as free in the block table */
1647 pbt[lblk] &= (u32)(~DISCARD_BLOCK);
1648 pbt[lblk] |= (u32)(SPARE_BLOCK);
1649 } else {
1650 MARK_BLOCK_AS_BAD(pbt[lblk]);
1651 ret = ERR;
1652 }
1653 }
1654
1655 return ret;
1656}
1657
1658/*
1659 * Merge the valid data page in the L2 cache blocks into NAND.
1660*/
static int flush_l2_cache(void)
{
	struct list_head *p;
	struct spectra_l2_cache_list *pnd, *tmp_pnd;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 phy_blk, l2_blk;
	u64 addr;
	u16 l2_page;
	int i, ret = PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (list_empty(&cache_l2.table.list)) /* No data to flush */
		return ret;

	//dump_cache_l2_table();

	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
		FTL_Write_IN_Progress_Block_Table_Page();
	}

	/* For each logical block with pages in the L2 cache: rebuild the
	 * whole block image in RAM, then write it to a fresh block. */
	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
			IS_BAD_BLOCK(pnd->logical_blk_num) ||
			IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
			/* Block has no valid data on flash: start from an
			 * erased (all-0xFF) image */
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
		} else {
			/* Start from the block's current on-flash content */
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
				phy_blk, 0, DeviceInfo.wPagesPerBlock);
			if (ret == FAIL) {
				printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
			}
		}

		/* Overlay each cached page (high 16 bits of the map entry
		 * select the L2 block, low 16 bits the page) onto the
		 * in-RAM block image. */
		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
			if (pnd->pages_array[i] != MAX_U32_VALUE) {
				l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
				l2_page = pnd->pages_array[i] & 0xffff;
				ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
				if (ret == FAIL) {
					printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
				}
				memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
			}
		}

		/* Find a free block and tag the original block as discarded */
		addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
		ret = FTL_Replace_Block(addr);
		if (ret == FAIL) {
			printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
		}

		/* Write back the updated data into NAND */
		phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
		if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
			nand_dbg_print(NAND_DBG_WARN,
				"Program NAND block %d fail in %s, Line %d\n",
				phy_blk, __FILE__, __LINE__);
			/* This may not be really a bad block. So just tag it as discarded. */
			/* Then it has a chance to be erased when garbage collection. */
			/* If it is really bad, then the erase will fail and it will be marked */
			/* as bad then. Otherwise it will be marked as free and can be used again */
			MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
			/* Find another free block and write it again */
			FTL_Replace_Block(addr);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
				printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
					"Some data will be lost!\n", phy_blk);
				MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
			}
		} else {
			/* tag the new free block as used block */
			pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
		}
	}

	/* Destroy the L2 Cache table and free the memory of all nodes */
	list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
		list_del(&pnd->list);
		kfree(pnd);
	}

	/* Erase discard L2 cache blocks */
	if (erase_l2_cache_blocks() != PASS)
		nand_dbg_print(NAND_DBG_WARN,
			" Erase L2 cache blocks error in %s, Line %d\n",
			__FILE__, __LINE__);

	/* Init the Level2 Cache data structure */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	return ret;
}
1767
1768/*
1769 * Write back a changed victim cache item to the Level2 Cache
1770 * and update the L2 Cache table to map the change.
1771 * If the L2 Cache is full, then start to do the L2 Cache flush.
1772*/
1773static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
1774{
1775 u32 logical_blk_num;
1776 u16 logical_page_num;
1777 struct list_head *p;
1778 struct spectra_l2_cache_list *pnd, *pnd_new;
1779 u32 node_size;
1780 int i, found;
1781
1782 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1783 __FILE__, __LINE__, __func__);
1784
1785 /*
1786 * If Level2 Cache table is empty, then it means either:
1787 * 1. This is the first time that the function called after FTL_init
1788 * or
1789 * 2. The Level2 Cache has just been flushed
1790 *
1791 * So, 'steal' some free blocks from NAND for L2 Cache using
1792 * by just mask them as discard in the block table
1793 */
1794 if (list_empty(&cache_l2.table.list)) {
1795 BUG_ON(cache_l2.cur_blk_idx != 0);
1796 BUG_ON(cache_l2.cur_page_num!= 0);
1797 BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
1798 if (FAIL == get_l2_cache_blks()) {
1799 GLOB_FTL_Garbage_Collection();
1800 if (FAIL == get_l2_cache_blks()) {
1801 printk(KERN_ALERT "Fail to get L2 cache blks!\n");
1802 return FAIL;
1803 }
1804 }
1805 }
1806
1807 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1808 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1809 BUG_ON(logical_blk_num == MAX_U32_VALUE);
1810
1811 /* Write the cache item data into the current position of L2 Cache */
1812#if CMD_DMA
1813 /*
1814 * TODO
1815 */
1816#else
1817 if (FAIL == GLOB_LLD_Write_Page_Main(buf,
1818 cache_l2.blk_array[cache_l2.cur_blk_idx],
1819 cache_l2.cur_page_num, 1)) {
1820 nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
1821 "%s, Line %d, new Bad Block %d generated!\n",
1822 __FILE__, __LINE__,
1823 cache_l2.blk_array[cache_l2.cur_blk_idx]);
1824
1825 /* TODO: tag the current block as bad and try again */
1826
1827 return FAIL;
1828 }
1829#endif
1830
1831 /*
1832 * Update the L2 Cache table.
1833 *
1834 * First seaching in the table to see whether the logical block
1835 * has been mapped. If not, then kmalloc a new node for the
1836 * logical block, fill data, and then insert it to the list.
1837 * Otherwise, just update the mapped node directly.
1838 */
1839 found = 0;
1840 list_for_each(p, &cache_l2.table.list) {
1841 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1842 if (pnd->logical_blk_num == logical_blk_num) {
1843 pnd->pages_array[logical_page_num] =
1844 (cache_l2.cur_blk_idx << 16) |
1845 cache_l2.cur_page_num;
1846 found = 1;
1847 break;
1848 }
1849 }
1850 if (!found) { /* Create new node for the logical block here */
1851
1852 /* The logical pages to physical pages map array is
1853 * located at the end of struct spectra_l2_cache_list.
1854 */
1855 node_size = sizeof(struct spectra_l2_cache_list) +
1856 sizeof(u32) * DeviceInfo.wPagesPerBlock;
1857 pnd_new = kmalloc(node_size, GFP_ATOMIC);
1858 if (!pnd_new) {
1859 printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
1860 __FILE__, __LINE__);
1861 /*
1862 * TODO: Need to flush all the L2 cache into NAND ASAP
1863 * since no memory available here
1864 */
1865 }
1866 pnd_new->logical_blk_num = logical_blk_num;
1867 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
1868 pnd_new->pages_array[i] = MAX_U32_VALUE;
1869 pnd_new->pages_array[logical_page_num] =
1870 (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
1871 list_add(&pnd_new->list, &cache_l2.table.list);
1872 }
1873
1874 /* Increasing the current position pointer of the L2 Cache */
1875 cache_l2.cur_page_num++;
1876 if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
1877 cache_l2.cur_blk_idx++;
1878 if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
1879 /* The L2 Cache is full. Need to flush it now */
1880 nand_dbg_print(NAND_DBG_WARN,
1881 "L2 Cache is full, will start to flush it\n");
1882 flush_l2_cache();
1883 } else {
1884 cache_l2.cur_page_num = 0;
1885 }
1886 }
1887
1888 return PASS;
1889}
1890
1891/*
1892 * Seach in the Level2 Cache table to find the cache item.
1893 * If find, read the data from the NAND page of L2 Cache,
1894 * Otherwise, return FAIL.
1895 */
1896static int search_l2_cache(u8 *buf, u64 logical_addr)
1897{
1898 u32 logical_blk_num;
1899 u16 logical_page_num;
1900 struct list_head *p;
1901 struct spectra_l2_cache_list *pnd;
1902 u32 tmp = MAX_U32_VALUE;
1903 u32 phy_blk;
1904 u16 phy_page;
1905 int ret = FAIL;
1906
1907 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1908 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1909
1910 list_for_each(p, &cache_l2.table.list) {
1911 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1912 if (pnd->logical_blk_num == logical_blk_num) {
1913 tmp = pnd->pages_array[logical_page_num];
1914 break;
1915 }
1916 }
1917
1918 if (tmp != MAX_U32_VALUE) { /* Found valid map */
1919 phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
1920 phy_page = tmp & 0xFFFF;
1921#if CMD_DMA
1922 /* TODO */
1923#else
1924 ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
1925#endif
1926 }
1927
1928 return ret;
1929}
1930
494a43bb
AO
1931/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1932* Function: FTL_Cache_Write_Page
1933* Inputs: Pointer to buffer, page address, cache block number
1934* Outputs: PASS=0 / FAIL=1
1935* Description: It writes the data in Cache Block
1936*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
			u8 cache_blk, u16 flag)
{
	u8 *pDest;
	u64 addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	addr = Cache.array[cache_blk].address;
	pDest = Cache.array[cache_blk].buf;

	/* Byte offset of the target page inside the cached block buffer */
	pDest += (unsigned long)(page_addr - addr);
	Cache.array[cache_blk].changed = SET;
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Snapshot this slot's pre-command state so a failed CDMA chain
	 * can restore it later */
	int_cache[ftl_cmd_cnt].item = cache_blk;
	int_cache[ftl_cmd_cnt].cache.address =
			Cache.array[cache_blk].address;
	int_cache[ftl_cmd_cnt].cache.changed =
			Cache.array[cache_blk].changed;
#endif
	GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
	ftl_cmd_cnt++;
#else
	memcpy(pDest, pData, DeviceInfo.wPageDataSize);
#endif
	/* Bump use count for LRU, saturating at MAX_WORD_VALUE */
	if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
		Cache.array[cache_blk].use_cnt++;
}
1967
1968/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1969* Function: FTL_Cache_Write
1970* Inputs: none
1971* Outputs: PASS=0 / FAIL=1
1972* Description: It writes least frequently used Cache block to flash if it
1973* has been changed
1974*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1975static int FTL_Cache_Write(void)
1976{
1977 int i, bResult = PASS;
1978 u16 bNO, least_count = 0xFFFF;
1979
1980 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1981 __FILE__, __LINE__, __func__);
1982
1983 FTL_Calculate_LRU();
1984
1985 bNO = Cache.LRU;
1986 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
1987 "Least used cache block is %d\n", bNO);
1988
1989 if (Cache.array[bNO].changed != SET)
1990 return bResult;
1991
1992 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
1993 " Block %d containing logical block %d is dirty\n",
1994 bNO,
1995 (u32)(Cache.array[bNO].address >>
1996 DeviceInfo.nBitsInBlockDataSize));
1997#if CMD_DMA
1998#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1999 int_cache[ftl_cmd_cnt].item = bNO;
2000 int_cache[ftl_cmd_cnt].cache.address =
2001 Cache.array[bNO].address;
2002 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
2003#endif
2004#endif
2005 bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
2006 Cache.array[bNO].address);
2007 if (bResult != ERR)
2008 Cache.array[bNO].changed = CLEAR;
2009
2010 least_count = Cache.array[bNO].use_cnt;
2011
2012 for (i = 0; i < CACHE_ITEM_NUM; i++) {
2013 if (i == bNO)
2014 continue;
2015 if (Cache.array[i].use_cnt > 0)
2016 Cache.array[i].use_cnt -= least_count;
2017 }
2018
2019 return bResult;
2020}
2021
2022/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2023* Function: FTL_Cache_Read
2024* Inputs: Page address
2025* Outputs: PASS=0 / FAIL=1
2026* Description: It reads the block from device in Cache Block
2027* Set the LRU count to 1
2028* Mark the Cache Block as clean
2029*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2030static int FTL_Cache_Read(u64 logical_addr)
2031{
2032 u64 item_addr, phy_addr;
2033 u16 num;
2034 int ret;
2035
2036 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2037 __FILE__, __LINE__, __func__);
2038
2039 num = Cache.LRU; /* The LRU cache item will be overwritten */
2040
2041 item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
2042 Cache.cache_item_size;
2043 Cache.array[num].address = item_addr;
2044 Cache.array[num].use_cnt = 1;
2045 Cache.array[num].changed = CLEAR;
2046
2047#if CMD_DMA
2048#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2049 int_cache[ftl_cmd_cnt].item = num;
2050 int_cache[ftl_cmd_cnt].cache.address =
2051 Cache.array[num].address;
2052 int_cache[ftl_cmd_cnt].cache.changed =
2053 Cache.array[num].changed;
2054#endif
2055#endif
2056 /*
2057 * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
2058 * Otherwise, read it from NAND
2059 */
2060 ret = search_l2_cache(Cache.array[num].buf, logical_addr);
2061 if (PASS == ret) /* Hit in L2 Cache */
2062 return ret;
2063
2064 /* Compute the physical start address of NAND device according to */
2065 /* the logical start address of the cache item (LRU cache item) */
2066 phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
2067 GLOB_u64_Remainder(item_addr, 2);
2068
2069 return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
2070}
2071
2072/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2073* Function: FTL_Check_Block_Table
2074* Inputs: ?
2075* Outputs: PASS=0 / FAIL=1
2076* Description: It checks the correctness of each block table entry
2077*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2078static int FTL_Check_Block_Table(int wOldTable)
2079{
2080 u32 i;
2081 int wResult = PASS;
2082 u32 blk_idx;
2083 u32 *pbt = (u32 *)g_pBlockTable;
2084 u8 *pFlag = flag_check_blk_table;
2085
2086 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2087 __FILE__, __LINE__, __func__);
2088
2089 if (NULL != pFlag) {
2090 memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
2091 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
2092 blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
2093
2094 /*
2095 * 20081006/KBV - Changed to pFlag[i] reference
2096 * to avoid buffer overflow
2097 */
2098
2099 /*
2100 * 2008-10-20 Yunpeng Note: This change avoid
2101 * buffer overflow, but changed function of
2102 * the code, so it should be re-write later
2103 */
2104 if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
2105 PASS == pFlag[i]) {
2106 wResult = FAIL;
2107 break;
2108 } else {
2109 pFlag[i] = PASS;
2110 }
2111 }
2112 }
2113
2114 return wResult;
2115}
2116
2117
2118/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2119* Function: FTL_Write_Block_Table
2120* Inputs: flasg
2121* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
2122* happen. -1 Error
2123* Description: It writes the block table
2124* Block table always mapped to LBA 0 which inturn mapped
2125* to any physical block
2126*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2127static int FTL_Write_Block_Table(int wForce)
2128{
2129 u32 *pbt = (u32 *)g_pBlockTable;
2130 int wSuccess = PASS;
2131 u32 wTempBlockTableIndex;
2132 u16 bt_pages, new_bt_offset;
2133 u8 blockchangeoccured = 0;
2134
2135 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2136 __FILE__, __LINE__, __func__);
2137
2138 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2139
2140 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
2141 return 0;
2142
2143 if (PASS == wForce) {
2144 g_wBlockTableOffset =
2145 (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
2146#if CMD_DMA
2147 p_BTableChangesDelta =
2148 (struct BTableChangesDelta *)g_pBTDelta_Free;
2149 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2150
2151 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2152 p_BTableChangesDelta->g_wBlockTableOffset =
2153 g_wBlockTableOffset;
2154 p_BTableChangesDelta->ValidFields = 0x01;
2155#endif
2156 }
2157
2158 nand_dbg_print(NAND_DBG_DEBUG,
2159 "Inside FTL_Write_Block_Table: block %d Page:%d\n",
2160 g_wBlockTableIndex, g_wBlockTableOffset);
2161
2162 do {
2163 new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
2164 if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
2165 (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
2166 (FAIL == wSuccess)) {
2167 wTempBlockTableIndex = FTL_Replace_Block_Table();
2168 if (BAD_BLOCK == wTempBlockTableIndex)
2169 return ERR;
2170 if (!blockchangeoccured) {
2171 bt_block_changed = 1;
2172 blockchangeoccured = 1;
2173 }
2174
2175 g_wBlockTableIndex = wTempBlockTableIndex;
2176 g_wBlockTableOffset = 0;
2177 pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
2178#if CMD_DMA
2179 p_BTableChangesDelta =
2180 (struct BTableChangesDelta *)g_pBTDelta_Free;
2181 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2182
2183 p_BTableChangesDelta->ftl_cmd_cnt =
2184 ftl_cmd_cnt;
2185 p_BTableChangesDelta->g_wBlockTableOffset =
2186 g_wBlockTableOffset;
2187 p_BTableChangesDelta->g_wBlockTableIndex =
2188 g_wBlockTableIndex;
2189 p_BTableChangesDelta->ValidFields = 0x03;
2190
2191 p_BTableChangesDelta =
2192 (struct BTableChangesDelta *)g_pBTDelta_Free;
2193 g_pBTDelta_Free +=
2194 sizeof(struct BTableChangesDelta);
2195
2196 p_BTableChangesDelta->ftl_cmd_cnt =
2197 ftl_cmd_cnt;
2198 p_BTableChangesDelta->BT_Index =
2199 BLOCK_TABLE_INDEX;
2200 p_BTableChangesDelta->BT_Entry_Value =
2201 pbt[BLOCK_TABLE_INDEX];
2202 p_BTableChangesDelta->ValidFields = 0x0C;
2203#endif
2204 }
2205
2206 wSuccess = FTL_Write_Block_Table_Data();
2207 if (FAIL == wSuccess)
2208 MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
2209 } while (FAIL == wSuccess);
2210
2211 g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
2212
2213 return 1;
2214}
2215
494a43bb
AO
2216static int force_format_nand(void)
2217{
2218 u32 i;
2219
2220 /* Force erase the whole unprotected physical partiton of NAND */
2221 printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
2222 printk(KERN_ALERT "From phyical block %d to %d\n",
2223 DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
2224 for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
2225 if (GLOB_LLD_Erase_Block(i))
2226 printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
2227 }
2228 printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
2229 while(1);
2230
2231 return PASS;
2232}
2233
/*
 * GLOB_FTL_Flash_Format - format the whole Spectra partition.
 *
 * Returns PASS/FAIL. The regular FTL_Format_Flash(1) path is currently
 * bypassed in favor of a raw force-erase; note force_format_nand() hangs
 * after erasing and expects a reboot.
 */
int GLOB_FTL_Flash_Format(void)
{
	return force_format_nand();
}
2240
2241/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2242* Function: FTL_Search_Block_Table_IN_Block
2243* Inputs: Block Number
2244* Pointer to page
2245* Outputs: PASS / FAIL
2246* Page contatining the block table
2247* Description: It searches the block table in the block
2248* passed as an argument.
2249*
2250*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2251static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
2252 u8 BT_Tag, u16 *Page)
2253{
2254 u16 i, j, k;
2255 u16 Result = PASS;
2256 u16 Last_IPF = 0;
2257 u8 BT_Found = 0;
2258 u8 *tagarray;
2259 u8 *tempbuf = tmp_buf_search_bt_in_block;
2260 u8 *pSpareBuf = spare_buf_search_bt_in_block;
2261 u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
2262 u8 bt_flag_last_page = 0xFF;
2263 u8 search_in_previous_pages = 0;
2264 u16 bt_pages;
2265
2266 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2267 __FILE__, __LINE__, __func__);
2268
2269 nand_dbg_print(NAND_DBG_DEBUG,
2270 "Searching block table in %u block\n",
2271 (unsigned int)BT_Block);
2272
2273 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2274
2275 for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
2276 i += (bt_pages + 1)) {
2277 nand_dbg_print(NAND_DBG_DEBUG,
2278 "Searching last IPF: %d\n", i);
2279 Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
2280 BT_Block, i, 1);
2281
2282 if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
2283 if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
2284 continue;
2285 } else {
2286 search_in_previous_pages = 1;
2287 Last_IPF = i;
2288 }
2289 }
2290
2291 if (!search_in_previous_pages) {
2292 if (i != bt_pages) {
2293 i -= (bt_pages + 1);
2294 Last_IPF = i;
2295 }
2296 }
2297
2298 if (0 == Last_IPF)
2299 break;
2300
2301 if (!search_in_previous_pages) {
2302 i = i + 1;
2303 nand_dbg_print(NAND_DBG_DEBUG,
2304 "Reading the spare area of Block %u Page %u",
2305 (unsigned int)BT_Block, i);
2306 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
2307 BT_Block, i, 1);
2308 nand_dbg_print(NAND_DBG_DEBUG,
2309 "Reading the spare area of Block %u Page %u",
2310 (unsigned int)BT_Block, i + bt_pages - 1);
2311 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2312 BT_Block, i + bt_pages - 1, 1);
2313
2314 k = 0;
2315 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2316 if (j) {
2317 for (; k < j; k++) {
2318 if (tagarray[k] == BT_Tag)
2319 break;
2320 }
2321 }
2322
2323 if (k < j)
2324 bt_flag = tagarray[k];
2325 else
2326 Result = FAIL;
2327
2328 if (Result == PASS) {
2329 k = 0;
2330 j = FTL_Extract_Block_Table_Tag(
2331 pSpareBufBTLastPage, &tagarray);
2332 if (j) {
2333 for (; k < j; k++) {
2334 if (tagarray[k] == BT_Tag)
2335 break;
2336 }
2337 }
2338
2339 if (k < j)
2340 bt_flag_last_page = tagarray[k];
2341 else
2342 Result = FAIL;
2343
2344 if (Result == PASS) {
2345 if (bt_flag == bt_flag_last_page) {
2346 nand_dbg_print(NAND_DBG_DEBUG,
2347 "Block table is found"
2348 " in page after IPF "
2349 "at block %d "
2350 "page %d\n",
2351 (int)BT_Block, i);
2352 BT_Found = 1;
2353 *Page = i;
2354 g_cBlockTableStatus =
2355 CURRENT_BLOCK_TABLE;
2356 break;
2357 } else {
2358 Result = FAIL;
2359 }
2360 }
2361 }
2362 }
2363
2364 if (search_in_previous_pages)
2365 i = i - bt_pages;
2366 else
2367 i = i - (bt_pages + 1);
2368
2369 Result = PASS;
2370
2371 nand_dbg_print(NAND_DBG_DEBUG,
2372 "Reading the spare area of Block %d Page %d",
2373 (int)BT_Block, i);
2374
2375 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2376 nand_dbg_print(NAND_DBG_DEBUG,
2377 "Reading the spare area of Block %u Page %u",
2378 (unsigned int)BT_Block, i + bt_pages - 1);
2379
2380 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2381 BT_Block, i + bt_pages - 1, 1);
2382
2383 k = 0;
2384 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2385 if (j) {
2386 for (; k < j; k++) {
2387 if (tagarray[k] == BT_Tag)
2388 break;
2389 }
2390 }
2391
2392 if (k < j)
2393 bt_flag = tagarray[k];
2394 else
2395 Result = FAIL;
2396
2397 if (Result == PASS) {
2398 k = 0;
2399 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2400 &tagarray);
2401 if (j) {
2402 for (; k < j; k++) {
2403 if (tagarray[k] == BT_Tag)
2404 break;
2405 }
2406 }
2407
2408 if (k < j) {
2409 bt_flag_last_page = tagarray[k];
2410 } else {
2411 Result = FAIL;
2412 break;
2413 }
2414
2415 if (Result == PASS) {
2416 if (bt_flag == bt_flag_last_page) {
2417 nand_dbg_print(NAND_DBG_DEBUG,
2418 "Block table is found "
2419 "in page prior to IPF "
2420 "at block %u page %d\n",
2421 (unsigned int)BT_Block, i);
2422 BT_Found = 1;
2423 *Page = i;
2424 g_cBlockTableStatus =
2425 IN_PROGRESS_BLOCK_TABLE;
2426 break;
2427 } else {
2428 Result = FAIL;
2429 break;
2430 }
2431 }
2432 }
2433 }
2434
2435 if (Result == FAIL) {
2436 if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
2437 BT_Found = 1;
2438 *Page = i - (bt_pages + 1);
2439 }
2440 if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
2441 goto func_return;
2442 }
2443
2444 if (Last_IPF == 0) {
2445 i = 0;
2446 Result = PASS;
2447 nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
2448 "Block %u Page %u", (unsigned int)BT_Block, i);
2449
2450 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2451 nand_dbg_print(NAND_DBG_DEBUG,
2452 "Reading the spare area of Block %u Page %u",
2453 (unsigned int)BT_Block, i + bt_pages - 1);
2454 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2455 BT_Block, i + bt_pages - 1, 1);
2456
2457 k = 0;
2458 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2459 if (j) {
2460 for (; k < j; k++) {
2461 if (tagarray[k] == BT_Tag)
2462 break;
2463 }
2464 }
2465
2466 if (k < j)
2467 bt_flag = tagarray[k];
2468 else
2469 Result = FAIL;
2470
2471 if (Result == PASS) {
2472 k = 0;
2473 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2474 &tagarray);
2475 if (j) {
2476 for (; k < j; k++) {
2477 if (tagarray[k] == BT_Tag)
2478 break;
2479 }
2480 }
2481
2482 if (k < j)
2483 bt_flag_last_page = tagarray[k];
2484 else
2485 Result = FAIL;
2486
2487 if (Result == PASS) {
2488 if (bt_flag == bt_flag_last_page) {
2489 nand_dbg_print(NAND_DBG_DEBUG,
2490 "Block table is found "
2491 "in page after IPF at "
2492 "block %u page %u\n",
2493 (unsigned int)BT_Block,
2494 (unsigned int)i);
2495 BT_Found = 1;
2496 *Page = i;
2497 g_cBlockTableStatus =
2498 CURRENT_BLOCK_TABLE;
2499 goto func_return;
2500 } else {
2501 Result = FAIL;
2502 }
2503 }
2504 }
2505
2506 if (Result == FAIL)
2507 goto func_return;
2508 }
2509func_return:
2510 return Result;
2511}
2512
/* Expose the in-RAM block table buffer (e.g. for dump/restore users). */
u8 *get_blk_table_start_addr(void)
{
	return g_pBlockTable;
}
2517
2518unsigned long get_blk_table_len(void)
2519{
2520 return DeviceInfo.wDataBlockNum * sizeof(u32);
2521}
2522
/* Expose the in-RAM wear counter table (one u8 counter per data block). */
u8 *get_wear_leveling_table_start_addr(void)
{
	return g_pWearCounter;
}
2527
2528unsigned long get_wear_leveling_table_len(void)
2529{
2530 return DeviceInfo.wDataBlockNum * sizeof(u8);
2531}
2532
2533/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2534* Function: FTL_Read_Block_Table
2535* Inputs: none
2536* Outputs: PASS / FAIL
2537* Description: read the flash spare area and find a block containing the
2538* most recent block table(having largest block_table_counter).
2539* Find the last written Block table in this block.
2540* Check the correctness of Block Table
2541* If CDMA is enabled, this function is called in
2542* polling mode.
2543* We don't need to store changes in Block table in this
2544* function as it is called only at initialization
2545*
2546* Note: Currently this function is called at initialization
2547* before any read/erase/write command issued to flash so,
2548* there is no need to wait for CDMA list to complete as of now
2549*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2550static int FTL_Read_Block_Table(void)
2551{
2552 u16 i = 0;
2553 int k, j;
2554 u8 *tempBuf, *tagarray;
2555 int wResult = FAIL;
2556 int status = FAIL;
2557 u8 block_table_found = 0;
2558 int search_result;
2559 u32 Block;
2560 u16 Page = 0;
2561 u16 PageCount;
2562 u16 bt_pages;
2563 int wBytesCopied = 0, tempvar;
2564
2565 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2566 __FILE__, __LINE__, __func__);
2567
2568 tempBuf = tmp_buf1_read_blk_table;
2569 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2570
2571 for (j = DeviceInfo.wSpectraStartBlock;
2572 j <= (int)DeviceInfo.wSpectraEndBlock;
2573 j++) {
2574 status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
2575 k = 0;
2576 i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
2577 if (i) {
2578 status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
2579 j, 0, 1);
2580 for (; k < i; k++) {
2581 if (tagarray[k] == tempBuf[3])
2582 break;
2583 }
2584 }
2585
2586 if (k < i)
2587 k = tagarray[k];
2588 else
2589 continue;
2590
2591 nand_dbg_print(NAND_DBG_DEBUG,
2592 "Block table is contained in Block %d %d\n",
2593 (unsigned int)j, (unsigned int)k);
2594
2595 if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
2596 g_pBTBlocks[k-FIRST_BT_ID] = j;
2597 block_table_found = 1;
2598 } else {
2599 printk(KERN_ERR "FTL_Read_Block_Table -"
2600 "This should never happens. "
2601 "Two block table have same counter %u!\n", k);
2602 }
2603 }
2604
2605 if (block_table_found) {
2606 if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
2607 g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
2608 j = LAST_BT_ID;
2609 while ((j > FIRST_BT_ID) &&
2610 (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
2611 j--;
2612 if (j == FIRST_BT_ID) {
2613 j = LAST_BT_ID;
2614 last_erased = LAST_BT_ID;
2615 } else {
2616 last_erased = (u8)j + 1;
2617 while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
2618 g_pBTBlocks[j - FIRST_BT_ID]))
2619 j--;
2620 }
2621 } else {
2622 j = FIRST_BT_ID;
2623 while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
2624 j++;
2625 last_erased = (u8)j;
2626 while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
2627 g_pBTBlocks[j - FIRST_BT_ID]))
2628 j++;
2629 if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
2630 j--;
2631 }
2632
2633 if (last_erased > j)
2634 j += (1 + LAST_BT_ID - FIRST_BT_ID);
2635
2636 for (; (j >= last_erased) && (FAIL == wResult); j--) {
2637 i = (j - FIRST_BT_ID) %
2638 (1 + LAST_BT_ID - FIRST_BT_ID);
2639 search_result =
2640 FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
2641 i + FIRST_BT_ID, &Page);
2642 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2643 block_table_found = 0;
2644
2645 while ((search_result == PASS) && (FAIL == wResult)) {
2646 nand_dbg_print(NAND_DBG_DEBUG,
2647 "FTL_Read_Block_Table:"
2648 "Block: %u Page: %u "
2649 "contains block table\n",
2650 (unsigned int)g_pBTBlocks[i],
2651 (unsigned int)Page);
2652
2653 tempBuf = tmp_buf2_read_blk_table;
2654
2655 for (k = 0; k < bt_pages; k++) {
2656 Block = g_pBTBlocks[i];
2657 PageCount = 1;
2658
2659 status =
2660 GLOB_LLD_Read_Page_Main_Polling(
2661 tempBuf, Block, Page, PageCount);
2662
2663 tempvar = k ? 0 : 4;
2664
2665 wBytesCopied +=
2666 FTL_Copy_Block_Table_From_Flash(
2667 tempBuf + tempvar,
2668 DeviceInfo.wPageDataSize - tempvar,
2669 wBytesCopied);
2670
2671 Page++;
2672 }
2673
2674 wResult = FTL_Check_Block_Table(FAIL);
2675 if (FAIL == wResult) {
2676 block_table_found = 0;
2677 if (Page > bt_pages)
2678 Page -= ((bt_pages<<1) + 1);
2679 else
2680 search_result = FAIL;
2681 }
2682 }
2683 }
2684 }
2685
2686 if (PASS == wResult) {
2687 if (!block_table_found)
2688 FTL_Execute_SPL_Recovery();
2689
2690 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2691 g_wBlockTableOffset = (u16)Page + 1;
2692 else
2693 g_wBlockTableOffset = (u16)Page - bt_pages;
2694
2695 g_wBlockTableIndex = (u32)g_pBTBlocks[i];
2696
2697#if CMD_DMA
2698 if (DeviceInfo.MLCDevice)
2699 memcpy(g_pBTStartingCopy, g_pBlockTable,
2700 DeviceInfo.wDataBlockNum * sizeof(u32)
2701 + DeviceInfo.wDataBlockNum * sizeof(u8)
2702 + DeviceInfo.wDataBlockNum * sizeof(u16));
2703 else
2704 memcpy(g_pBTStartingCopy, g_pBlockTable,
2705 DeviceInfo.wDataBlockNum * sizeof(u32)
2706 + DeviceInfo.wDataBlockNum * sizeof(u8));
2707#endif
2708 }
2709
2710 if (FAIL == wResult)
2711 printk(KERN_ERR "Yunpeng - "
2712 "Can not find valid spectra block table!\n");
2713
2714#if AUTO_FORMAT_FLASH
2715 if (FAIL == wResult) {
2716 nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
2717 wResult = FTL_Format_Flash(0);
2718 }
2719#endif
2720
2721 return wResult;
2722}
2723
494a43bb
AO
2724/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2725* Function: FTL_Get_Page_Num
2726* Inputs: Size in bytes
2727* Outputs: Size in pages
2728* Description: It calculates the pages required for the length passed
2729*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2730static u32 FTL_Get_Page_Num(u64 length)
2731{
2732 return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
2733 (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
2734}
2735
2736/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2737* Function: FTL_Get_Physical_Block_Addr
2738* Inputs: Block Address (byte format)
2739* Outputs: Physical address of the block.
2740* Description: It translates LBA to PBA by returning address stored
2741* at the LBA location in the block table
2742*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2743static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
2744{
2745 u32 *pbt;
2746 u64 physical_addr;
2747
2748 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2749 __FILE__, __LINE__, __func__);
2750
2751 pbt = (u32 *)g_pBlockTable;
2752 physical_addr = (u64) DeviceInfo.wBlockDataSize *
2753 (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
2754
2755 return physical_addr;
2756}
2757
2758/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2759* Function: FTL_Get_Block_Index
2760* Inputs: Physical Block no.
2761* Outputs: Logical block no. /BAD_BLOCK
2762* Description: It returns the logical block no. for the PBA passed
2763*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2764static u32 FTL_Get_Block_Index(u32 wBlockNum)
2765{
2766 u32 *pbt = (u32 *)g_pBlockTable;
2767 u32 i;
2768
2769 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2770 __FILE__, __LINE__, __func__);
2771
2772 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
2773 if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
2774 return i;
2775
2776 return BAD_BLOCK;
2777}
2778
2779/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2780* Function: GLOB_FTL_Wear_Leveling
2781* Inputs: none
2782* Outputs: PASS=0
2783* Description: This is static wear leveling (done by explicit call)
2784* do complete static wear leveling
2785* do complete garbage collection
2786*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2787int GLOB_FTL_Wear_Leveling(void)
2788{
2789 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2790 __FILE__, __LINE__, __func__);
2791
2792 FTL_Static_Wear_Leveling();
2793 GLOB_FTL_Garbage_Collection();
2794
2795 return PASS;
2796}
2797
2798static void find_least_most_worn(u8 *chg,
2799 u32 *least_idx, u8 *least_cnt,
2800 u32 *most_idx, u8 *most_cnt)
2801{
2802 u32 *pbt = (u32 *)g_pBlockTable;
2803 u32 idx;
2804 u8 cnt;
2805 int i;
2806
2807 for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
2808 if (IS_BAD_BLOCK(i) || PASS == chg[i])
2809 continue;
2810
2811 idx = (u32) ((~BAD_BLOCK) & pbt[i]);
2812 cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
2813
2814 if (IS_SPARE_BLOCK(i)) {
2815 if (cnt > *most_cnt) {
2816 *most_cnt = cnt;
2817 *most_idx = idx;
2818 }
2819 }
2820
2821 if (IS_DATA_BLOCK(i)) {
2822 if (cnt < *least_cnt) {
2823 *least_cnt = cnt;
2824 *least_idx = idx;
2825 }
2826 }
2827
2828 if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
2829 debug_boundary_error(*most_idx,
2830 DeviceInfo.wDataBlockNum, 0);
2831 debug_boundary_error(*least_idx,
2832 DeviceInfo.wDataBlockNum, 0);
2833 continue;
2834 }
2835 }
2836}
2837
/*
 * move_blks_for_wear_leveling - migrate one least-worn data block into a
 * replacement (most-worn spare) block.
 * @chg:         per-LBA handled flags; updated for both blocks involved
 * @least_idx:   physical block number of the least-worn data block
 * @rep_blk_num: running count of blocks replaced so far this round
 * @result:      set to FAIL if the copy could not be completed
 *
 * Returns PASS to let the caller keep leveling, FAIL to stop (no spare
 * available, copy failed for good, or per-round replacement quota hit).
 */
static int move_blks_for_wear_leveling(u8 *chg,
		u32 *least_idx, u32 *rep_blk_num, int *result)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 rep_blk;
	int j, ret_cp_blk, ret_erase;
	int ret = PASS;

	chg[*least_idx] = PASS;
	debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);

	rep_blk = FTL_Replace_MWBlock();
	if (rep_blk != BAD_BLOCK) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"More than two spare blocks exist so do it\n");
		nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
				rep_blk);

		chg[rep_blk] = PASS;

		/* Make sure the on-flash table is marked in-progress
		 * before mutating the mapping. */
		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}

		/* Copy the data with bounded retries; a failed copy gets
		 * an erase-and-retry, and a failed erase marks the
		 * replacement block bad. */
		for (j = 0; j < RETRY_TIMES; j++) {
			ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
				DeviceInfo.wBlockDataSize,
				(u64)rep_blk * DeviceInfo.wBlockDataSize);
			if (FAIL == ret_cp_blk) {
				ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
					* DeviceInfo.wBlockDataSize);
				if (FAIL == ret_erase)
					MARK_BLOCK_AS_BAD(pbt[rep_blk]);
			} else {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Copy_Block == OK\n");
				break;
			}
		}

		if (j < RETRY_TIMES) {
			/* Copy succeeded: swap the two mappings. The old
			 * physical block becomes DISCARD (to be erased by
			 * GC), the replacement loses its SPARE mark. */
			u32 tmp;
			u32 old_idx = FTL_Get_Block_Index(*least_idx);
			u32 rep_idx = FTL_Get_Block_Index(rep_blk);
			tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
			pbt[old_idx] = (u32)((~SPARE_BLOCK) &
					pbt[rep_idx]);
			pbt[rep_idx] = tmp;
#if CMD_DMA
			/* Journal both table edits for CDMA rollback */
			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = old_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;

			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = rep_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		} else {
			/* All retries exhausted: give up on this
			 * replacement block entirely. */
			pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
#if CMD_DMA
			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
				FTL_Get_Block_Index(rep_blk);
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[FTL_Get_Block_Index(rep_blk)];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
			*result = FAIL;
			ret = FAIL;
		}

		/* Cap the number of replacements per leveling round */
		if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
			ret = FAIL;
	} else {
		printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
		ret = FAIL;
	}

	return ret;
}
2935
2936/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2937* Function: FTL_Static_Wear_Leveling
2938* Inputs: none
2939* Outputs: PASS=0 / FAIL=1
2940* Description: This is static wear leveling (done by explicit call)
2941* search for most&least used
2942* if difference < GATE:
2943* update the block table with exhange
2944* mark block table in flash as IN_PROGRESS
2945* copy flash block
2946* the caller should handle GC clean up after calling this function
2947*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2948int FTL_Static_Wear_Leveling(void)
2949{
2950 u8 most_worn_cnt;
2951 u8 least_worn_cnt;
2952 u32 most_worn_idx;
2953 u32 least_worn_idx;
2954 int result = PASS;
2955 int go_on = PASS;
2956 u32 replaced_blks = 0;
2957 u8 *chang_flag = flags_static_wear_leveling;
2958
2959 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2960 __FILE__, __LINE__, __func__);
2961
2962 if (!chang_flag)
2963 return FAIL;
2964
2965 memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
2966 while (go_on == PASS) {
2967 nand_dbg_print(NAND_DBG_DEBUG,
2968 "starting static wear leveling\n");
2969 most_worn_cnt = 0;
2970 least_worn_cnt = 0xFF;
2971 least_worn_idx = BLOCK_TABLE_INDEX;
2972 most_worn_idx = BLOCK_TABLE_INDEX;
2973
2974 find_least_most_worn(chang_flag, &least_worn_idx,
2975 &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
2976
2977 nand_dbg_print(NAND_DBG_DEBUG,
2978 "Used and least worn is block %u, whos count is %u\n",
2979 (unsigned int)least_worn_idx,
2980 (unsigned int)least_worn_cnt);
2981
2982 nand_dbg_print(NAND_DBG_DEBUG,
2983 "Free and most worn is block %u, whos count is %u\n",
2984 (unsigned int)most_worn_idx,
2985 (unsigned int)most_worn_cnt);
2986
2987 if ((most_worn_cnt > least_worn_cnt) &&
2988 (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
2989 go_on = move_blks_for_wear_leveling(chang_flag,
2990 &least_worn_idx, &replaced_blks, &result);
2991 else
2992 go_on = FAIL;
2993 }
2994
2995 return result;
2996}
2997
2998#if CMD_DMA
/*
 * do_garbage_collection (CMD_DMA variant) - erase up to @discard_cnt
 * discarded blocks and turn them back into spares.
 *
 * Returns PASS if at least one block was reclaimed, FAIL otherwise.
 * The extra loop bound (ftl_cmd_cnt + 28 < 256) keeps headroom in the
 * CDMA descriptor chain — TODO(review): confirm 28 is the worst-case
 * descriptor cost of one erase + table update.
 */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
			((ftl_cmd_cnt + 28) < 256)) {
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			/* Mark the on-flash table in-progress before any
			 * table mutation. */
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/* Never erase a block currently holding a block
			 * table copy; count it handled and move on. */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %u\n",
						(unsigned int)pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				/* i was already advanced above */
				bt_block_erased = 0;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Reclaimed: DISCARD -> SPARE, and journal
				 * the table edit for CDMA rollback. */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);
				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = i;
				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
				p_BTableChangesDelta->ValidFields = 0x0C;
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
3066
3067#else
/*
 * Non-CMD_DMA variant: walk the block table and erase discarded blocks
 * until either the table is exhausted or discard_cnt blocks have been
 * handled.  Blocks that currently hold a block-table copy or an L2 cache
 * block are skipped (counted as handled but not erased).
 * Returns PASS if at least one block was erased, FAIL otherwise.
 */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	/* NOTE(review): i is int but compared against u32 wDataBlockNum -
	 * assumes wDataBlockNum fits in int; confirm against DeviceInfo. */
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
		/* Only touch entries that are discarded and not bad */
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			/* First table modification: flag the on-flash table
			 * as in-progress before changing anything */
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/* Skip blocks that hold a block-table copy; they are
			 * erased by the BT garbage collector instead */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %d\n",
						pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			/* If the discard block is L2 cache block, then just skip it */
			for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
				if (cache_l2.blk_array[cnt] == pba) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase L2 cache blk %d\n",
						pba);
					break;
				}
			}
			if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
				discard_cnt--;
				i++;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			/* Erase; on success flip DISCARD -> SPARE, on
			 * failure mark the entry bad */
			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
3139#endif
3140
3141/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3142* Function: GLOB_FTL_Garbage_Collection
3143* Inputs: none
3144* Outputs: PASS / FAIL (returns the number of un-erased blocks
3145* Description: search the block table for all discarded blocks to erase
3146* for each discarded block:
3147* set the flash block to IN_PROGRESS
3148* erase the block
3149* update the block table
3150* write the block table to flash
3151*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3152int GLOB_FTL_Garbage_Collection(void)
3153{
3154 u32 i;
3155 u32 wDiscard = 0;
3156 int wResult = FAIL;
3157 u32 *pbt = (u32 *)g_pBlockTable;
3158
3159 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3160 __FILE__, __LINE__, __func__);
3161
3162 if (GC_Called) {
3163 printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
3164 "has been re-entered! Exit.\n");
3165 return PASS;
3166 }
3167
3168 GC_Called = 1;
3169
3170 GLOB_FTL_BT_Garbage_Collection();
3171
3172 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3173 if (IS_DISCARDED_BLOCK(i))
3174 wDiscard++;
3175 }
3176
3177 if (wDiscard <= 0) {
3178 GC_Called = 0;
3179 return wResult;
3180 }
3181
3182 nand_dbg_print(NAND_DBG_DEBUG,
3183 "Found %d discarded blocks\n", wDiscard);
3184
3185 FTL_Write_Block_Table(FAIL);
3186
3187 wResult = do_garbage_collection(wDiscard);
3188
3189 FTL_Write_Block_Table(FAIL);
3190
3191 GC_Called = 0;
3192
3193 return wResult;
3194}
3195
3196
3197#if CMD_DMA
3198static int do_bt_garbage_collection(void)
3199{
3200 u32 pba, lba;
3201 u32 *pbt = (u32 *)g_pBlockTable;
3202 u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3203 u64 addr;
3204 int i, ret = FAIL;
3205
3206 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3207 __FILE__, __LINE__, __func__);
3208
3209 if (BT_GC_Called)
3210 return PASS;
3211
3212 BT_GC_Called = 1;
3213
3214 for (i = last_erased; (i <= LAST_BT_ID) &&
3215 (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3216 FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
3217 ((ftl_cmd_cnt + 28)) < 256; i++) {
3218 pba = pBTBlocksNode[i - FIRST_BT_ID];
3219 lba = FTL_Get_Block_Index(pba);
3220 nand_dbg_print(NAND_DBG_DEBUG,
3221 "do_bt_garbage_collection: pba %d, lba %d\n",
3222 pba, lba);
3223 nand_dbg_print(NAND_DBG_DEBUG,
3224 "Block Table Entry: %d", pbt[lba]);
3225
3226 if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3227 (pbt[lba] & DISCARD_BLOCK)) {
3228 nand_dbg_print(NAND_DBG_DEBUG,
3229 "do_bt_garbage_collection_cdma: "
3230 "Erasing Block tables present in block %d\n",
3231 pba);
3232 addr = FTL_Get_Physical_Block_Addr((u64)lba *
3233 DeviceInfo.wBlockDataSize);
3234 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3235 pbt[lba] &= (u32)(~DISCARD_BLOCK);
3236 pbt[lba] |= (u32)(SPARE_BLOCK);
3237
3238 p_BTableChangesDelta =
3239 (struct BTableChangesDelta *)
3240 g_pBTDelta_Free;
3241 g_pBTDelta_Free +=
3242 sizeof(struct BTableChangesDelta);
3243
3244 p_BTableChangesDelta->ftl_cmd_cnt =
3245 ftl_cmd_cnt - 1;
3246 p_BTableChangesDelta->BT_Index = lba;
3247 p_BTableChangesDelta->BT_Entry_Value =
3248 pbt[lba];
3249
3250 p_BTableChangesDelta->ValidFields = 0x0C;
3251
3252 ret = PASS;
3253 pBTBlocksNode[last_erased - FIRST_BT_ID] =
3254 BTBLOCK_INVAL;
3255 nand_dbg_print(NAND_DBG_DEBUG,
3256 "resetting bt entry at index %d "
3257 "value %d\n", i,
3258 pBTBlocksNode[i - FIRST_BT_ID]);
3259 if (last_erased == LAST_BT_ID)
3260 last_erased = FIRST_BT_ID;
3261 else
3262 last_erased++;
3263 } else {
3264 MARK_BLOCK_AS_BAD(pbt[lba]);
3265 }
3266 }
3267 }
3268
3269 BT_GC_Called = 0;
3270
3271 return ret;
3272}
3273
3274#else
3275static int do_bt_garbage_collection(void)
3276{
3277 u32 pba, lba;
3278 u32 *pbt = (u32 *)g_pBlockTable;
3279 u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3280 u64 addr;
3281 int i, ret = FAIL;
3282
3283 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3284 __FILE__, __LINE__, __func__);
3285
3286 if (BT_GC_Called)
3287 return PASS;
3288
3289 BT_GC_Called = 1;
3290
3291 for (i = last_erased; (i <= LAST_BT_ID) &&
3292 (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3293 FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
3294 pba = pBTBlocksNode[i - FIRST_BT_ID];
3295 lba = FTL_Get_Block_Index(pba);
3296 nand_dbg_print(NAND_DBG_DEBUG,
3297 "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3298 pba, lba);
3299 nand_dbg_print(NAND_DBG_DEBUG,
3300 "Block Table Entry: %d", pbt[lba]);
3301
3302 if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3303 (pbt[lba] & DISCARD_BLOCK)) {
3304 nand_dbg_print(NAND_DBG_DEBUG,
3305 "do_bt_garbage_collection: "
3306 "Erasing Block tables present in block %d\n",
3307 pba);
3308 addr = FTL_Get_Physical_Block_Addr((u64)lba *
3309 DeviceInfo.wBlockDataSize);
3310 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3311 pbt[lba] &= (u32)(~DISCARD_BLOCK);
3312 pbt[lba] |= (u32)(SPARE_BLOCK);
3313 ret = PASS;
3314 pBTBlocksNode[last_erased - FIRST_BT_ID] =
3315 BTBLOCK_INVAL;
3316 nand_dbg_print(NAND_DBG_DEBUG,
3317 "resetting bt entry at index %d "
3318 "value %d\n", i,
3319 pBTBlocksNode[i - FIRST_BT_ID]);
3320 if (last_erased == LAST_BT_ID)
3321 last_erased = FIRST_BT_ID;
3322 else
3323 last_erased++;
3324 } else {
3325 MARK_BLOCK_AS_BAD(pbt[lba]);
3326 }
3327 }
3328 }
3329
3330 BT_GC_Called = 0;
3331
3332 return ret;
3333}
3334
3335#endif
3336
3337/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3338* Function: GLOB_FTL_BT_Garbage_Collection
3339* Inputs: none
3340* Outputs: PASS / FAIL (returns the number of un-erased blocks
3341* Description: Erases discarded blocks containing Block table
3342*
3343*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_BT_Garbage_Collection(void)
{
	/* Thin wrapper; re-entry is guarded inside do_bt_garbage_collection
	 * via the BT_GC_Called flag. */
	return do_bt_garbage_collection();
}
3348
3349/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3350* Function: FTL_Replace_OneBlock
3351* Inputs: Block number 1
3352* Block number 2
3353* Outputs: Replaced Block Number
3354* Description: Interchange block table entries at wBlockNum and wReplaceNum
3355*
3356*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
{
	u32 tmp_blk;
	u32 replace_node = BAD_BLOCK;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (rep_blk != BAD_BLOCK) {
		/* Preserve a bad-block entry unchanged; otherwise the
		 * outgoing entry is marked discarded (and no longer spare)
		 * before being parked in the replacement slot */
		if (IS_BAD_BLOCK(blk))
			tmp_blk = pbt[blk];
		else
			tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);

		/* Incoming entry loses its spare flag: it is in use now */
		replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
		pbt[blk] = replace_node;
		pbt[rep_blk] = tmp_blk;

#if CMD_DMA
		/* Record both modified block-table entries for CDMA replay
		 * (ValidFields 0x0C presumably flags BT_Index and
		 * BT_Entry_Value - matches usage elsewhere in this file) */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[blk];

		p_BTableChangesDelta->ValidFields = 0x0C;

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = rep_blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
	}

	return replace_node;
}
3400
3401/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3402* Function: FTL_Write_Block_Table_Data
3403* Inputs: Block table size in pages
3404* Outputs: PASS=0 / FAIL=1
3405* Description: Write block table data in flash
3406* If first page and last page
3407* Write data+BT flag
3408* else
3409* Write data
3410* BT flag is a counter. Its value is incremented for block table
3411* write in a new Block
3412*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Write the in-RAM block table to flash at the current
 * g_wBlockTableIndex/g_wBlockTableOffset position.  The first and last
 * pages carry the BT counter (bt_flag) and a table signature in the
 * spare area; middle pages carry raw table data.
 *
 * NOTE(review): every failure path goes to func_return, which returns
 * PASS unconditionally - callers never see a write failure from here.
 * Confirm whether that is intentional before relying on the result.
 */
static int FTL_Write_Block_Table_Data(void)
{
	u64 dwBlockTableAddr, pTempAddr;
	u32 Block;
	u16 Page, PageCount;
	u8 *tempBuf = tmp_buf_write_blk_table_data;
	int wBytesCopied;
	u16 bt_pages;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	dwBlockTableAddr =
		(u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
		(u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
	pTempAddr = dwBlockTableAddr;

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
			       "page= %d BlockTableIndex= %d "
			       "BlockTableOffset=%d\n", bt_pages,
			       g_wBlockTableIndex, g_wBlockTableOffset);

	Block = BLK_FROM_ADDR(pTempAddr);
	Page = PAGE_FROM_ADDR(pTempAddr, Block);
	PageCount = 1;

	/* New BT block: advance the wrapping BT counter and remember which
	 * flash block carries this table copy */
	if (bt_block_changed) {
		if (bt_flag == LAST_BT_ID) {
			bt_flag = FIRST_BT_ID;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		} else if (bt_flag < LAST_BT_ID) {
			bt_flag++;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		}

		/* Near the end of the ID range with old copies still live:
		 * reclaim old BT blocks now */
		if ((bt_flag > (LAST_BT_ID-4)) &&
			g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
			BTBLOCK_INVAL) {
			bt_block_changed = 0;
			GLOB_FTL_BT_Garbage_Collection();
		}

		bt_block_changed = 0;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Counter is %u Block %u\n",
			bt_flag, (unsigned int)Block);
	}

	/* First page layout: bytes 0-2 zero, byte 3 = BT counter, table
	 * data from byte 4, 0xff padding, signature in the spare area */
	memset(tempBuf, 0, 3);
	tempBuf[3] = bt_flag;
	wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
			DeviceInfo.wPageDataSize - 4, 0);
	memset(&tempBuf[wBytesCopied + 4], 0xff,
		DeviceInfo.wPageSize - (wBytesCopied + 4));
	FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
			bt_flag);

#if CMD_DMA
	/* CDMA path writes from g_pNextBlockTable, which must stay valid
	 * until the command chain executes */
	memcpy(g_pNextBlockTable, tempBuf,
		DeviceInfo.wPageSize * sizeof(u8));
	nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
		"Block %u Page %u\n", (unsigned int)Block, Page);
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
		Block, Page, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
			"%s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}

	ftl_cmd_cnt++;
	g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
#else
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}
#endif

	if (bt_pages > 1) {
		PageCount = bt_pages - 1;
		/* Middle pages (if any): raw table data, main area only */
		if (PageCount > 1) {
			wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize * (PageCount - 1),
				wBytesCopied);

#if CMD_DMA
			memcpy(g_pNextBlockTable, tempBuf,
				(PageCount - 1) * DeviceInfo.wPageDataSize);
			if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
				g_pNextBlockTable, Block, Page + 1,
				PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}

			ftl_cmd_cnt++;
			g_pNextBlockTable += (PageCount - 1) *
				DeviceInfo.wPageDataSize * sizeof(u8);
#else
			if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
					Block, Page + 1, PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}
#endif
		}

		/* Last page: remaining data, 0xff padding, signature in
		 * the spare area again */
		wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize, wBytesCopied);
		memset(&tempBuf[wBytesCopied], 0xff,
			DeviceInfo.wPageSize-wBytesCopied);
		FTL_Insert_Block_Table_Signature(
			&tempBuf[DeviceInfo.wPageDataSize], bt_flag);
#if CMD_DMA
		memcpy(g_pNextBlockTable, tempBuf,
			DeviceInfo.wPageSize * sizeof(u8));
		nand_dbg_print(NAND_DBG_DEBUG,
			"Writing the last Page of Block Table "
			"Block %u Page %u\n",
			(unsigned int)Block, Page + bt_pages - 1);
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
			g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
			LLD_CMD_FLAG_MODE_CDMA |
			LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
		ftl_cmd_cnt++;
#else
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
			Block, Page+bt_pages - 1, 1)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, "
				"new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");

func_return:
	return PASS;
}
3580
3581/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3582* Function: FTL_Replace_Block_Table
3583* Inputs: None
3584* Outputs: PASS=0 / FAIL=1
3585* Description: Get a new block to write block table
3586*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3587static u32 FTL_Replace_Block_Table(void)
3588{
3589 u32 blk;
3590 int gc;
3591
3592 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3593 __FILE__, __LINE__, __func__);
3594
3595 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3596
3597 if ((BAD_BLOCK == blk) && (PASS == gc)) {
3598 GLOB_FTL_Garbage_Collection();
3599 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3600 }
3601 if (BAD_BLOCK == blk)
3602 printk(KERN_ERR "%s, %s: There is no spare block. "
3603 "It should never happen\n",
3604 __FILE__, __func__);
3605
3606 nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
3607
3608 return blk;
3609}
3610
3611/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3612* Function: FTL_Replace_LWBlock
3613* Inputs: Block number
3614* Pointer to Garbage Collect flag
3615* Outputs:
3616* Description: Determine the least weared block by traversing
3617* block table
3618* Set Garbage collection to be called if number of spare
3619* block is less than Free Block Gate count
3620* Change Block table entry to map least worn block for current
3621* operation
3622*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 wLeastWornCounter = 0xFF;
	u32 wLeastWornIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wDiscardBlockNum = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Target entry is already a spare: just clear its spare flag and
	 * hand it out, no replacement needed */
	if (IS_SPARE_BLOCK(wBlockNum)) {
		*pGarbageCollect = FAIL;
		pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
#if CMD_DMA
		/* Record the table change for CDMA replay */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
		p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		return pbt[wBlockNum];
	}

	/* Single pass: count discarded and spare blocks while tracking the
	 * least-worn spare.  NOTE(review): wLeastWornIndex stores the table
	 * index i here, while FTL_Replace_MWBlock stores the physical index
	 * - confirm the asymmetry is intentional. */
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_DISCARDED_BLOCK(i))
			wDiscardBlockNum++;

		if (IS_SPARE_BLOCK(i)) {
			u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
			if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
				printk(KERN_ERR "FTL_Replace_LWBlock: "
					"This should never occur!\n");
			if (g_pWearCounter[wPhysicalIndex -
				DeviceInfo.wSpectraStartBlock] <
				wLeastWornCounter) {
				wLeastWornCounter =
					g_pWearCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock];
				wLeastWornIndex = i;
			}
			wSpareBlockNum++;
		}
	}

	nand_dbg_print(NAND_DBG_WARN,
		"FTL_Replace_LWBlock: Least Worn Counter %d\n",
		(int)wLeastWornCounter);

	/* Recommend GC when discards have accumulated or spares run low */
	if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
		(wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
		*pGarbageCollect = PASS;
	else
		*pGarbageCollect = FAIL;

	nand_dbg_print(NAND_DBG_DEBUG,
		"FTL_Replace_LWBlock: Discarded Blocks %u Spare"
		" Blocks %u\n",
		(unsigned int)wDiscardBlockNum,
		(unsigned int)wSpareBlockNum);

	return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
}
3690
3691/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3692* Function: FTL_Replace_MWBlock
3693* Inputs: None
3694* Outputs: most worn spare block no./BAD_BLOCK
3695* Description: It finds most worn spare block.
3696*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3697static u32 FTL_Replace_MWBlock(void)
3698{
3699 u32 i;
3700 u32 *pbt = (u32 *)g_pBlockTable;
3701 u8 wMostWornCounter = 0;
3702 u32 wMostWornIndex = BAD_BLOCK;
3703 u32 wSpareBlockNum = 0;
3704
3705 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3706 __FILE__, __LINE__, __func__);
3707
3708 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3709 if (IS_SPARE_BLOCK(i)) {
3710 u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
3711 if (g_pWearCounter[wPhysicalIndex -
3712 DeviceInfo.wSpectraStartBlock] >
3713 wMostWornCounter) {
3714 wMostWornCounter =
3715 g_pWearCounter[wPhysicalIndex -
3716 DeviceInfo.wSpectraStartBlock];
3717 wMostWornIndex = wPhysicalIndex;
3718 }
3719 wSpareBlockNum++;
3720 }
3721 }
3722
3723 if (wSpareBlockNum <= 2)
3724 return BAD_BLOCK;
3725
3726 return wMostWornIndex;
3727}
3728
3729/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3730* Function: FTL_Replace_Block
3731* Inputs: Block Address
3732* Outputs: PASS=0 / FAIL=1
3733* Description: If block specified by blk_addr parameter is not free,
3734* replace it with the least worn block.
3735*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3736static int FTL_Replace_Block(u64 blk_addr)
3737{
3738 u32 current_blk = BLK_FROM_ADDR(blk_addr);
3739 u32 *pbt = (u32 *)g_pBlockTable;
3740 int wResult = PASS;
3741 int GarbageCollect = FAIL;
3742
3743 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3744 __FILE__, __LINE__, __func__);
3745
3746 if (IS_SPARE_BLOCK(current_blk)) {
3747 pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
3748#if CMD_DMA
3749 p_BTableChangesDelta =
3750 (struct BTableChangesDelta *)g_pBTDelta_Free;
3751 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3752 p_BTableChangesDelta->ftl_cmd_cnt =
3753 ftl_cmd_cnt;
3754 p_BTableChangesDelta->BT_Index = current_blk;
3755 p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
3756 p_BTableChangesDelta->ValidFields = 0x0C ;
3757#endif
3758 return wResult;
3759 }
3760
3761 FTL_Replace_LWBlock(current_blk, &GarbageCollect);
3762
3763 if (PASS == GarbageCollect)
3764 wResult = GLOB_FTL_Garbage_Collection();
3765
3766 return wResult;
3767}
3768
3769/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3770* Function: GLOB_FTL_Is_BadBlock
3771* Inputs: block number to test
3772* Outputs: PASS (block is BAD) / FAIL (block is not bad)
3773* Description: test if this block number is flagged as bad
3774*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3775int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
3776{
3777 u32 *pbt = (u32 *)g_pBlockTable;
3778
3779 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3780 __FILE__, __LINE__, __func__);
3781
3782 if (wBlockNum >= DeviceInfo.wSpectraStartBlock
3783 && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
3784 return PASS;
3785 else
3786 return FAIL;
3787}
3788
3789/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3790* Function: GLOB_FTL_Flush_Cache
3791* Inputs: none
3792* Outputs: PASS=0 / FAIL=1
3793* Description: flush all the cache blocks to flash
3794* if a cache block is not dirty, don't do anything with it
3795* else, write the block and update the block table
3796* Note: This function should be called at shutdown/power down.
3797* to write important data into device
3798*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Flush_Cache(void)
{
	int i, ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Write every dirty L1 cache entry back to the L2 cache */
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		if (SET == Cache.array[i].changed) {
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
			/* Snapshot the entry so the cache can be restored if
			 * the CDMA command chain fails */
			int_cache[ftl_cmd_cnt].item = i;
			int_cache[ftl_cmd_cnt].cache.address =
				Cache.array[i].address;
			int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
#endif
#endif
			ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
			if (PASS == ret) {
				Cache.array[i].changed = CLEAR;
			} else {
				/* Entry stays dirty; error is only logged */
				printk(KERN_ALERT "Failed when write back to L2 cache!\n");
				/* TODO - How to handle this? */
			}
		}
	}

	/* Push the L2 cache to flash, then persist the block table */
	flush_l2_cache();

	return FTL_Write_Block_Table(FAIL);
}
3830
3831/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3832* Function: GLOB_FTL_Page_Read
3833* Inputs: pointer to data
3834* logical address of data (u64 is LBA * Bytes/Page)
3835* Outputs: PASS=0 / FAIL=1
3836* Description: reads a page of data into RAM from the cache
3837* if the data is not already in cache, read from flash to cache
3838*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3839int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
3840{
3841 u16 cache_item;
3842 int res = PASS;
3843
3844 nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
3845 "page_addr: %llu\n", logical_addr);
3846
3847 cache_item = FTL_Cache_If_Hit(logical_addr);
3848
3849 if (UNHIT_CACHE_ITEM == cache_item) {
3850 nand_dbg_print(NAND_DBG_DEBUG,
3851 "GLOB_FTL_Page_Read: Cache not hit\n");
3852 res = FTL_Cache_Write();
3853 if (ERR == FTL_Cache_Read(logical_addr))
3854 res = ERR;
3855 cache_item = Cache.LRU;
3856 }
3857
3858 FTL_Cache_Read_Page(data, logical_addr, cache_item);
3859
3860 return res;
3861}
3862
3863/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3864* Function: GLOB_FTL_Page_Write
3865* Inputs: pointer to data
3866* address of data (ADDRESSTYPE is LBA * Bytes/Page)
3867* Outputs: PASS=0 / FAIL=1
3868* Description: writes a page of data from RAM to the cache
3869* if the data is not already in cache, write back the
3870* least recently used block and read the addressed block
3871* from flash to cache
3872*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
{
	u16 cache_blk;
	u32 *pbt = (u32 *)g_pBlockTable;
	int wResult = PASS;

	nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
		"dwPageAddr: %llu\n", dwPageAddr);

	cache_blk = FTL_Cache_If_Hit(dwPageAddr);

	if (UNHIT_CACHE_ITEM == cache_blk) {
		/* Miss: flush the LRU entry first */
		wResult = FTL_Cache_Write();
		/* If the target block has gone bad, remap it before
		 * reading it into cache */
		if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
			wResult = FTL_Replace_Block(dwPageAddr);
			pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
			if (wResult == FAIL)
				return FAIL;
		}
		/* NOTE(review): an ERR here overwrites any earlier FAIL
		 * from FTL_Cache_Write - confirm callers expect that */
		if (ERR == FTL_Cache_Read(dwPageAddr))
			wResult = ERR;
		cache_blk = Cache.LRU;
		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
	} else {
#if CMD_DMA
		/* Hit: in CDMA mode order this write before queued commands */
		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
				LLD_CMD_FLAG_ORDER_BEFORE_REST);
#else
		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
#endif
	}

	return wResult;
}
3907
3908/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3909* Function: GLOB_FTL_Block_Erase
3910* Inputs: address of block to erase (now in byte format, should change to
3911* block format)
3912* Outputs: PASS=0 / FAIL=1
3913* Description: erases the specified block
3914* increments the erase count
3915* If erase count reaches its upper limit,call function to
3916* do the ajustment as per the relative erase count values
3917*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Block_Erase(u64 blk_addr)
{
	int status;
	u32 BlkIdx;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			       __FILE__, __LINE__, __func__);

	BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);

	/* Only blocks inside the Spectra-managed area may be erased */
	if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
		printk(KERN_ERR "GLOB_FTL_Block_Erase: "
			"This should never occur\n");
		return FAIL;
	}

#if CMD_DMA
	/* CDMA path: a failed queue-up is logged but the wear/read
	 * counter updates below still run */
	status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
	if (status == FAIL)
		nand_dbg_print(NAND_DBG_WARN,
			       "NAND Program fail in %s, Line %d, "
			       "Function: %s, new Bad Block %d generated!\n",
			       __FILE__, __LINE__, __func__, BlkIdx);
#else
	status = GLOB_LLD_Erase_Block(BlkIdx);
	if (status == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			       "NAND Program fail in %s, Line %d, "
			       "Function: %s, new Bad Block %d generated!\n",
			       __FILE__, __LINE__, __func__, BlkIdx);
		return status;
	}
#endif

	/* MLC parts track a per-block read counter; an erase resets it and
	 * the table must be flagged in-progress before the change */
	if (DeviceInfo.MLCDevice) {
		g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;

#if CMD_DMA
	/* Record the wear-counter (and, for MLC, read-counter) updates for
	 * CDMA replay */
	p_BTableChangesDelta =
		(struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->WC_Index =
		BlkIdx - DeviceInfo.wSpectraStartBlock;
	p_BTableChangesDelta->WC_Entry_Value =
		g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
	p_BTableChangesDelta->ValidFields = 0x30;

	if (DeviceInfo.MLCDevice) {
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			BlkIdx - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[BlkIdx -
				DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;
	}

	ftl_cmd_cnt++;
#endif

	/* Near the u8 wear-counter ceiling: rebase all counters */
	if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
		FTL_Adjust_Relative_Erase_Count(BlkIdx);

	return status;
}
3995
3996
3997/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3998* Function: FTL_Adjust_Relative_Erase_Count
3999* Inputs: index to block that was just incremented and is at the max
4000* Outputs: PASS=0 / FAIL=1
4001* Description: If any erase counts at MAX, adjusts erase count of every
4002* block by substracting least worn
4003* counter from counter value of every entry in wear table
4004*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4005static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
4006{
4007 u8 wLeastWornCounter = MAX_BYTE_VALUE;
4008 u8 wWearCounter;
4009 u32 i, wWearIndex;
4010 u32 *pbt = (u32 *)g_pBlockTable;
4011 int wResult = PASS;
4012
4013 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4014 __FILE__, __LINE__, __func__);
4015
4016 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4017 if (IS_BAD_BLOCK(i))
4018 continue;
4019 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4020
4021 if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
4022 printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
4023 "This should never occur\n");
4024 wWearCounter = g_pWearCounter[wWearIndex -
4025 DeviceInfo.wSpectraStartBlock];
4026 if (wWearCounter < wLeastWornCounter)
4027 wLeastWornCounter = wWearCounter;
4028 }
4029
4030 if (wLeastWornCounter == 0) {
4031 nand_dbg_print(NAND_DBG_WARN,
4032 "Adjusting Wear Levelling Counters: Special Case\n");
4033 g_pWearCounter[Index_of_MAX -
4034 DeviceInfo.wSpectraStartBlock]--;
4035#if CMD_DMA
4036 p_BTableChangesDelta =
4037 (struct BTableChangesDelta *)g_pBTDelta_Free;
4038 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4039 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4040 p_BTableChangesDelta->WC_Index =
4041 Index_of_MAX - DeviceInfo.wSpectraStartBlock;
4042 p_BTableChangesDelta->WC_Entry_Value =
4043 g_pWearCounter[Index_of_MAX -
4044 DeviceInfo.wSpectraStartBlock];
4045 p_BTableChangesDelta->ValidFields = 0x30;
4046#endif
4047 FTL_Static_Wear_Leveling();
4048 } else {
4049 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
4050 if (!IS_BAD_BLOCK(i)) {
4051 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4052 g_pWearCounter[wWearIndex -
4053 DeviceInfo.wSpectraStartBlock] =
4054 (u8)(g_pWearCounter
4055 [wWearIndex -
4056 DeviceInfo.wSpectraStartBlock] -
4057 wLeastWornCounter);
4058#if CMD_DMA
4059 p_BTableChangesDelta =
4060 (struct BTableChangesDelta *)g_pBTDelta_Free;
4061 g_pBTDelta_Free +=
4062 sizeof(struct BTableChangesDelta);
4063
4064 p_BTableChangesDelta->ftl_cmd_cnt =
4065 ftl_cmd_cnt;
4066 p_BTableChangesDelta->WC_Index = wWearIndex -
4067 DeviceInfo.wSpectraStartBlock;
4068 p_BTableChangesDelta->WC_Entry_Value =
4069 g_pWearCounter[wWearIndex -
4070 DeviceInfo.wSpectraStartBlock];
4071 p_BTableChangesDelta->ValidFields = 0x30;
4072#endif
4073 }
4074 }
4075
4076 return wResult;
4077}
4078
4079/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4080* Function: FTL_Write_IN_Progress_Block_Table_Page
4081* Inputs: None
4082* Outputs: None
4083* Description: It writes in-progress flag page to the page next to
4084* block table
4085***********************************************************************/
static int FTL_Write_IN_Progress_Block_Table_Page(void)
{
	int wResult = PASS;
	u16 bt_pages;
	u16 dwIPFPageAddr;
#if CMD_DMA
#else
	/* Only the non-CDMA error path below needs these */
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 wTempBlockTableIndex;
#endif

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
			       __FILE__, __LINE__, __func__);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* The IPF page lives immediately after the block-table pages */
	dwIPFPageAddr = g_wBlockTableOffset + bt_pages;

	nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
			       "Block %d Page %d\n",
			       g_wBlockTableIndex, dwIPFPageAddr);

#if CMD_DMA
	/* CDMA path: queue the write and log the new table offset as a
	 * delta record; a failure is only logged here */
	wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
	if (wResult == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			       "NAND Program fail in %s, Line %d, "
			       "Function: %s, new Bad Block %d generated!\n",
			       __FILE__, __LINE__, __func__,
			       g_wBlockTableIndex);
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
	p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
	p_BTableChangesDelta->ValidFields = 0x01;
	ftl_cmd_cnt++;
#else
	/* Synchronous path: on failure, mark the current block-table block
	 * bad and move the table to a fresh block */
	wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1);
	if (wResult == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			       "NAND Program fail in %s, Line %d, "
			       "Function: %s, new Bad Block %d generated!\n",
			       __FILE__, __LINE__, __func__,
			       (int)g_wBlockTableIndex);
		MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
		wTempBlockTableIndex = FTL_Replace_Block_Table();
		bt_block_changed = 1;
		if (BAD_BLOCK == wTempBlockTableIndex)
			return ERR;
		g_wBlockTableIndex = wTempBlockTableIndex;
		g_wBlockTableOffset = 0;
		/* Block table tag is '00'. Means it's used one */
		pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
		return FAIL;
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
#endif
	return wResult;
}
4150
4151/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4152* Function: FTL_Read_Disturbance
4153* Inputs: block address
4154* Outputs: PASS=0 / FAIL=1
4155* Description: used to handle read disturbance. Data in block that
4156* reaches its read limit is moved to new block
4157*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4158int FTL_Read_Disturbance(u32 blk_addr)
4159{
4160 int wResult = FAIL;
4161 u32 *pbt = (u32 *) g_pBlockTable;
4162 u32 dwOldBlockAddr = blk_addr;
4163 u32 wBlockNum;
4164 u32 i;
4165 u32 wLeastReadCounter = 0xFFFF;
4166 u32 wLeastReadIndex = BAD_BLOCK;
4167 u32 wSpareBlockNum = 0;
4168 u32 wTempNode;
4169 u32 wReplacedNode;
4170 u8 *g_pTempBuf;
4171
4172 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
4173 __FILE__, __LINE__, __func__);
4174
4175#if CMD_DMA
4176 g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
4177 cp_back_buf_idx++;
4178 if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
4179 printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
4180 "Maybe too many pending commands in your CDMA chain.\n");
4181 return FAIL;
4182 }
4183#else
4184 g_pTempBuf = tmp_buf_read_disturbance;
4185#endif
4186
4187 wBlockNum = FTL_Get_Block_Index(blk_addr);
4188
4189 do {
4190 /* This is a bug.Here 'i' should be logical block number
4191 * and start from 1 (0 is reserved for block table).
4192 * Have fixed it. - Yunpeng 2008. 12. 19
4193 */
4194 for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
4195 if (IS_SPARE_BLOCK(i)) {
4196 u32 wPhysicalIndex =
4197 (u32)((~SPARE_BLOCK) & pbt[i]);
4198 if (g_pReadCounter[wPhysicalIndex -
4199 DeviceInfo.wSpectraStartBlock] <
4200 wLeastReadCounter) {
4201 wLeastReadCounter =
4202 g_pReadCounter[wPhysicalIndex -
4203 DeviceInfo.wSpectraStartBlock];
4204 wLeastReadIndex = i;
4205 }
4206 wSpareBlockNum++;
4207 }
4208 }
4209
4210 if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
4211 wResult = GLOB_FTL_Garbage_Collection();
4212 if (PASS == wResult)
4213 continue;
4214 else
4215 break;
4216 } else {
4217 wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
4218 wReplacedNode = (u32)((~SPARE_BLOCK) &
4219 pbt[wLeastReadIndex]);
4220#if CMD_DMA
4221 pbt[wBlockNum] = wReplacedNode;
4222 pbt[wLeastReadIndex] = wTempNode;
4223 p_BTableChangesDelta =
4224 (struct BTableChangesDelta *)g_pBTDelta_Free;
4225 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4226
4227 p_BTableChangesDelta->ftl_cmd_cnt =
4228 ftl_cmd_cnt;
4229 p_BTableChangesDelta->BT_Index = wBlockNum;
4230 p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
4231 p_BTableChangesDelta->ValidFields = 0x0C;
4232
4233 p_BTableChangesDelta =
4234 (struct BTableChangesDelta *)g_pBTDelta_Free;
4235 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4236
4237 p_BTableChangesDelta->ftl_cmd_cnt =
4238 ftl_cmd_cnt;
4239 p_BTableChangesDelta->BT_Index = wLeastReadIndex;
4240 p_BTableChangesDelta->BT_Entry_Value =
4241 pbt[wLeastReadIndex];
4242 p_BTableChangesDelta->ValidFields = 0x0C;
4243
4244 wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
4245 dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
4246 LLD_CMD_FLAG_MODE_CDMA);
4247 if (wResult == FAIL)
4248 return wResult;
4249
4250 ftl_cmd_cnt++;
4251
4252 if (wResult != FAIL) {
4253 if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
4254 g_pTempBuf, pbt[wBlockNum], 0,
4255 DeviceInfo.wPagesPerBlock)) {
4256 nand_dbg_print(NAND_DBG_WARN,
4257 "NAND Program fail in "
4258 "%s, Line %d, Function: %s, "
4259 "new Bad Block %d "
4260 "generated!\n",
4261 __FILE__, __LINE__, __func__,
4262 (int)pbt[wBlockNum]);
4263 wResult = FAIL;
4264 MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
4265 }
4266 ftl_cmd_cnt++;
4267 }
4268#else
4269 wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
4270 dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
4271 if (wResult == FAIL)
4272 return wResult;
4273
4274 if (wResult != FAIL) {
4275 /* This is a bug. At this time, pbt[wBlockNum]
4276 is still the physical address of
4277 discard block, and should not be write.
4278 Have fixed it as below.
4279 -- Yunpeng 2008.12.19
4280 */
4281 wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
4282 wReplacedNode, 0,
4283 DeviceInfo.wPagesPerBlock);
4284 if (wResult == FAIL) {
4285 nand_dbg_print(NAND_DBG_WARN,
4286 "NAND Program fail in "
4287 "%s, Line %d, Function: %s, "
4288 "new Bad Block %d "
4289 "generated!\n",
4290 __FILE__, __LINE__, __func__,
4291 (int)wReplacedNode);
4292 MARK_BLOCK_AS_BAD(wReplacedNode);
4293 } else {
4294 pbt[wBlockNum] = wReplacedNode;
4295 pbt[wLeastReadIndex] = wTempNode;
4296 }
4297 }
4298
4299 if ((wResult == PASS) && (g_cBlockTableStatus !=
4300 IN_PROGRESS_BLOCK_TABLE)) {
4301 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
4302 FTL_Write_IN_Progress_Block_Table_Page();
4303 }
4304#endif
4305 }
4306 } while (wResult != PASS)
4307 ;
4308
4309#if CMD_DMA
4310 /* ... */
4311#endif
4312
4313 return wResult;
4314}
4315