fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/platform_device.h>
15#include <linux/dma-mapping.h>
16#include <linux/delay.h>
17#include <linux/clk.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h>
20#include <linux/mtd/partitions.h>
a1c06ee1 21#include <linux/io.h>
22#include <linux/irq.h>
fe69af00 23#include <asm/dma.h>
24
a09e64fb 25#include <mach/pxa-regs.h>
26#include <mach/pxa3xx_nand.h>
fe69af00 27
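/* completion timeout used for command/DMA waits: 2 * HZ / 10 = 200 ms, independent of HZ */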
28#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
29
30/* registers and bit definitions */
31#define NDCR (0x00) /* Control register */
32#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
33#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
34#define NDSR (0x14) /* Status Register */
35#define NDPCR (0x18) /* Page Count Register */
36#define NDBDR0 (0x1C) /* Bad Block Register 0 */
37#define NDBDR1 (0x20) /* Bad Block Register 1 */
38#define NDDB (0x40) /* Data Buffer */
39#define NDCB0 (0x48) /* Command Buffer0 */
40#define NDCB1 (0x4C) /* Command Buffer1 */
41#define NDCB2 (0x50) /* Command Buffer2 */
42
43#define NDCR_SPARE_EN (0x1 << 31)
44#define NDCR_ECC_EN (0x1 << 30)
45#define NDCR_DMA_EN (0x1 << 29)
46#define NDCR_ND_RUN (0x1 << 28)
47#define NDCR_DWIDTH_C (0x1 << 27)
48#define NDCR_DWIDTH_M (0x1 << 26)
49#define NDCR_PAGE_SZ (0x1 << 24)
50#define NDCR_NCSX (0x1 << 23)
51#define NDCR_ND_MODE (0x3 << 21)
52#define NDCR_NAND_MODE (0x0)
53#define NDCR_CLR_PG_CNT (0x1 << 20)
54#define NDCR_CLR_ECC (0x1 << 19)
55#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
56#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
57
58#define NDCR_RA_START (0x1 << 15)
59#define NDCR_PG_PER_BLK (0x1 << 14)
60#define NDCR_ND_ARB_EN (0x1 << 12)
61
62#define NDSR_MASK (0xfff)
63#define NDSR_RDY (0x1 << 11)
64#define NDSR_CS0_PAGED (0x1 << 10)
65#define NDSR_CS1_PAGED (0x1 << 9)
66#define NDSR_CS0_CMDD (0x1 << 8)
67#define NDSR_CS1_CMDD (0x1 << 7)
68#define NDSR_CS0_BBD (0x1 << 6)
69#define NDSR_CS1_BBD (0x1 << 5)
70#define NDSR_DBERR (0x1 << 4)
71#define NDSR_SBERR (0x1 << 3)
72#define NDSR_WRDREQ (0x1 << 2)
73#define NDSR_RDDREQ (0x1 << 1)
74#define NDSR_WRCMDREQ (0x1)
75
76#define NDCB0_AUTO_RS (0x1 << 25)
77#define NDCB0_CSEL (0x1 << 24)
78#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
79#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
80#define NDCB0_NC (0x1 << 20)
81#define NDCB0_DBC (0x1 << 19)
82#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
83#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
84#define NDCB0_CMD2_MASK (0xff << 8)
85#define NDCB0_CMD1_MASK (0xff)
86#define NDCB0_ADDR_CYC_SHIFT (16)
87
88/* dma-able I/O address for the NAND data and commands */
89#define NDCB0_DMA_ADDR (0x43100048)
90#define NDDB_DMA_ADDR (0x43100040)
91
92/* macros for registers read/write */
93#define nand_writel(info, off, val) \
94 __raw_writel((val), (info)->mmio_base + (off))
95
96#define nand_readl(info, off) \
97 __raw_readl((info)->mmio_base + (off))
98
99/* error code and state */
100enum {
101 ERR_NONE = 0,
102 ERR_DMABUSERR = -1,
103 ERR_SENDCMD = -2,
104 ERR_DBERR = -3,
105 ERR_BBERR = -4,
106};
107
108enum {
109 STATE_READY = 0,
110 STATE_CMD_HANDLE,
111 STATE_DMA_READING,
112 STATE_DMA_WRITING,
113 STATE_DMA_DONE,
114 STATE_PIO_READING,
115 STATE_PIO_WRITING,
116};
117
fe69af00 118struct pxa3xx_nand_info {
119 struct nand_chip nand_chip;
120
121 struct platform_device *pdev;
122 struct pxa3xx_nand_flash *flash_info;
123
124 struct clk *clk;
125 void __iomem *mmio_base;
126
127 unsigned int buf_start;
128 unsigned int buf_count;
129
130 /* DMA information */
131 int drcmr_dat;
132 int drcmr_cmd;
133
134 unsigned char *data_buff;
135 dma_addr_t data_buff_phys;
136 size_t data_buff_size;
137 int data_dma_ch;
138 struct pxa_dma_desc *data_desc;
139 dma_addr_t data_desc_addr;
140
141 uint32_t reg_ndcr;
142
143 /* saved column/page_addr during CMD_SEQIN */
144 int seqin_column;
145 int seqin_page_addr;
146
147 /* relate to the command */
148 unsigned int state;
149
150 int use_ecc; /* use HW ECC ? */
151 int use_dma; /* use DMA ? */
152
153 size_t data_size; /* data size in FIFO */
154 int retcode;
155 struct completion cmd_complete;
156
157 /* generated NDCBx register values */
158 uint32_t ndcb0;
159 uint32_t ndcb1;
160 uint32_t ndcb2;
161};
162
163static int use_dma = 1;
164module_param(use_dma, bool, 0444);
 165MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
166
167static struct pxa3xx_nand_cmdset smallpage_cmdset = {
168 .read1 = 0x0000,
169 .read2 = 0x0050,
170 .program = 0x1080,
171 .read_status = 0x0070,
172 .read_id = 0x0090,
173 .erase = 0xD060,
174 .reset = 0x00FF,
175 .lock = 0x002A,
176 .unlock = 0x2423,
177 .lock_status = 0x007A,
178};
179
180static struct pxa3xx_nand_cmdset largepage_cmdset = {
181 .read1 = 0x3000,
182 .read2 = 0x0050,
183 .program = 0x1080,
184 .read_status = 0x0070,
185 .read_id = 0x0090,
186 .erase = 0xD060,
187 .reset = 0x00FF,
188 .lock = 0x002A,
189 .unlock = 0x2423,
190 .lock_status = 0x007A,
191};
192
193static struct pxa3xx_nand_timing samsung512MbX16_timing = {
194 .tCH = 10,
195 .tCS = 0,
196 .tWH = 20,
197 .tWP = 40,
198 .tRH = 30,
199 .tRP = 40,
200 .tR = 11123,
201 .tWHR = 110,
202 .tAR = 10,
203};
204
205static struct pxa3xx_nand_flash samsung512MbX16 = {
206 .timing = &samsung512MbX16_timing,
207 .cmdset = &smallpage_cmdset,
208 .page_per_block = 32,
209 .page_size = 512,
210 .flash_width = 16,
211 .dfc_width = 16,
212 .num_blocks = 4096,
213 .chip_id = 0x46ec,
214};
215
216static struct pxa3xx_nand_timing micron_timing = {
217 .tCH = 10,
218 .tCS = 25,
219 .tWH = 15,
220 .tWP = 25,
221 .tRH = 15,
222 .tRP = 25,
223 .tR = 25000,
224 .tWHR = 60,
225 .tAR = 10,
226};
227
228static struct pxa3xx_nand_flash micron1GbX8 = {
229 .timing = &micron_timing,
230 .cmdset = &largepage_cmdset,
231 .page_per_block = 64,
232 .page_size = 2048,
233 .flash_width = 8,
234 .dfc_width = 8,
235 .num_blocks = 1024,
236 .chip_id = 0xa12c,
237};
238
239static struct pxa3xx_nand_flash micron1GbX16 = {
240 .timing = &micron_timing,
241 .cmdset = &largepage_cmdset,
242 .page_per_block = 64,
243 .page_size = 2048,
244 .flash_width = 16,
245 .dfc_width = 16,
246 .num_blocks = 1024,
247 .chip_id = 0xb12c,
248};
249
4262bd29 250static struct pxa3xx_nand_timing stm2GbX16_timing = {
251 .tCH = 10,
252 .tCS = 35,
253 .tWH = 15,
254 .tWP = 25,
255 .tRH = 15,
256 .tRP = 25,
257 .tR = 25000,
258 .tWHR = 60,
259 .tAR = 10,
260};
261
262static struct pxa3xx_nand_flash stm2GbX16 = {
263 .timing = &stm2GbX16_timing,
264 .page_per_block = 64,
265 .page_size = 2048,
266 .flash_width = 16,
267 .dfc_width = 16,
268 .num_blocks = 2048,
269 .chip_id = 0xba20,
270};
271
fe69af00 272static struct pxa3xx_nand_flash *builtin_flash_types[] = {
273 &samsung512MbX16,
274 &micron1GbX8,
275 &micron1GbX16,
4262bd29 276 &stm2GbX16,
fe69af00 277};
278
279#define NDTR0_tCH(c) (min((c), 7) << 19)
280#define NDTR0_tCS(c) (min((c), 7) << 16)
281#define NDTR0_tWH(c) (min((c), 7) << 11)
282#define NDTR0_tWP(c) (min((c), 7) << 8)
283#define NDTR0_tRH(c) (min((c), 7) << 3)
284#define NDTR0_tRP(c) (min((c), 7) << 0)
285
286#define NDTR1_tR(c) (min((c), 65535) << 16)
287#define NDTR1_tWHR(c) (min((c), 15) << 4)
288#define NDTR1_tAR(c) (min((c), 15) << 0)
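/* the min() clamps above keep each value within its register field:
 * 3 bits for tCH/tCS/tWH/tWP/tRH/tRP, 16 bits for tR, 4 bits for tWHR/tAR
 */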
289
 290/* convert nanoseconds to NAND flash controller clock cycles (conservatively rounded up) */
291#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1)
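/* e.g. assuming a 156 MHz NAND controller clock for illustration (the real
 * rate comes from clk_get_rate()): ns2cycle(10, 156000000) =
 * 10 * 156 / 1000 + 1 = 2 cycles, i.e. ~12.8 ns >= the requested 10 ns
 */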
292
293static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
294 struct pxa3xx_nand_timing *t)
295{
296 unsigned long nand_clk = clk_get_rate(info->clk);
297 uint32_t ndtr0, ndtr1;
298
299 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
300 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
301 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
302 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
303 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
304 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
305
306 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
307 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
308 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
309
310 nand_writel(info, NDTR0CS0, ndtr0);
311 nand_writel(info, NDTR1CS0, ndtr1);
312}
313
314#define WAIT_EVENT_TIMEOUT 10
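/* wait_for_event() polls NDSR up to WAIT_EVENT_TIMEOUT times with a 10 us
 * delay per iteration, i.e. roughly 100 us in total
 */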
315
316static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
317{
318 int timeout = WAIT_EVENT_TIMEOUT;
319 uint32_t ndsr;
320
321 while (timeout--) {
322 ndsr = nand_readl(info, NDSR) & NDSR_MASK;
323 if (ndsr & event) {
324 nand_writel(info, NDSR, ndsr);
325 return 0;
326 }
327 udelay(10);
328 }
329
330 return -ETIMEDOUT;
331}
332
333static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
334 uint16_t cmd, int column, int page_addr)
335{
336 struct pxa3xx_nand_flash *f = info->flash_info;
337 struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
338
339 /* calculate data size */
340 switch (f->page_size) {
341 case 2048:
342 info->data_size = (info->use_ecc) ? 2088 : 2112;
343 break;
344 case 512:
345 info->data_size = (info->use_ecc) ? 520 : 528;
346 break;
347 default:
348 return -EINVAL;
349 }
350
351 /* generate values for NDCBx registers */
352 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
353 info->ndcb1 = 0;
354 info->ndcb2 = 0;
355 info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles);
356
357 if (f->col_addr_cycles == 2) {
358 /* large block, 2 cycles for column address
359 * row address starts from 3rd cycle
360 */
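		/* e.g. with 3 row address cycles, page_addr 0x012345 and column 0
		 * give NDCB1 = 0x23450000 and NDCB2 = 0x01
		 */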
361 info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
362 if (f->row_addr_cycles == 3)
363 info->ndcb2 = (page_addr >> 16) & 0xff;
364 } else
 365		/* small block, 1 cycle for column address
366 * row address starts from 2nd cycle
367 */
368 info->ndcb1 = (page_addr << 8) | (column & 0xff);
369
370 if (cmd == cmdset->program)
371 info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
372
373 return 0;
374}
375
376static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
377 uint16_t cmd, int page_addr)
378{
379 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
380 info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
381 info->ndcb1 = page_addr;
382 info->ndcb2 = 0;
383 return 0;
384}
385
386static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
387{
388 struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
389
390 info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
391 info->ndcb1 = 0;
392 info->ndcb2 = 0;
393
394 if (cmd == cmdset->read_id) {
395 info->ndcb0 |= NDCB0_CMD_TYPE(3);
396 info->data_size = 8;
397 } else if (cmd == cmdset->read_status) {
398 info->ndcb0 |= NDCB0_CMD_TYPE(4);
399 info->data_size = 8;
400 } else if (cmd == cmdset->reset || cmd == cmdset->lock ||
401 cmd == cmdset->unlock) {
402 info->ndcb0 |= NDCB0_CMD_TYPE(5);
403 } else
404 return -EINVAL;
405
406 return 0;
407}
408
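/* NDCR bits 11:0 mask the corresponding NDSR interrupt sources: clearing a
 * bit (enable_int) unmasks the interrupt, setting it (disable_int) masks it
 */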
409static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
410{
411 uint32_t ndcr;
412
413 ndcr = nand_readl(info, NDCR);
414 nand_writel(info, NDCR, ndcr & ~int_mask);
415}
416
417static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
418{
419 uint32_t ndcr;
420
421 ndcr = nand_readl(info, NDCR);
422 nand_writel(info, NDCR, ndcr | int_mask);
423}
424
 425/* NOTE: ND_RUN must be set first, and only then the command buffer
 426 * written; otherwise the controller does not start.
 427 */
428static int write_cmd(struct pxa3xx_nand_info *info)
429{
430 uint32_t ndcr;
431
432 /* clear status bits and run */
433 nand_writel(info, NDSR, NDSR_MASK);
434
435 ndcr = info->reg_ndcr;
436
437 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
438 ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
439 ndcr |= NDCR_ND_RUN;
440
441 nand_writel(info, NDCR, ndcr);
442
443 if (wait_for_event(info, NDSR_WRCMDREQ)) {
444 printk(KERN_ERR "timed out writing command\n");
445 return -ETIMEDOUT;
446 }
447
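	/* all three writes below target the NDCB0 address: consecutive writes
	 * push into the controller's command buffer as NDCB0, NDCB1 and NDCB2
	 */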
448 nand_writel(info, NDCB0, info->ndcb0);
449 nand_writel(info, NDCB0, info->ndcb1);
450 nand_writel(info, NDCB0, info->ndcb2);
451 return 0;
452}
453
454static int handle_data_pio(struct pxa3xx_nand_info *info)
455{
456 int ret, timeout = CHIP_DELAY_TIMEOUT;
457
458 switch (info->state) {
459 case STATE_PIO_WRITING:
 460		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
 461				DIV_ROUND_UP(info->data_size, 4));
462
463 enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
464
465 ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
466 if (!ret) {
467 printk(KERN_ERR "program command time out\n");
468 return -1;
469 }
470 break;
471 case STATE_PIO_READING:
 472		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
 473				DIV_ROUND_UP(info->data_size, 4));
474 break;
475 default:
a1c06ee1 476 printk(KERN_ERR "%s: invalid state %d\n", __func__,
fe69af00 477 info->state);
478 return -EINVAL;
479 }
480
481 info->state = STATE_READY;
482 return 0;
483}
484
485static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
486{
487 struct pxa_dma_desc *desc = info->data_desc;
488 int dma_len = ALIGN(info->data_size, 32);
489
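	/* single descriptor: 32-bit accesses, 32-byte bursts, stop the channel
	 * and raise an IRQ once dma_len bytes have been transferred
	 */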
490 desc->ddadr = DDADR_STOP;
491 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
492
493 if (dir_out) {
494 desc->dsadr = info->data_buff_phys;
495 desc->dtadr = NDDB_DMA_ADDR;
496 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
497 } else {
498 desc->dtadr = info->data_buff_phys;
499 desc->dsadr = NDDB_DMA_ADDR;
500 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
501 }
502
503 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
504 DDADR(info->data_dma_ch) = info->data_desc_addr;
505 DCSR(info->data_dma_ch) |= DCSR_RUN;
506}
507
508static void pxa3xx_nand_data_dma_irq(int channel, void *data)
509{
510 struct pxa3xx_nand_info *info = data;
511 uint32_t dcsr;
512
513 dcsr = DCSR(channel);
514 DCSR(channel) = dcsr;
515
516 if (dcsr & DCSR_BUSERR) {
517 info->retcode = ERR_DMABUSERR;
518 complete(&info->cmd_complete);
519 }
520
521 if (info->state == STATE_DMA_WRITING) {
522 info->state = STATE_DMA_DONE;
523 enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
524 } else {
525 info->state = STATE_READY;
526 complete(&info->cmd_complete);
527 }
528}
529
530static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
531{
532 struct pxa3xx_nand_info *info = devid;
533 unsigned int status;
534
535 status = nand_readl(info, NDSR);
536
537 if (status & (NDSR_RDDREQ | NDSR_DBERR)) {
538 if (status & NDSR_DBERR)
539 info->retcode = ERR_DBERR;
540
541 disable_int(info, NDSR_RDDREQ | NDSR_DBERR);
542
543 if (info->use_dma) {
544 info->state = STATE_DMA_READING;
545 start_data_dma(info, 0);
546 } else {
547 info->state = STATE_PIO_READING;
548 complete(&info->cmd_complete);
549 }
550 } else if (status & NDSR_WRDREQ) {
551 disable_int(info, NDSR_WRDREQ);
552 if (info->use_dma) {
553 info->state = STATE_DMA_WRITING;
554 start_data_dma(info, 1);
555 } else {
556 info->state = STATE_PIO_WRITING;
557 complete(&info->cmd_complete);
558 }
559 } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
560 if (status & NDSR_CS0_BBD)
561 info->retcode = ERR_BBERR;
562
563 disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
564 info->state = STATE_READY;
565 complete(&info->cmd_complete);
566 }
567 nand_writel(info, NDSR, status);
568 return IRQ_HANDLED;
569}
570
571static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
572{
573 uint32_t ndcr;
574 int ret, timeout = CHIP_DELAY_TIMEOUT;
575
576 if (write_cmd(info)) {
577 info->retcode = ERR_SENDCMD;
578 goto fail_stop;
579 }
580
581 info->state = STATE_CMD_HANDLE;
582
583 enable_int(info, event);
584
585 ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
586 if (!ret) {
587 printk(KERN_ERR "command execution timed out\n");
588 info->retcode = ERR_SENDCMD;
589 goto fail_stop;
590 }
591
592 if (info->use_dma == 0 && info->data_size > 0)
593 if (handle_data_pio(info))
594 goto fail_stop;
595
596 return 0;
597
598fail_stop:
599 ndcr = nand_readl(info, NDCR);
600 nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
601 udelay(10);
602 return -ETIMEDOUT;
603}
604
605static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
606{
607 struct pxa3xx_nand_info *info = mtd->priv;
608 return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
609}
610
611static inline int is_buf_blank(uint8_t *buf, size_t len)
612{
613 for (; len > 0; len--)
614 if (*buf++ != 0xff)
615 return 0;
616 return 1;
617}
618
619static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
a1c06ee1 620 int column, int page_addr)
fe69af00 621{
622 struct pxa3xx_nand_info *info = mtd->priv;
a1c06ee1 623 struct pxa3xx_nand_flash *flash_info = info->flash_info;
fe69af00 624 struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
625 int ret;
626
627 info->use_dma = (use_dma) ? 1 : 0;
628 info->use_ecc = 0;
629 info->data_size = 0;
630 info->state = STATE_READY;
631
632 init_completion(&info->cmd_complete);
633
634 switch (command) {
635 case NAND_CMD_READOOB:
636 /* disable HW ECC to get all the OOB data */
637 info->buf_count = mtd->writesize + mtd->oobsize;
638 info->buf_start = mtd->writesize + column;
639
640 if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
641 break;
642
643 pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
644
 645		/* We are only reading OOB, so errors in the data area do not matter */
646 if (info->retcode == ERR_DBERR)
647 info->retcode = ERR_NONE;
648 break;
649
650 case NAND_CMD_READ0:
651 info->use_ecc = 1;
652 info->retcode = ERR_NONE;
653 info->buf_start = column;
654 info->buf_count = mtd->writesize + mtd->oobsize;
655 memset(info->data_buff, 0xFF, info->buf_count);
656
657 if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
658 break;
659
660 pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
661
662 if (info->retcode == ERR_DBERR) {
 663			/* for a blank page (all 0xff) the HW calculates its ECC as
 664			 * 0, which differs from the ECC information stored in the
 665			 * OOB area; ignore such double bit errors
 666			 */
667 if (is_buf_blank(info->data_buff, mtd->writesize))
668 info->retcode = ERR_NONE;
669 }
670 break;
671 case NAND_CMD_SEQIN:
672 info->buf_start = column;
673 info->buf_count = mtd->writesize + mtd->oobsize;
674 memset(info->data_buff, 0xff, info->buf_count);
675
676 /* save column/page_addr for next CMD_PAGEPROG */
677 info->seqin_column = column;
678 info->seqin_page_addr = page_addr;
679 break;
680 case NAND_CMD_PAGEPROG:
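		/* a SEQIN column inside the OOB area means an OOB-only write,
		 * so hardware ECC is skipped for it
		 */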
681 info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;
682
683 if (prepare_read_prog_cmd(info, cmdset->program,
684 info->seqin_column, info->seqin_page_addr))
685 break;
686
687 pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
688 break;
689 case NAND_CMD_ERASE1:
690 if (prepare_erase_cmd(info, cmdset->erase, page_addr))
691 break;
692
693 pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
694 break;
695 case NAND_CMD_ERASE2:
696 break;
697 case NAND_CMD_READID:
698 case NAND_CMD_STATUS:
699 info->use_dma = 0; /* force PIO read */
700 info->buf_start = 0;
701 info->buf_count = (command == NAND_CMD_READID) ?
702 flash_info->read_id_bytes : 1;
703
704 if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
705 cmdset->read_id : cmdset->read_status))
706 break;
707
708 pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
709 break;
710 case NAND_CMD_RESET:
711 if (prepare_other_cmd(info, cmdset->reset))
712 break;
713
714 ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
715 if (ret == 0) {
716 int timeout = 2;
717 uint32_t ndcr;
718
719 while (timeout--) {
720 if (nand_readl(info, NDSR) & NDSR_RDY)
721 break;
722 msleep(10);
723 }
724
725 ndcr = nand_readl(info, NDCR);
726 nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
727 }
728 break;
729 default:
 730		printk(KERN_ERR "unsupported command.\n");
731 break;
732 }
733
734 if (info->retcode == ERR_DBERR) {
735 printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
736 info->retcode = ERR_NONE;
737 }
738}
739
740static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
741{
742 struct pxa3xx_nand_info *info = mtd->priv;
743 char retval = 0xFF;
744
745 if (info->buf_start < info->buf_count)
 746		/* Has a new command just been sent? */
747 retval = info->data_buff[info->buf_start++];
748
749 return retval;
750}
751
752static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
753{
754 struct pxa3xx_nand_info *info = mtd->priv;
755 u16 retval = 0xFFFF;
756
757 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
758 retval = *((u16 *)(info->data_buff+info->buf_start));
759 info->buf_start += 2;
760 }
761 return retval;
762}
763
764static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
765{
766 struct pxa3xx_nand_info *info = mtd->priv;
767 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
768
769 memcpy(buf, info->data_buff + info->buf_start, real_len);
770 info->buf_start += real_len;
771}
772
773static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
774 const uint8_t *buf, int len)
775{
776 struct pxa3xx_nand_info *info = mtd->priv;
777 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
778
779 memcpy(info->data_buff + info->buf_start, buf, real_len);
780 info->buf_start += real_len;
781}
782
783static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
784 const uint8_t *buf, int len)
785{
786 return 0;
787}
788
789static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
790{
791 return;
792}
793
794static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
795{
796 struct pxa3xx_nand_info *info = mtd->priv;
797
 798	/* pxa3xx_nand_do_cmd() has already waited for the command to complete */
799 if (this->state == FL_WRITING || this->state == FL_ERASING) {
800 if (info->retcode == ERR_NONE)
801 return 0;
802 else {
 803			/*
 804			 * any error makes it return 0x01, which tells the
 805			 * caller that the erase or write failed
 806			 */
807 return 0x01;
808 }
809 }
810
811 return 0;
812}
813
814static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
815{
816 return;
817}
818
819static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
820 const uint8_t *dat, uint8_t *ecc_code)
821{
822 return 0;
823}
824
825static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
826 uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
827{
828 struct pxa3xx_nand_info *info = mtd->priv;
 829	/*
 830	 * Any error (ERR_SENDCMD, ERR_DBERR, ERR_DMABUSERR, ...) is
 831	 * reported as an ECC error so the caller knows the read failed.
 832	 * The individual errors are distinguished internally, but
 833	 * nand_read_ecc() only checks this function's return value.
 834	 */
835 if (info->retcode != ERR_NONE)
836 return -1;
837
838 return 0;
839}
840
841static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
842{
843 struct pxa3xx_nand_flash *f = info->flash_info;
844 struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
845 uint32_t ndcr;
846 uint8_t id_buff[8];
847
848 if (prepare_other_cmd(info, cmdset->read_id)) {
849 printk(KERN_ERR "failed to prepare command\n");
850 return -EINVAL;
851 }
852
853 /* Send command */
854 if (write_cmd(info))
855 goto fail_timeout;
856
 857	/* Wait for the read data request (ID bytes ready in the data buffer) */
858 if (wait_for_event(info, NDSR_RDDREQ))
859 goto fail_timeout;
860
861 __raw_readsl(info->mmio_base + NDDB, id_buff, 2);
862 *id = id_buff[0] | (id_buff[1] << 8);
863 return 0;
864
865fail_timeout:
866 ndcr = nand_readl(info, NDCR);
867 nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
868 udelay(10);
869 return -ETIMEDOUT;
870}
871
872static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
873 struct pxa3xx_nand_flash *f)
874{
875 struct platform_device *pdev = info->pdev;
876 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
877 uint32_t ndcr = 0x00000FFF; /* disable all interrupts */
878
879 if (f->page_size != 2048 && f->page_size != 512)
880 return -EINVAL;
881
882 if (f->flash_width != 16 && f->flash_width != 8)
883 return -EINVAL;
884
885 /* calculate flash information */
886 f->oob_size = (f->page_size == 2048) ? 64 : 16;
887 f->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
888
889 /* calculate addressing information */
890 f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
891
892 if (f->num_blocks * f->page_per_block > 65536)
893 f->row_addr_cycles = 3;
894 else
895 f->row_addr_cycles = 2;
896
897 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
898 ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0;
899 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
900 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
901 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
902 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
903
904 ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes);
905 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
906
907 info->reg_ndcr = ndcr;
908
909 pxa3xx_nand_set_timing(info, f->timing);
910 info->flash_info = f;
911 return 0;
912}
913
914static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info)
915{
916 struct pxa3xx_nand_flash *f;
917 uint32_t id;
918 int i;
919
920 for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
921
922 f = builtin_flash_types[i];
923
924 if (pxa3xx_nand_config_flash(info, f))
925 continue;
926
927 if (__readid(info, &id))
928 continue;
929
930 if (id == f->chip_id)
931 return 0;
932 }
933
934 return -ENODEV;
935}
936
937/* the maximum possible buffer size for large page with OOB data
938 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
939 * data buffer and the DMA descriptor
940 */
941#define MAX_BUFF_SIZE PAGE_SIZE
942
943static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
944{
945 struct platform_device *pdev = info->pdev;
946 int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
947
948 if (use_dma == 0) {
949 info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
950 if (info->data_buff == NULL)
951 return -ENOMEM;
952 return 0;
953 }
954
955 info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
956 &info->data_buff_phys, GFP_KERNEL);
957 if (info->data_buff == NULL) {
958 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
959 return -ENOMEM;
960 }
961
962 info->data_buff_size = MAX_BUFF_SIZE;
963 info->data_desc = (void *)info->data_buff + data_desc_offset;
964 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
965
966 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
967 pxa3xx_nand_data_dma_irq, info);
968 if (info->data_dma_ch < 0) {
969 dev_err(&pdev->dev, "failed to request data dma\n");
970 dma_free_coherent(&pdev->dev, info->data_buff_size,
971 info->data_buff, info->data_buff_phys);
972 return info->data_dma_ch;
973 }
974
975 return 0;
976}
977
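/* hardware ECC layouts: the 16-byte small-page OOB keeps its 6 ECC bytes at
 * offsets 8-13 (bytes 2-7 free); the 64-byte large-page OOB keeps its 24 ECC
 * bytes at offsets 40-63 (bytes 2-39 free)
 */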
978static struct nand_ecclayout hw_smallpage_ecclayout = {
979 .eccbytes = 6,
980 .eccpos = {8, 9, 10, 11, 12, 13 },
981 .oobfree = { {2, 6} }
982};
983
984static struct nand_ecclayout hw_largepage_ecclayout = {
985 .eccbytes = 24,
986 .eccpos = {
987 40, 41, 42, 43, 44, 45, 46, 47,
988 48, 49, 50, 51, 52, 53, 54, 55,
989 56, 57, 58, 59, 60, 61, 62, 63},
990 .oobfree = { {2, 38} }
991};
992
993static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
994 struct pxa3xx_nand_info *info)
995{
996 struct pxa3xx_nand_flash *f = info->flash_info;
997 struct nand_chip *this = &info->nand_chip;
998
 999	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
1000
1001 this->waitfunc = pxa3xx_nand_waitfunc;
1002 this->select_chip = pxa3xx_nand_select_chip;
1003 this->dev_ready = pxa3xx_nand_dev_ready;
1004 this->cmdfunc = pxa3xx_nand_cmdfunc;
1005 this->read_word = pxa3xx_nand_read_word;
1006 this->read_byte = pxa3xx_nand_read_byte;
1007 this->read_buf = pxa3xx_nand_read_buf;
1008 this->write_buf = pxa3xx_nand_write_buf;
1009 this->verify_buf = pxa3xx_nand_verify_buf;
1010
1011 this->ecc.mode = NAND_ECC_HW;
1012 this->ecc.hwctl = pxa3xx_nand_ecc_hwctl;
1013 this->ecc.calculate = pxa3xx_nand_ecc_calculate;
1014 this->ecc.correct = pxa3xx_nand_ecc_correct;
1015 this->ecc.size = f->page_size;
1016
1017 if (f->page_size == 2048)
1018 this->ecc.layout = &hw_largepage_ecclayout;
1019 else
1020 this->ecc.layout = &hw_smallpage_ecclayout;
1021
a1c06ee1 1022 this->chip_delay = 25;
fe69af00 1023}
1024
1025static int pxa3xx_nand_probe(struct platform_device *pdev)
1026{
1027 struct pxa3xx_nand_platform_data *pdata;
1028 struct pxa3xx_nand_info *info;
1029 struct nand_chip *this;
1030 struct mtd_info *mtd;
1031 struct resource *r;
1032 int ret = 0, irq;
1033
1034 pdata = pdev->dev.platform_data;
1035
a1c06ee1 1036 if (!pdata) {
fe69af00 1037 dev_err(&pdev->dev, "no platform data defined\n");
1038 return -ENODEV;
1039 }
1040
1041 mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
1042 GFP_KERNEL);
a1c06ee1 1043 if (!mtd) {
fe69af00 1044 dev_err(&pdev->dev, "failed to allocate memory\n");
1045 return -ENOMEM;
a1c06ee1 1046 }
fe69af00 1047
1048 info = (struct pxa3xx_nand_info *)(&mtd[1]);
1049 info->pdev = pdev;
1050
1051 this = &info->nand_chip;
1052 mtd->priv = info;
1053
1054 info->clk = clk_get(&pdev->dev, "NANDCLK");
1055 if (IS_ERR(info->clk)) {
1056 dev_err(&pdev->dev, "failed to get nand clock\n");
1057 ret = PTR_ERR(info->clk);
1058 goto fail_free_mtd;
1059 }
1060 clk_enable(info->clk);
1061
1062 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1063 if (r == NULL) {
1064 dev_err(&pdev->dev, "no resource defined for data DMA\n");
1065 ret = -ENXIO;
1066 goto fail_put_clk;
1067 }
1068 info->drcmr_dat = r->start;
1069
1070 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1071 if (r == NULL) {
1072 dev_err(&pdev->dev, "no resource defined for command DMA\n");
1073 ret = -ENXIO;
1074 goto fail_put_clk;
1075 }
1076 info->drcmr_cmd = r->start;
1077
1078 irq = platform_get_irq(pdev, 0);
1079 if (irq < 0) {
1080 dev_err(&pdev->dev, "no IRQ resource defined\n");
1081 ret = -ENXIO;
1082 goto fail_put_clk;
1083 }
1084
1085 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1086 if (r == NULL) {
1087 dev_err(&pdev->dev, "no IO memory resource defined\n");
1088 ret = -ENODEV;
1089 goto fail_put_clk;
1090 }
1091
1092 r = request_mem_region(r->start, r->end - r->start + 1, pdev->name);
1093 if (r == NULL) {
1094 dev_err(&pdev->dev, "failed to request memory resource\n");
1095 ret = -EBUSY;
1096 goto fail_put_clk;
1097 }
1098
1099 info->mmio_base = ioremap(r->start, r->end - r->start + 1);
1100 if (info->mmio_base == NULL) {
1101 dev_err(&pdev->dev, "ioremap() failed\n");
1102 ret = -ENODEV;
1103 goto fail_free_res;
1104 }
1105
1106 ret = pxa3xx_nand_init_buff(info);
1107 if (ret)
1108 goto fail_free_io;
1109
1110 ret = request_irq(IRQ_NAND, pxa3xx_nand_irq, IRQF_DISABLED,
1111 pdev->name, info);
1112 if (ret < 0) {
1113 dev_err(&pdev->dev, "failed to request IRQ\n");
1114 goto fail_free_buf;
1115 }
1116
1117 ret = pxa3xx_nand_detect_flash(info);
1118 if (ret) {
1119 dev_err(&pdev->dev, "failed to detect flash\n");
1120 ret = -ENODEV;
1121 goto fail_free_irq;
1122 }
1123
1124 pxa3xx_nand_init_mtd(mtd, info);
1125
1126 platform_set_drvdata(pdev, mtd);
1127
1128 if (nand_scan(mtd, 1)) {
1129 dev_err(&pdev->dev, "failed to scan nand\n");
1130 ret = -ENXIO;
1131 goto fail_free_irq;
1132 }
1133
1134 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1135
1136fail_free_irq:
1137 free_irq(IRQ_NAND, info);
1138fail_free_buf:
1139 if (use_dma) {
1140 pxa_free_dma(info->data_dma_ch);
1141 dma_free_coherent(&pdev->dev, info->data_buff_size,
1142 info->data_buff, info->data_buff_phys);
1143 } else
1144 kfree(info->data_buff);
1145fail_free_io:
1146 iounmap(info->mmio_base);
1147fail_free_res:
1148 release_mem_region(r->start, r->end - r->start + 1);
1149fail_put_clk:
1150 clk_disable(info->clk);
1151 clk_put(info->clk);
1152fail_free_mtd:
1153 kfree(mtd);
1154 return ret;
1155}
1156
1157static int pxa3xx_nand_remove(struct platform_device *pdev)
1158{
1159 struct mtd_info *mtd = platform_get_drvdata(pdev);
1160 struct pxa3xx_nand_info *info = mtd->priv;
1161
1162 platform_set_drvdata(pdev, NULL);
1163
1164 del_mtd_device(mtd);
1165 del_mtd_partitions(mtd);
1166 free_irq(IRQ_NAND, info);
1167 if (use_dma) {
1168 pxa_free_dma(info->data_dma_ch);
 1169		dma_free_coherent(&pdev->dev, info->data_buff_size,
1170 info->data_buff, info->data_buff_phys);
1171 } else
1172 kfree(info->data_buff);
1173 kfree(mtd);
1174 return 0;
1175}
1176
1177#ifdef CONFIG_PM
1178static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1179{
1180 struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1181 struct pxa3xx_nand_info *info = mtd->priv;
1182
1183 if (info->state != STATE_READY) {
1184 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1185 return -EAGAIN;
1186 }
1187
1188 return 0;
1189}
1190
1191static int pxa3xx_nand_resume(struct platform_device *pdev)
1192{
1193 struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1194 struct pxa3xx_nand_info *info = mtd->priv;
1195
1196 clk_enable(info->clk);
1197
9b62d864 1198 return pxa3xx_nand_config_flash(info, info->flash_info);
fe69af00 1199}
1200#else
1201#define pxa3xx_nand_suspend NULL
1202#define pxa3xx_nand_resume NULL
1203#endif
1204
1205static struct platform_driver pxa3xx_nand_driver = {
1206 .driver = {
1207 .name = "pxa3xx-nand",
1208 },
1209 .probe = pxa3xx_nand_probe,
1210 .remove = pxa3xx_nand_remove,
1211 .suspend = pxa3xx_nand_suspend,
1212 .resume = pxa3xx_nand_resume,
1213};
1214
1215static int __init pxa3xx_nand_init(void)
1216{
1217 return platform_driver_register(&pxa3xx_nand_driver);
1218}
1219module_init(pxa3xx_nand_init);
1220
1221static void __exit pxa3xx_nand_exit(void)
1222{
1223 platform_driver_unregister(&pxa3xx_nand_driver);
1224}
1225module_exit(pxa3xx_nand_exit);
1226
1227MODULE_LICENSE("GPL");
1228MODULE_DESCRIPTION("PXA3xx NAND controller driver");