/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_Calc_Used_Bits
* Inputs:       Power-of-2 number
* Outputs:      Number of used bits
*               0, if the argument is 0
* Description:  Calculate the number of bits used by a given power-of-2 number
*               Number can be up to 32 bits
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
	int tot_bits = 0;

	if (n >= 1 << 16) {
		n >>= 16;
		tot_bits += 16;
	}

	if (n >= 1 << 8) {
		n >>= 8;
		tot_bits += 8;
	}

	if (n >= 1 << 4) {
		n >>= 4;
		tot_bits += 4;
	}

	if (n >= 1 << 2) {
		n >>= 2;
		tot_bits += 2;
	}

	if (n >= 1 << 1)
		tot_bits += 1;

	return (n == 0) ? 0 : tot_bits;
}
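
/*
 * Example (illustrative only): for a typical 2 KiB page,
 * GLOB_Calc_Used_Bits(2048) returns 11, since 2048 == 1 << 11.
 * For non-power-of-2 input the result is floor(log2(n)),
 * e.g. GLOB_Calc_Used_Bits(3) == 1.
 */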

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Div
* Inputs:       A u64 dividend
*               A power-of-2 number as divisor
* Outputs:      Quotient of the division
* Description:  It divides the address by the divisor using a bit shift
*               (essentially without explicitly using "/").
*               The divisor is a power-of-2 number and the dividend is a u64
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}
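
/*
 * Example (illustrative only): with a 2 KiB page size,
 * GLOB_u64_Div(10000, 2048) == 10000 >> 11 == 4, i.e. byte offset
 * 10000 falls in the fifth page (page index 4).
 */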

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Remainder
* Inputs:       A u64 number
*               Divisor type (1 - page address, 2 - block address)
* Outputs:      Remainder of the division
* Description:  It calculates the remainder of a u64 number divided by the
*               divisor (a power-of-2 number) using bit shift and multiply
*               operations (essentially without explicitly using "/").
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 result = 0;

	if (divisor_type == 1) { /* Remainder -- Page */
		result = (addr >> DeviceInfo.nBitsInPageDataSize);
		result = result * DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) { /* Remainder -- Block */
		result = (addr >> DeviceInfo.nBitsInBlockDataSize);
		result = result * DeviceInfo.wBlockDataSize;
	}

	result = addr - result;

	return result;
}
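
/*
 * Example (illustrative only): assuming wPageDataSize == 2048 and
 * nBitsInPageDataSize == 11, GLOB_u64_Remainder(10000, 1) computes
 * (10000 >> 11) * 2048 == 8192, then 10000 - 8192 == 1808, which
 * equals 10000 % 2048.
 */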

#define NUM_DEVICES		1
#define PARTITIONS		8

#define GLOB_SBD_NAME		"nd"
#define GLOB_SBD_IRQ_NUM	(29)
#define GLOB_VERSION		"driver version 20091110"

#define GLOB_SBD_IOCTL_GC			(0x7701)
#define GLOB_SBD_IOCTL_WL			(0x7702)
#define GLOB_SBD_IOCTL_FORMAT			(0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH		(0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE		(0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE		(0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE	(0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO		(0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA		(0x7709)
#define GLOB_SBD_IOCTL_READ_DATA		(0x770A)

static int reserved_mb = 0;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");

MODULE_LICENSE("GPL");

struct spectra_nand_dev {
	struct pci_dev *dev;
	u64 size;
	u16 users;
	spinlock_t qlock;
	void __iomem *ioaddr;	/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;
};


static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

static struct mutex spectra_lock;

static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;

static int force_flush_cache(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Flush_Cache()) {
		printk(KERN_ERR "Failed to flush FTL cache!\n");
		return -EFAULT;
	}
#if CMD_DMA
	if (glob_ftl_execute_cmds())
		return -EIO;
	else
		return 0;
#endif
	return 0;
}

struct ioctl_rw_page_info {
	u8 *data;
	unsigned int page;
};

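/*
 * Illustrative userspace sketch (not part of the driver): reading one
 * page via GLOB_SBD_IOCTL_READ_DATA. The page size should first be
 * obtained via GLOB_SBD_IOCTL_GET_NAND_INFO; the device node name used
 * here is an assumption.
 *
 *	struct ioctl_rw_page_info info;
 *	int fd = open("/dev/nda", O_RDWR);
 *	info.data = malloc(page_data_size);
 *	info.page = 0;
 *	if (ioctl(fd, GLOB_SBD_IOCTL_READ_DATA, &info) < 0)
 *		perror("read page");
 */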
static int ioctl_read_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Read(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	if (copy_to_user((void __user *)info.data, buf,
			 IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return result;
}

static int ioctl_write_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf, (void __user *)info.data,
			   IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Write(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	kfree(buf);
	return result;
}

/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}

/* Return how many blocks should be reserved for OS image */
static int get_res_blk_num_os(void)
{
	u32 res_blks, blk_size;

	blk_size = IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock;

	res_blks = (reserved_mb * 1024 * 1024) / blk_size;

	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
		res_blks = 1; /* Reserved 1 block for block table */

	return res_blks;
}
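
/*
 * Example (illustrative only): with reserved_mb == 25 and a
 * hypothetical geometry of 2 KiB pages and 128 pages per block
 * (blk_size == 256 KiB), get_res_blk_num_os() reserves
 * (25 * 1024 * 1024) / (256 * 1024) == 100 blocks for the OS image.
 */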

static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
	/* rq->timeout = 5 * HZ; */
	rq->cmd[0] = REQ_LB_OP_FLUSH;
}

/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS Image from
	 * being accessed or damaged by the file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
			req->cmd[0] == REQ_LB_OP_FLUSH) {
		if (force_flush_cache()) /* Failed to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
		       "capacity! sector %d, current_nr_sectors %d, "
		       "while capacity is %d\n",
		       (int)blk_rq_pos(req),
		       blk_rq_cur_sectors(req),
		       (int)get_capacity(tr->gd));
		return -EIO;
	}

	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;

	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	case WRITE:
		/* Write the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}
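
/*
 * Worked example of the sector-to-page math above (illustrative only):
 * with 2 KiB pages, ratio == 4 sectors per page. A request starting at
 * logical sector 6 for 10 sectors gives hd_start_sect == 1, rsect == 2
 * and tsect == 2, so the transfer is split into a 2-sector partial
 * page, then 8 / 4 == 2 full pages, with no trailing partial page.
 */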

/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}


/* Request function that "handles clustering". */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;
	wake_up_process(pdev->thread);
}

static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	return 0;
}

static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	/* The flush result is deliberately ignored; release always succeeds */
	return 0;
}

static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		       "heads: %d, sectors: %d, cylinders: %d\n",
		       geo->heads, geo->sectors, geo->cylinders);

	return 0;
}
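
/*
 * Example (illustrative only): for a hypothetical 1 GiB usable
 * capacity, get_capacity() reports 2097152 sectors, so the fake
 * geometry reported is 4 heads * 16 sectors * 32768 cylinders.
 */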

int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Garbage Collection "
			       "being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Static Wear Leveling "
			       "being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			       "being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			       "being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy block table\n");
		if (copy_to_user((void __user *)arg,
				 get_blk_table_start_addr(),
				 get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
				 get_wear_leveling_table_start_addr(),
				 get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
				 sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}

static struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.locked_ioctl = GLOB_SBD_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};

static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
	int res_blks;
	u32 sects;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	memset(dev, 0, sizeof(struct spectra_nand_dev));

	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
		       "for OS image, %d blocks for bad block replacement.\n",
		       get_res_blk_num_os(),
		       get_res_blk_num_bad_blk());

	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();

	dev->size = (u64)IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		(IdentifyDeviceData.wDataBlockNum - res_blks);

	res_blks_os = get_res_blk_num_os();

	spin_lock_init(&dev->qlock);

	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!dev->tmp_buf) {
		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
		       __FILE__, __LINE__);
		goto out_vfree;
	}

	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
	if (dev->queue == NULL) {
		printk(KERN_ERR
		       "Spectra: Request queue could not be initialized."
		       " Aborting\n");
		goto out_vfree;
	}
	dev->queue->queuedata = dev;

	/* As the Linux block layer doesn't support hardware sectors
	 * larger than 4KB, we report a 512-byte hardware sector size
	 * to the kernel here */
	blk_queue_logical_block_size(dev->queue, 512);

	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
			  SBD_prepare_flush);

	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
	if (IS_ERR(dev->thread)) {
		blk_cleanup_queue(dev->queue);
		unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
		return PTR_ERR(dev->thread);
	}

	dev->gd = alloc_disk(PARTITIONS);
	if (!dev->gd) {
		printk(KERN_ERR
		       "Spectra: Could not allocate disk. Aborting\n");
		goto out_vfree;
	}
	dev->gd->major = GLOB_SBD_majornum;
	dev->gd->first_minor = which * PARTITIONS;
	dev->gd->fops = &GLOB_SBD_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

	sects = dev->size >> 9;
	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
	set_capacity(dev->gd, sects);

	add_disk(dev->gd);

	return 0;
out_vfree:
	return -ENOMEM;
}

/*
static ssize_t show_nand_block_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);

static void create_sysfs_entry(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_nand_block_num))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_block_num.\n");
	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_pages_per_block.\n");
	if (device_create_file(dev, &dev_attr_nand_page_size))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_page_size.\n");
}
*/

static int GLOB_SBD_init(void)
{
	int i;

	/* Debug output level (0~3, 3 is most verbose) is taken from
	 * the nand_debug_level module parameter */
	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra\n",
		       GLOB_SBD_majornum);
		return -EBUSY;
	}

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	}

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	} else {
		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
			       "Num blocks=%d, pagesperblock=%d, "
			       "pagedatasize=%d, ECCBytesPerSector=%d\n",
			       (int)IdentifyDeviceData.NumBlocks,
			       (int)IdentifyDeviceData.PagesPerBlock,
			       (int)IdentifyDeviceData.PageDataSize,
			       (int)IdentifyDeviceData.wECCBytesPerSector);
	}

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
		       "Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
			goto out_ftl_flash_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return 0;

out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
out_flash_register:
	GLOB_FTL_Flash_Release();
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
	printk(KERN_ERR "Spectra: Module load failed.\n");

	return -ENOMEM;
}

static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];
		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}

module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);