/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>

/**** Helper functions used for Div, Remainder operation on u64 ****/

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_Calc_Used_Bits
* Inputs:       Power-of-2 number
* Outputs:      Number of used bits
*               0, if the argument is 0
* Description:  Calculate the number of bits used by a given power-of-2 number
*               Number can be up to 32 bits
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
	int tot_bits = 0;

	if (n >= 1 << 16) {
		n >>= 16;
		tot_bits += 16;
	}

	if (n >= 1 << 8) {
		n >>= 8;
		tot_bits += 8;
	}

	if (n >= 1 << 4) {
		n >>= 4;
		tot_bits += 4;
	}

	if (n >= 1 << 2) {
		n >>= 2;
		tot_bits += 2;
	}

	if (n >= 1 << 1)
		tot_bits += 1;

	return (n == 0) ? 0 : tot_bits;
}
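
/*
 * Note: for any nonzero argument this computes floor(log2(n)), so it is
 * equivalent to ilog2() from <linux/log2.h> (included above). A minimal
 * sketch of the same helper built on the kernel primitive, guarding the
 * n == 0 case this driver relies on:
 *
 *	static inline int calc_used_bits_ilog2(u32 n)
 *	{
 *		return n ? ilog2(n) : 0;	// e.g. 512 -> 9, 0 -> 0
 *	}
 */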

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Div
* Inputs:       A u64 dividend
*               A power-of-2 number as divisor
* Outputs:      Quotient of the division operation
* Description:  Divides the address by the divisor using a bit-shift
*               operation (essentially without explicitly using "/").
*               The divisor is a power-of-2 number and the dividend is a u64.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Remainder
* Inputs:       A u64 dividend
*               Divisor type (1 - PageAddress, 2 - BlockAddress)
* Outputs:      Remainder of the division operation
* Description:  Calculates the remainder of a u64 number by the divisor
*               (a power-of-2 number) using bit-shift and multiply
*               operations (essentially without explicitly using "/").
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 result = 0;

	if (divisor_type == 1) { /* Remainder -- Page */
		result = (addr >> DeviceInfo.nBitsInPageDataSize);
		result = result * DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) { /* Remainder -- Block */
		result = (addr >> DeviceInfo.nBitsInBlockDataSize);
		result = result * DeviceInfo.wBlockDataSize;
	}

	result = addr - result;

	return result;
}
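
/*
 * Worked example (illustrative values only): with 2048-byte pages,
 * DeviceInfo.nBitsInPageDataSize is 11 and DeviceInfo.wPageDataSize is
 * 2048. For addr = 5000 and divisor_type == 1:
 *
 *	result = (5000 >> 11) * 2048 = 2 * 2048 = 4096
 *	return 5000 - 4096 = 904	(i.e. 5000 mod 2048)
 */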

#define NUM_DEVICES 1
#define PARTITIONS 8

#define GLOB_SBD_NAME "nd"
#define GLOB_SBD_IRQ_NUM (29)

#define GLOB_SBD_IOCTL_GC (0x7701)
#define GLOB_SBD_IOCTL_WL (0x7702)
#define GLOB_SBD_IOCTL_FORMAT (0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
#define GLOB_SBD_IOCTL_READ_DATA (0x770A)

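/*
 * These commands are issued from userspace against the block device
 * node. A minimal sketch of a caller (illustrative only: it assumes the
 * first disk appears as /dev/nda and that the ioctl numbers and the
 * struct spectra_indentfy_dev_tag layout are shared with userspace):
 *
 *	int fd = open("/dev/nda", O_RDWR);
 *	struct spectra_indentfy_dev_tag nand_info;
 *
 *	if (fd >= 0 && ioctl(fd, GLOB_SBD_IOCTL_GET_NAND_INFO, &nand_info) == 0)
 *		printf("NAND page data size: %d\n", (int)nand_info.PageDataSize);
 */
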
static int reserved_mb;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 0)");

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
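
/*
 * Typical load of the driver with both parameters set (illustrative;
 * the module name "spectra" is assumed from the build system):
 *
 *	# modprobe spectra reserved_mb=25 nand_debug_level=2
 */
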
MODULE_LICENSE("GPL");

struct spectra_nand_dev {
	struct pci_dev *dev;
	u64 size;
	u16 users;
	spinlock_t qlock;
	void __iomem *ioaddr;  /* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;
};


static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

static struct mutex spectra_lock;

static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;

static int force_flush_cache(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Flush_Cache()) {
		printk(KERN_ERR "Failed to flush FTL cache!\n");
		return -EFAULT;
	}
#if CMD_DMA
	if (glob_ftl_execute_cmds())
		return -EIO;
	else
		return 0;
#endif
	return 0;
}

struct ioctl_rw_page_info {
	u8 *data;
	unsigned int page;
};

static int ioctl_read_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	/* Process context that may sleep; GFP_KERNEL is sufficient here */
	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Read(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	if (copy_to_user((void __user *)info.data, buf,
			 IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return result;
}

static int ioctl_write_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	/* Process context that may sleep; GFP_KERNEL is sufficient here */
	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf, (void __user *)info.data,
			   IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Write(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	kfree(buf);
	return result;
}

/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}

/* Return how many blocks should be reserved for OS image */
static int get_res_blk_num_os(void)
{
	u32 res_blks, blk_size;

	blk_size = IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock;

	res_blks = (reserved_mb * 1024 * 1024) / blk_size;

	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
		res_blks = 1; /* Reserve 1 block for block table */

	return res_blks;
}
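
/*
 * Worked example (illustrative values only): with 2048-byte pages and
 * 64 pages per block, blk_size is 128 KiB, so reserved_mb=25 reserves
 * (25 * 1024 * 1024) / (128 * 1024) = 200 blocks for the OS image.
 */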

/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS Image from
	 * being accessed or damaged by the file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	/* REQ_FLUSH is a bit in cmd_flags, so test it there (not cmd_type) */
	if (req->cmd_flags & REQ_FLUSH) {
		if (force_flush_cache()) /* Failed to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
			"capacity! sector %d, current_nr_sectors %d, "
			"while capacity is %d\n",
			(int)blk_rq_pos(req),
			blk_rq_cur_sectors(req),
			(int)get_capacity(tr->gd));
		return -EIO;
	}

	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
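
	/*
	 * Worked example (illustrative values only): with 2048-byte pages,
	 * ratio = 4 sectors per page. A request at logical sector 5 for
	 * nsect = 10 sectors gives hd_start_sect = 1 and rsect = 1, so
	 * tsect = min(4 - 1, 10) = 3 sectors go through tmp_buf to reach
	 * a page boundary, then (10 - 3) / 4 = 1 full page is transferred
	 * directly, and the remaining 7 % 4 = 3 sectors use tmp_buf again.
	 */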

	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first partial NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the last partial NAND page */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	case WRITE:
		/* Write the first partial NAND page (read-modify-write) */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last partial NAND page (read-modify-write) */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}


/* Request function: it just wakes the transfer thread, which handles
 * requests in process context (see spectra_trans_thread) */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;
	wake_up_process(pdev->thread);
}

static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
	return 0;
}

static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	/* A failed flush is only logged; release itself reports success */
	return 0;
}

static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		"heads: %d, sectors: %d, cylinders: %d\n",
		geo->heads, geo->sectors, geo->cylinders);

	return 0;
}
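
/*
 * Example (illustrative): a 512 MiB disk is 1048576 sectors, so the
 * geometry reported above is 4 heads * 16 sectors * 16384 cylinders.
 */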

int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			"Spectra IOCTL: Garbage Collection "
			"being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			"Spectra IOCTL: Static Wear Leveling "
			"being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			"being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			"being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Copy block table\n");
		if (copy_to_user((void __user *)arg,
			get_blk_table_start_addr(),
			get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
			get_wear_leveling_table_start_addr(),
			get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
			sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}

static DEFINE_MUTEX(ffsport_mutex);

int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&ffsport_mutex);
	ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
	mutex_unlock(&ffsport_mutex);

	return ret;
}

static struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.ioctl = GLOB_SBD_unlocked_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};

static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
	int res_blks;
	u32 sects;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	memset(dev, 0, sizeof(struct spectra_nand_dev));

	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
		"for OS image, %d blocks for bad block replacement.\n",
		get_res_blk_num_os(),
		get_res_blk_num_bad_blk());

	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();

	dev->size = (u64)IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		(IdentifyDeviceData.wDataBlockNum - res_blks);

	res_blks_os = get_res_blk_num_os();

	spin_lock_init(&dev->qlock);

	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_KERNEL);
	if (!dev->tmp_buf) {
		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
		       __FILE__, __LINE__);
		return -ENOMEM;
	}

	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
	if (dev->queue == NULL) {
		printk(KERN_ERR
		       "Spectra: Request queue could not be initialized."
		       " Aborting\n");
		goto out_free_buf;
	}
	dev->queue->queuedata = dev;

	/* As the Linux block layer doesn't support >4KB hardware sectors, */
	/* we force a 512 byte hardware sector size report to the kernel */
	blk_queue_logical_block_size(dev->queue, 512);

	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);

	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
	if (IS_ERR(dev->thread)) {
		dev->thread = NULL;
		goto out_free_queue;
	}

	dev->gd = alloc_disk(PARTITIONS);
	if (!dev->gd) {
		printk(KERN_ERR
		       "Spectra: Could not allocate disk. Aborting\n");
		goto out_stop_thread;
	}
	dev->gd->major = GLOB_SBD_majornum;
	dev->gd->first_minor = which * PARTITIONS;
	dev->gd->fops = &GLOB_SBD_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

	sects = dev->size >> 9;
	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
	set_capacity(dev->gd, sects);

	add_disk(dev->gd);

	return 0;

	/* Unwind in reverse order of setup; the caller unregisters the
	 * block device major on failure */
out_stop_thread:
	kthread_stop(dev->thread);
out_free_queue:
	blk_cleanup_queue(dev->queue);
out_free_buf:
	kfree(dev->tmp_buf);
	return -ENOMEM;
}

/*
static ssize_t show_nand_block_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);

static void create_sysfs_entry(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_nand_block_num))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_block_num.\n");
	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_pages_per_block.\n");
	if (device_create_file(dev, &dev_attr_nand_page_size))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_page_size.\n");
}
*/

static int __init GLOB_SBD_init(void)
{
	int i;

	/* Set debug output level (0~3) here. 3 is most verbose */
	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra\n",
		       GLOB_SBD_majornum);
		return -EBUSY;
	}

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	}

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
		       "Aborting\n");
		goto out_flash_register;
	} else {
		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
			"Num blocks=%d, pagesperblock=%d, "
			"pagedatasize=%d, ECCBytesPerSector=%d\n",
			(int)IdentifyDeviceData.NumBlocks,
			(int)IdentifyDeviceData.PagesPerBlock,
			(int)IdentifyDeviceData.PageDataSize,
			(int)IdentifyDeviceData.wECCBytesPerSector);
	}

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
		       "Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
			goto out_ftl_flash_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return 0;

out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
out_flash_register:
	GLOB_FTL_Flash_Release();
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
	printk(KERN_ERR "Spectra: Module load failed.\n");

	return -ENOMEM;
}

static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];
		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		/* Stop the transfer thread before tearing down its queue */
		if (dev->thread)
			kthread_stop(dev->thread);
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}

module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);