/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */
#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
        struct dasd_device *device;

        device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
        if (!device)
                return ERR_PTR(-ENOMEM);

        /* Get two pages for normal block device operations. */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (!device->ccw_mem) {
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        /* Get one page for error recovery. */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (!device->erp_mem) {
                free_pages((unsigned long) device->ccw_mem, 1);
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }

        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        atomic_set(&device->tasklet_scheduled, 0);
        tasklet_init(&device->tasklet,
                     (void (*)(unsigned long)) dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        init_timer(&device->timer);
        device->timer.function = dasd_device_timeout;
        device->timer.data = (unsigned long) device;
        INIT_WORK(&device->kick_work, do_kick_device);
        INIT_WORK(&device->restore_device, do_restore_device);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;
        mutex_init(&device->state_mutex);

        return device;
}
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
        kfree(device->private);
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
}
/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
        struct dasd_block *block;

        block = kzalloc(sizeof(*block), GFP_ATOMIC);
        if (!block)
                return ERR_PTR(-ENOMEM);
        /* open_count = 0 means device online but not in use */
        atomic_set(&block->open_count, -1);

        spin_lock_init(&block->request_queue_lock);
        atomic_set(&block->tasklet_scheduled, 0);
        tasklet_init(&block->tasklet,
                     (void (*)(unsigned long)) dasd_block_tasklet,
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
        init_timer(&block->timer);
        block->timer.function = dasd_block_timeout;
        block->timer.data = (unsigned long) block;

        return block;
}
/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
        kfree(block);
}
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
        int rc;

        /*
         * As long as the device is not in state DASD_STATE_NEW we want to
         * keep the reference count > 0.
         */
        dasd_get_device(device);

        if (device->block) {
                rc = dasd_alloc_queue(device->block);
                if (rc) {
                        dasd_put_device(device);
                        return rc;
                }
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}
/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
        /* Disable extended error reporting for this device. */
        dasd_eer_disable(device);
        /* Forget the discipline information. */
        if (device->discipline) {
                if (device->discipline->uncheck_device)
                        device->discipline->uncheck_device(device);
                module_put(device->discipline->owner);
        }
        device->discipline = NULL;
        if (device->base_discipline)
                module_put(device->base_discipline->owner);
        device->base_discipline = NULL;
        device->state = DASD_STATE_NEW;

        if (device->block)
                dasd_free_queue(device->block);

        /* Give up reference we took in dasd_state_new_to_known. */
        dasd_put_device(device);
        return 0;
}
/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
        int rc;

        /* Allocate and register gendisk structure. */
        if (device->block) {
                rc = dasd_gendisk_alloc(device->block);
                if (rc)
                        return rc;
        }
        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
        device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
                                            8 * sizeof(long));
        debug_register_view(device->debug_area, &debug_sprintf_view);
        debug_set_level(device->debug_area, DBF_WARNING);
        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
        int rc;

        if (device->block) {
                dasd_gendisk_free(device->block);
                dasd_block_clear_timer(device->block);
        }
        rc = dasd_flush_device_queue(device);
        if (rc)
                return rc;
        dasd_device_clear_timer(device);

        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
        if (device->debug_area != NULL) {
                debug_unregister(device->debug_area);
                device->debug_area = NULL;
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
        int rc;
        struct dasd_block *block;

        rc = 0;
        block = device->block;
        /* make disk known with correct capacity */
        if (block) {
                if (block->base->discipline->do_analysis != NULL)
                        rc = block->base->discipline->do_analysis(block);
                if (rc) {
                        if (rc != -EAGAIN)
                                device->state = DASD_STATE_UNFMT;
                        return rc;
                }
                dasd_setup_queue(block);
                set_capacity(block->gdp,
                             block->blocks << block->s2b_shift);
                device->state = DASD_STATE_READY;
                rc = dasd_scan_partitions(block);
                if (rc)
                        device->state = DASD_STATE_BASIC;
        } else {
                device->state = DASD_STATE_READY;
        }
        return rc;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
        int rc;

        device->state = DASD_STATE_BASIC;
        if (device->block) {
                struct dasd_block *block = device->block;
                rc = dasd_flush_block_queue(block);
                if (rc) {
                        device->state = DASD_STATE_READY;
                        return rc;
                }
                dasd_destroy_partitions(block);
                dasd_flush_request_queue(block);
                block->blocks = 0;
                block->bp_block = 0;
                block->s2b_shift = 0;
        }
        return 0;
}
/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->ready_to_online) {
                rc = device->discipline->ready_to_online(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_ONLINE;
        if (device->block) {
                dasd_schedule_block_bh(device->block);
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}
/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->online_to_ready) {
                rc = device->discipline->online_to_ready(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_READY;
        if (device->block) {
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}
/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target > DASD_STATE_UNFMT)
                rc = -EPERM;

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);

        return rc;
}
/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_ONLINE &&
            device->target <= DASD_STATE_READY)
                rc = dasd_state_online_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_ready_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_unfmt_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target <= DASD_STATE_KNOWN)
                rc = dasd_state_basic_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target <= DASD_STATE_NEW)
                rc = dasd_state_known_to_new(device);

        return rc;
}
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
        int rc;

        if (device->state == device->target)
                /* Already where we want to go today... */
                return;
        if (device->state < device->target)
                rc = dasd_increase_state(device);
        else
                rc = dasd_decrease_state(device);
        if (rc == -EAGAIN)
                return;
        if (rc)
                device->target = device->state;

        if (device->state == device->target)
                wake_up(&dasd_init_waitq);

        /* let user-space know that the device status changed */
        kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
        mutex_lock(&device->state_mutex);
        dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_kick_device to the kernel event daemon. */
        schedule_work(&device->kick_work);
}
/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  restore_device);
        device->cdev->drv->restore(device->cdev);
        dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_restore_device to the kernel event daemon. */
        schedule_work(&device->restore_device);
}
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
        dasd_get_device(device);
        mutex_lock(&device->state_mutex);
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target)
                        wake_up(&dasd_init_waitq);
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_put_device(device);
}
/*
 * Enable devices with device numbers in [from..to].
 */
static inline int _wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));
}
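/*
 * A short usage note (a sketch derived from the functions above, not
 * additional driver logic): a single dasd_change_state() call walks all
 * reachable intermediate states, so raising the target as in
 *
 *	dasd_set_target_state(device, DASD_STATE_ONLINE);
 *	wait_event(dasd_init_waitq, _wait_for_device(device));
 *
 * takes a fresh device through NEW -> KNOWN -> BASIC -> READY -> ONLINE
 * (possibly parking in UNFMT after a failed analysis), which is exactly
 * what dasd_enable_device() does.
 */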
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
        int index; \
        for (index = 0; index < 31 && value >> (2+index); index++); \
        dasd_global_profile.counter[index]++; \
        block->profile.counter[index]++; \
}
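/*
 * Worked example for the bucketing above: the loop stops at the first
 * index with value < 2^(2+index), so value == 100 is counted in bucket
 * 5 (64 <= 100 < 128), anything below 8 falls into bucket 0, and
 * bucket 31 collects all values of 2^33 and above.
 */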
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
                               struct dasd_ccw_req *cqr,
                               struct request *req)
{
        struct list_head *l;
        unsigned int counter;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        /* count the length of the chanq for statistics */
        counter = 0;
        list_for_each(l, &block->ccw_queue)
                if (++counter >= 31)
                        break;
        dasd_global_profile.dasd_io_nr_req[counter]++;
        block->profile.dasd_io_nr_req[counter]++;
}
/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
                             struct dasd_ccw_req *cqr,
                             struct request *req)
{
        long strtime, irqtime, endtime, tottime;	/* in microseconds */
        long tottimeps, sectors;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        sectors = blk_rq_sectors(req);
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
                return;

        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
        tottimeps = tottime / sectors;

        if (!dasd_global_profile.dasd_io_reqs)
                memset(&dasd_global_profile, 0,
                       sizeof(struct dasd_profile_info_t));
        dasd_global_profile.dasd_io_reqs++;
        dasd_global_profile.dasd_io_sects += sectors;

        if (!block->profile.dasd_io_reqs)
                memset(&block->profile, 0,
                       sizeof(struct dasd_profile_info_t));
        block->profile.dasd_io_reqs++;
        block->profile.dasd_io_sects += sectors;

        dasd_profile_counter(sectors, dasd_io_secs, block);
        dasd_profile_counter(tottime, dasd_io_times, block);
        dasd_profile_counter(tottimeps, dasd_io_timps, block);
        dasd_profile_counter(strtime, dasd_io_time1, block);
        dasd_profile_counter(irqtime, dasd_io_time2, block);
        dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
        dasd_profile_counter(endtime, dasd_io_time3, block);
}

#else

#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

#endif				/* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        /* Sanity checks */
        BUG_ON(datasize > PAGE_SIZE ||
               (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
                                      GFP_ATOMIC | GFP_DMA);
                if (cqr->cpaddr == NULL) {
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
                if (cqr->data == NULL) {
                        kfree(cqr->cpaddr);
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->magic = magic;
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        unsigned long flags;
        struct dasd_ccw_req *cqr;
        char *data;
        int size;

        /* Sanity checks */
        BUG_ON(datasize > PAGE_SIZE ||
               (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
        spin_lock_irqsave(&device->mem_lock, flags);
        cqr = (struct dasd_ccw_req *)
                dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        memset(cqr, 0, sizeof(struct dasd_ccw_req));
        data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = (struct ccw1 *) data;
                data += cplength*sizeof(struct ccw1);
                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
        }
        cqr->magic = magic;
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
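/*
 * A minimal usage sketch (hedged: the magic value, the sizes and the
 * later request setup are discipline specific and only assumed here):
 * build a one-CCW request from the per-device static memory and release
 * it again with dasd_sfree_request() when done.
 *
 *	struct dasd_ccw_req *cqr;
 *
 *	cqr = dasd_smalloc_request(magic, 1, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	cqr->startdev = device;
 *	cqr->status = DASD_CQR_FILLED;
 *	...
 *	dasd_sfree_request(cqr, device);
 */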
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
        struct ccw1 *ccw;

        /* Clear any idals used for the request. */
        ccw = cqr->cpaddr;
        do {
                clear_normalized_cda(ccw);
        } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
        kfree(cqr->cpaddr);
        kfree(cqr->data);
        kfree(cqr);
        dasd_put_device(device);
}
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, cqr);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->startdev;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DBF_DEV_EVENT(DBF_WARNING, device,
                              " dasd_ccw_req 0x%08x magic doesn't match"
                              " discipline 0x%08x",
                              cqr->magic,
                              *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int retries, rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        retries = 0;
        device = (struct dasd_device *) cqr->startdev;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        cqr->retries--;
                        cqr->status = DASD_CQR_CLEAR_PENDING;
                        cqr->stopclk = get_clock();
                        cqr->starttime = 0;
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EIO:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "I/O error, retry");
                        break;
                case -EINVAL:
                case -EBUSY:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device busy, retry later");
                        break;
                default:
                        /* internal error 10 - unknown rc*/
                        snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
                        dev_err(&device->cdev->dev, "An error occurred in the "
                                "DASD device driver, reason=%s\n", errorstring);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_device_bh(device);
        return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc) {
                cqr->intrc = rc;
                return rc;
        }
        device = (struct dasd_device *) cqr->startdev;
        if (cqr->retries < 0) {
                /* internal error 14 - start_IO run out of retries */
                sprintf(errorstring, "14 %p", cqr);
                dev_err(&device->cdev->dev, "An error occurred in the DASD "
                        "device driver, reason=%s\n", errorstring);
                cqr->status = DASD_CQR_ERROR;
                return -EIO;
        }
        cqr->startclk = get_clock();
        cqr->starttime = jiffies;
        cqr->retries--;
        if (cqr->cpmode == 1) {
                rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
                                         (long) cqr, cqr->lpm);
        } else {
                rc = ccw_device_start(device->cdev, cqr->cpaddr,
                                      (long) cqr, cqr->lpm, 0);
        }
        switch (rc) {
        case 0:
                cqr->status = DASD_CQR_IN_IO;
                break;
        case -EBUSY:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: device busy, retry later");
                break;
        case -ETIMEDOUT:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: request timeout, retry later");
                break;
        case -EACCES:
                /* -EACCES indicates that the request used only a
                 * subset of the available paths and all these
                 * paths are gone.
                 * Do a retry with all available paths.
                 */
                cqr->lpm = LPM_ANYPATH;
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: selected paths gone,"
                              " retry on all paths");
                break;
        case -ENODEV:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -ENODEV device gone, retry");
                break;
        case -EIO:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -EIO device gone, retry");
                break;
        case -EINVAL:
                /* most likely caused in power management context */
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -EINVAL device currently "
                              "not accessible");
                break;
        default:
                /* internal error 11 - unknown rc */
                snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
                dev_err(&device->cdev->dev,
                        "An error occurred in the DASD device driver, "
                        "reason=%s\n", errorstring);
                BUG();
                break;
        }
        cqr->intrc = rc;
        return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_device *device;

        device = (struct dasd_device *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_device_bh(device);
}
/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
        if (expires == 0)
                del_timer(&device->timer);
        else
                mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
        del_timer(&device->timer);
}
static void dasd_handle_killed_request(struct ccw_device *cdev,
                                       unsigned long intparm)
{
        struct dasd_ccw_req *cqr;
        struct dasd_device *device;

        if (!intparm)
                return;
        cqr = (struct dasd_ccw_req *) intparm;
        if (cqr->status != DASD_CQR_IN_IO) {
                DBF_EVENT_DEVID(DBF_DEBUG, cdev,
                                "invalid status in handle_killed_request: "
                                "%02x", cqr->status);
                return;
        }

        device = (struct dasd_device *) cqr->startdev;
        if (device == NULL ||
            device != dasd_device_from_cdev_locked(cdev) ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
                                "invalid device in request");
                return;
        }

        /* Schedule request to be retried. */
        cqr->status = DASD_CQR_QUEUED;

        dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}
void dasd_generic_handle_state_change(struct dasd_device *device)
{
        /* First of all start sense subsystem status request. */
        dasd_eer_snss(device);

        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
        dasd_schedule_device_bh(device);
        if (device->block)
                dasd_schedule_block_bh(device->block);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct dasd_ccw_req *cqr, *next;
        struct dasd_device *device;
        unsigned long long now;
        int expires;

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        break;
                case -ETIMEDOUT:
                        DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
                                        "request timed out\n", __func__);
                        break;
                default:
                        DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
                                        "unknown error %ld\n", __func__,
                                        PTR_ERR(irb));
                }
                dasd_handle_killed_request(cdev, intparm);
                return;
        }

        now = get_clock();

        /* check for unsolicited interrupts */
        cqr = (struct dasd_ccw_req *) intparm;
        if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
                     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
                     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
                if (cqr && cqr->status == DASD_CQR_IN_IO)
                        cqr->status = DASD_CQR_QUEUED;
                device = dasd_device_from_cdev_locked(cdev);
                if (!IS_ERR(device)) {
                        dasd_device_clear_timer(device);
                        device->discipline->handle_unsolicited_interrupt(device,
                                                                         irb);
                        dasd_put_device(device);
                }
                return;
        }

        device = (struct dasd_device *) cqr->startdev;
        if (!device ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
                                "invalid device in request");
                return;
        }

        /* Check for clear pending */
        if (cqr->status == DASD_CQR_CLEAR_PENDING &&
            scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_device_clear_timer(device);
                wake_up(&dasd_flush_wq);
                dasd_schedule_device_bh(device);
                return;
        }

        /* check status - the request might have been killed by dyn detach */
        if (cqr->status != DASD_CQR_IN_IO) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
                              "status %02x", dev_name(&cdev->dev), cqr->status);
                return;
        }

        next = NULL;
        expires = 0;
        if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
            scsw_cstat(&irb->scsw) == 0) {
                /* request was completed successfully */
                cqr->status = DASD_CQR_SUCCESS;
                cqr->stopclk = now;
                /* Start first request on queue if possible -> fast_io. */
                if (cqr->devlist.next != &device->ccw_queue) {
                        next = list_entry(cqr->devlist.next,
                                          struct dasd_ccw_req, devlist);
                }
        } else {  /* error */
                memcpy(&cqr->irb, irb, sizeof(struct irb));
                /* log sense for every failed I/O to s390 debugfeature */
                dasd_log_sense_dbf(cqr, irb);
                if (device->features & DASD_FEATURE_ERPLOG) {
                        dasd_log_sense(cqr, irb);
                }

                /*
                 * If we don't want complex ERP for this request, then just
                 * reset this and retry it in the fastpath
                 */
                if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
                    cqr->retries > 0) {
                        if (cqr->lpm == LPM_ANYPATH)
                                DBF_DEV_EVENT(DBF_DEBUG, device,
                                              "default ERP in fastpath "
                                              "(%i retries left)",
                                              cqr->retries);
                        cqr->lpm = LPM_ANYPATH;
                        cqr->status = DASD_CQR_QUEUED;
                        next = cqr;
                } else
                        cqr->status = DASD_CQR_ERROR;
        }
        if (next && (next->status == DASD_CQR_QUEUED) &&
            (!device->stopped)) {
                if (device->discipline->start_IO(next) == 0)
                        expires = next->expires;
        }
        if (expires != 0)
                dasd_device_set_timer(device, expires);
        else
                dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
}
/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
                                   struct dasd_ccw_req *ref_cqr)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        /*
         * only requeue request that came from the dasd_block layer
         */
        if (!ref_cqr->block)
                return;

        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                if (cqr->status == DASD_CQR_QUEUED &&
                    ref_cqr->block == cqr->block) {
                        cqr->status = DASD_CQR_CLEARED;
                }
        }
}
/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
                                            struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        /* Process request with final status. */
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);

                /* Stop list processing at the first non-final request. */
                if (cqr->status == DASD_CQR_QUEUED ||
                    cqr->status == DASD_CQR_IN_IO ||
                    cqr->status == DASD_CQR_CLEAR_PENDING)
                        break;
                if (cqr->status == DASD_CQR_ERROR) {
                        __dasd_device_recovery(device, cqr);
                }
                /* Rechain finished requests to final queue */
                list_move_tail(&cqr->devlist, final_queue);
        }
}
/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
                                              struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
        char errorstring[ERRORLENGTH];

        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
                callback = cqr->callback;
                callback_data = cqr->callback_data;
                if (block)
                        spin_lock_bh(&block->queue_lock);
                switch (cqr->status) {
                case DASD_CQR_SUCCESS:
                        cqr->status = DASD_CQR_DONE;
                        break;
                case DASD_CQR_ERROR:
                        cqr->status = DASD_CQR_NEED_ERP;
                        break;
                case DASD_CQR_CLEARED:
                        cqr->status = DASD_CQR_TERMINATED;
                        break;
                default:
                        /* internal error 12 - wrong cqr status*/
                        snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
                                 cqr, cqr->status);
                        dev_err(&device->cdev->dev,
                                "An error occurred in the DASD device driver, "
                                "reason=%s\n", errorstring);
                        BUG();
                }
                if (cqr->callback != NULL)
                        (callback)(cqr, callback_data);
                if (block)
                        spin_unlock_bh(&block->queue_lock);
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
            (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
                if (device->discipline->term_IO(cqr) != 0) {
                        /* Hmpf, try again in 5 sec */
                        dev_err(&device->cdev->dev,
                                "cqr %p timed out (%is) but cannot be "
                                "ended, retrying in 5 s\n",
                                cqr, (cqr->expires/HZ));
                        cqr->expires += 5*HZ;
                        dasd_device_set_timer(device, 5*HZ);
                } else {
                        dev_err(&device->cdev->dev,
                                "cqr %p timed out (%is), %i retries "
                                "remaining\n", cqr, (cqr->expires/HZ),
                                cqr->retries);
                }
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if (cqr->status != DASD_CQR_QUEUED)
                return;
        /* when device is stopped, return request to previous layer */
        if (device->stopped) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_schedule_device_bh(device);
                return;
        }

        rc = device->discipline->start_IO(cqr);
        if (rc == 0)
                dasd_device_set_timer(device, cqr->expires);
        else if (rc == -EACCES) {
                dasd_schedule_device_bh(device);
        } else
                /* Hmpf, try again in 1/2 sec */
                dasd_device_set_timer(device, 50);
}
/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed
 * beforehand via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr, *n;
        int rc;
        struct list_head flush_queue;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = 0;
        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
                /* Check status and move request to flush_queue */
                switch (cqr->status) {
                case DASD_CQR_IN_IO:
                        rc = device->discipline->term_IO(cqr);
                        if (rc) {
                                /* unable to terminate request */
                                dev_err(&device->cdev->dev,
                                        "Flushing the DASD request queue "
                                        "failed for request %p\n", cqr);
                                /* stop flush processing */
                                goto finished;
                        }
                        break;
                case DASD_CQR_QUEUED:
                        cqr->stopclk = get_clock();
                        cqr->status = DASD_CQR_CLEARED;
                        break;
                default: /* no need to modify the others */
                        break;
                }
                list_move_tail(&cqr->devlist, &flush_queue);
        }
finished:
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /*
         * After this point all requests must be in state CLEAR_PENDING,
         * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
         * one of the others.
         */
        list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
                wait_event(dasd_flush_wq,
                           (cqr->status != DASD_CQR_CLEAR_PENDING));
        /*
         * Now set each request back to TERMINATED, DONE or NEED_ERP
         * and call the callback function of flushed requests
         */
        __dasd_device_process_final_queue(device, &flush_queue);
        return rc;
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
        struct list_head final_queue;

        atomic_set (&device->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Check expire time of first request on the ccw queue. */
        __dasd_device_check_expire(device);
        /* find final requests on ccw queue */
        __dasd_device_process_ccw_queue(device, &final_queue);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /* Now call the callback function of requests with final status */
        __dasd_device_process_final_queue(device, &final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_device_start_head(device);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        dasd_put_device(device);
}
/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
                return;
        dasd_get_device(device);
        tasklet_hi_schedule(&device->tasklet);
}

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
        device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
        device->stopped &= ~bits;
        if (!device->stopped)
                wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
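/*
 * Usage sketch for the stop bits (hedged; the locking convention follows
 * the call sites in this file, e.g. dasd_device_timeout): both helpers
 * are called under the ccw device lock, and removing the last stop bit
 * wakes up sleepers on generic_waitq.
 *
 *	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 *	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
 *	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 */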
/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->startdev;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = ((cqr->status == DASD_CQR_DONE ||
               cqr->status == DASD_CQR_NEED_ERP ||
               cqr->status == DASD_CQR_TERMINATED) &&
              list_empty(&cqr->devlist));
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}
/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        dasd_erp_fn_t erp_fn;

        if (cqr->status == DASD_CQR_FILLED)
                return 0;
        device = cqr->startdev;
        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
                if (cqr->status == DASD_CQR_TERMINATED) {
                        device->discipline->handle_terminated_request(cqr);
                        return 1;
                }
                if (cqr->status == DASD_CQR_NEED_ERP) {
                        erp_fn = device->discipline->erp_action(cqr);
                        erp_fn(cqr);
                        return 1;
                }
                if (cqr->status == DASD_CQR_FAILED)
                        dasd_log_sense(cqr, &cqr->irb);
                if (cqr->refers) {
                        __dasd_process_erp(device, cqr);
                        return 1;
                }
        }
        return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
                if (cqr->refers) /* erp is not done yet */
                        return 1;
                return ((cqr->status != DASD_CQR_DONE) &&
                        (cqr->status != DASD_CQR_FAILED));
        } else
                return (cqr->status == DASD_CQR_FILLED);
}
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
        struct dasd_device *device;
        int rc;
        struct list_head ccw_queue;
        struct dasd_ccw_req *cqr;

        INIT_LIST_HEAD(&ccw_queue);
        maincqr->status = DASD_CQR_FILLED;
        device = maincqr->startdev;
        list_add(&maincqr->blocklist, &ccw_queue);
        for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
             cqr = list_first_entry(&ccw_queue,
                                    struct dasd_ccw_req, blocklist)) {

                if (__dasd_sleep_on_erp(cqr))
                        continue;
                if (cqr->status != DASD_CQR_FILLED) /* could be failed */
                        continue;

                /* Non-temporary stop condition will trigger fail fast */
                if (device->stopped & ~DASD_STOPPED_PENDING &&
                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
                    (!dasd_eer_enabled(device))) {
                        cqr->status = DASD_CQR_FAILED;
                        continue;
                }

                /* Don't try to start requests if device is stopped */
                if (interruptible) {
                        rc = wait_event_interruptible(
                                generic_waitq, !(device->stopped));
                        if (rc == -ERESTARTSYS) {
                                cqr->status = DASD_CQR_FAILED;
                                maincqr->intrc = rc;
                                continue;
                        }
                } else
                        wait_event(generic_waitq, !(device->stopped));

                cqr->callback = dasd_wakeup_cb;
                cqr->callback_data = (void *) &generic_waitq;
                dasd_add_request_tail(cqr);
                if (interruptible) {
                        rc = wait_event_interruptible(
                                generic_waitq, _wait_for_wakeup(cqr));
                        if (rc == -ERESTARTSYS) {
                                dasd_cancel_req(cqr);
                                /* wait (non-interruptible) for final status */
                                wait_event(generic_waitq,
                                           _wait_for_wakeup(cqr));
                                cqr->status = DASD_CQR_FAILED;
                                maincqr->intrc = rc;
                                continue;
                        }
                } else
                        wait_event(generic_waitq, _wait_for_wakeup(cqr));
        }

        maincqr->endclk = get_clock();
        if ((maincqr->status != DASD_CQR_DONE) &&
            (maincqr->intrc != -ERESTARTSYS))
                dasd_log_sense(maincqr, &maincqr->irb);
        if (maincqr->status == DASD_CQR_DONE)
                rc = 0;
        else if (maincqr->intrc)
                rc = maincqr->intrc;
        else
                rc = -EIO;
        return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
        return _dasd_sleep_on(cqr, 0);
}

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
        return _dasd_sleep_on(cqr, 1);
}
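/*
 * A minimal synchronous I/O sketch (hedged: the helper that builds the
 * request is hypothetical, only the sleep_on pattern is taken from the
 * functions above):
 *
 *	cqr = some_discipline_build_cqr(device);   (hypothetical helper)
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	rc = dasd_sleep_on(cqr);                   (queues and waits)
 *	dasd_sfree_request(cqr, device);
 *	return rc;
 */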
/*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&device->ccw_queue))
                return 0;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->startdev;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = _dasd_term_running_cqr(device);
        if (rc) {
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                return rc;
        }

        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = (void *) &generic_waitq;
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->devlist, &device->ccw_queue);

        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);

        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        wait_event(generic_waitq, _wait_for_wakeup(cqr));

        if (cqr->status == DASD_CQR_DONE)
                rc = 0;
        else if (cqr->intrc)
                rc = cqr->intrc;
        else
                rc = -EIO;
        return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device = cqr->startdev;
        unsigned long flags;
        int rc;

        rc = 0;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        switch (cqr->status) {
        case DASD_CQR_QUEUED:
                /* request was not started - just set to cleared */
                cqr->status = DASD_CQR_CLEARED;
                break;
        case DASD_CQR_IN_IO:
                /* request in IO - terminate IO and release again */
                rc = device->discipline->term_IO(cqr);
                if (rc) {
                        dev_err(&device->cdev->dev,
                                "Cancelling request %p failed with rc=%d\n",
                                cqr, rc);
                } else {
                        cqr->stopclk = get_clock();
                }
                break;
        default: /* already finished or clear pending - do nothing */
                break;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_device_bh(device);
        return rc;
}
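/*
 * Timeout pattern enabled by dasd_cancel_req (a sketch; the 10 second
 * timeout is an arbitrary example value): since cancellation is
 * asynchronous, the caller must still wait for the final callback
 * after cancelling.
 *
 *	if (!wait_event_timeout(generic_waitq, _wait_for_wakeup(cqr),
 *				10 * HZ)) {
 *		dasd_cancel_req(cqr);
 *		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *	}
 */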
/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_block *block;

        block = (struct dasd_block *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
        spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
        dasd_schedule_block_bh(block);
}
/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
        if (expires == 0)
                del_timer(&block->timer);
        else
                mod_timer(&block->timer, jiffies + expires);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
        del_timer(&block->timer);
}
/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
                               struct dasd_ccw_req *cqr)
{
        dasd_erp_fn_t erp_fn;

        if (cqr->status == DASD_CQR_DONE)
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
        else
                dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
        erp_fn = device->discipline->erp_postaction(cqr);
        erp_fn(cqr);
}
/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
        struct request_queue *queue;
        struct request *req;
        struct dasd_ccw_req *cqr;
        struct dasd_device *basedev;
        unsigned long flags;
        queue = block->request_queue;
        basedev = block->base;
        /* No queue ? Then there is nothing to do. */
        if (queue == NULL)
                return;

        /*
         * We requeue request from the block device queue to the ccw
         * queue only in two states. In state DASD_STATE_READY the
         * partition detection is done and we need to requeue requests
         * for that. State DASD_STATE_ONLINE is normal block device
         * operation.
         */
        if (basedev->state < DASD_STATE_READY) {
                while ((req = blk_fetch_request(block->request_queue)))
                        __blk_end_request_all(req, -EIO);
                return;
        }
        /* Now we try to fetch requests from the request queue */
        while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
                if (basedev->features & DASD_FEATURE_READONLY &&
                    rq_data_dir(req) == WRITE) {
                        DBF_DEV_EVENT(DBF_ERR, basedev,
                                      "Rejecting write request %p",
                                      req);
                        blk_start_request(req);
                        __blk_end_request_all(req, -EIO);
                        continue;
                }
                cqr = basedev->discipline->build_cp(basedev, block, req);
                if (IS_ERR(cqr)) {
                        if (PTR_ERR(cqr) == -EBUSY)
                                break;	/* normal end condition */
                        if (PTR_ERR(cqr) == -ENOMEM)
                                break;	/* terminate request queue loop */
                        if (PTR_ERR(cqr) == -EAGAIN) {
                                /*
                                 * The current request cannot be built right
                                 * now, we have to try later. If this request
                                 * is the head-of-queue we stop the device
                                 * queue.
                                 */
                                if (!list_empty(&block->ccw_queue))
                                        break;
                                spin_lock_irqsave(
                                        get_ccwdev_lock(basedev->cdev), flags);
                                dasd_device_set_stop_bits(basedev,
                                                          DASD_STOPPED_PENDING);
                                spin_unlock_irqrestore(
                                        get_ccwdev_lock(basedev->cdev), flags);
                                dasd_block_set_timer(block, HZ/2);
                                break;
                        }
                        DBF_DEV_EVENT(DBF_ERR, basedev,
                                      "CCW creation failed (rc=%ld) "
                                      "on request %p",
                                      PTR_ERR(cqr), req);
                        blk_start_request(req);
                        __blk_end_request_all(req, -EIO);
                        continue;
                }
                /*
                 * Note: callback is set to dasd_return_cqr_cb in
                 * __dasd_block_start_head to cover erp requests as well
                 */
                cqr->callback_data = (void *) req;
                cqr->status = DASD_CQR_FILLED;
                blk_start_request(req);
                list_add_tail(&cqr->blocklist, &block->ccw_queue);
                dasd_profile_start(block, cqr, req);
        }
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
        struct request *req;
        int status;
        int error = 0;

        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status <= 0)
                error = status ? status : -EIO;
        __blk_end_request_all(req, error);
}
/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
                                           struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        dasd_erp_fn_t erp_fn;
        unsigned long flags;
        struct dasd_device *base = block->base;

restart:
        /* Process request with final status. */
        list_for_each_safe(l, n, &block->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED &&
                    cqr->status != DASD_CQR_NEED_ERP &&
                    cqr->status != DASD_CQR_TERMINATED)
                        continue;

                if (cqr->status == DASD_CQR_TERMINATED) {
                        base->discipline->handle_terminated_request(cqr);
                        goto restart;
                }

                /* Process requests that may be recovered */
                if (cqr->status == DASD_CQR_NEED_ERP) {
                        erp_fn = base->discipline->erp_action(cqr);
                        if (IS_ERR(erp_fn(cqr)))
                                continue;
                        goto restart;
                }

                /* log sense for fatal error */
                if (cqr->status == DASD_CQR_FAILED) {
                        dasd_log_sense(cqr, &cqr->irb);
                }

                /* First of all call extended error reporting. */
                if (dasd_eer_enabled(base) &&
                    cqr->status == DASD_CQR_FAILED) {
                        dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

                        /* restart request */
                        cqr->status = DASD_CQR_FILLED;
                        cqr->retries = 255;
                        spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
                        dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
                        spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
                                               flags);
                        goto restart;
                }

                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_process_erp(base, cqr);
                        goto restart;
                }

                /* Rechain finished requests to final queue */
                cqr->endclk = get_clock();
                list_move_tail(&cqr->blocklist, final_queue);
        }
}
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
        dasd_schedule_block_bh(cqr->block);
}
static void __dasd_block_start_head(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&block->ccw_queue))
                return;
        /* We always begin with the first requests on the queue, as some
         * of previously started requests have to be enqueued on a
         * dasd_device again for error recovery.
         */
        list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
                if (cqr->status != DASD_CQR_FILLED)
                        continue;
                /* Non-temporary stop condition will trigger fail fast */
                if (block->base->stopped & ~DASD_STOPPED_PENDING &&
                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
                    (!dasd_eer_enabled(block->base))) {
                        cqr->status = DASD_CQR_FAILED;
                        dasd_schedule_block_bh(block);
                        continue;
                }
                /* Don't try to start requests if device is stopped */
                if (block->base->stopped)
                        return;

                /* just a fail safe check, should not happen */
                if (!cqr->startdev)
                        cqr->startdev = block->base;

                /* make sure that the requests we submit find their way back */
                cqr->callback = dasd_return_cqr_cb;

                dasd_add_request_tail(cqr);
        }
}
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
        struct list_head final_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        atomic_set(&block->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock(&block->queue_lock);
        /* Finish off requests on ccw queue */
        __dasd_process_block_ccw_queue(block, &final_queue);
        spin_unlock(&block->queue_lock);
        /* Now call the callback function of requests with final status */
        spin_lock_irq(&block->request_queue_lock);
        list_for_each_safe(l, n, &final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
        }
        spin_lock(&block->queue_lock);
        /* Get new request from the block device request queue */
        __dasd_process_request_queue(block);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock(&block->queue_lock);
        spin_unlock_irq(&block->request_queue_lock);
        dasd_put_device(block->base);
}
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up(&dasd_flush_wq);
}
/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr, *n;
        int rc, i;
        struct list_head flush_queue;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_bh(&block->queue_lock);
        rc = 0;
restart:
        list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
                /* if this request currently owned by a dasd_device cancel it */
                if (cqr->status >= DASD_CQR_QUEUED)
                        rc = dasd_cancel_req(cqr);
                if (rc < 0)
                        break;
                /* Rechain request (including erp chain) so it won't be
                 * touched by the dasd_block_tasklet anymore.
                 * Replace the callback so we notice when the request
                 * is returned from the dasd_device layer.
                 */
                cqr->callback = _dasd_wake_block_flush_cb;
                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
                        list_move_tail(&cqr->blocklist, &flush_queue);
                if (i > 1)
                        /* moved more than one request - need to restart */
                        goto restart;
        }
        spin_unlock_bh(&block->queue_lock);
        /* Now call the callback function of flushed requests */
restart_cb:
        list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
                /* Process finished ERP request. */
                if (cqr->refers) {
                        spin_lock_bh(&block->queue_lock);
                        __dasd_process_erp(block->base, cqr);
                        spin_unlock_bh(&block->queue_lock);
                        /* restart list_for_xx loop since dasd_process_erp
                         * might remove multiple elements */
                        goto restart_cb;
                }
                /* call the callback function */
                spin_lock_irq(&block->request_queue_lock);
                cqr->endclk = get_clock();
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
                spin_unlock_irq(&block->request_queue_lock);
        }
        return rc;
}
/*
 * Schedules a call to dasd_block_tasklet over the block's tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
                return;
        /* life cycle of block is bound to its base device */
        dasd_get_device(block->base);
        tasklet_hi_schedule(&block->tasklet);
}
/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
        struct dasd_block *block;

        block = queue->queuedata;
        spin_lock(&block->queue_lock);
        /* Get new request from the block device request queue */
        __dasd_process_request_queue(block);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock(&block->queue_lock);
}
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
        int rc;

        block->request_queue = blk_init_queue(do_dasd_request,
                                              &block->request_queue_lock);
        if (block->request_queue == NULL)
                return -ENOMEM;

        block->request_queue->queuedata = block;

        elevator_exit(block->request_queue->elevator);
        block->request_queue->elevator = NULL;
        rc = elevator_init(block->request_queue, "deadline");
        if (rc) {
                blk_cleanup_queue(block->request_queue);
                return rc;
        }
        return 0;
}
/*
 * Set up the request queue limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
        int max;

        blk_queue_logical_block_size(block->request_queue, block->bp_block);
        max = block->base->discipline->max_blocks << block->s2b_shift;
        blk_queue_max_sectors(block->request_queue, max);
        blk_queue_max_phys_segments(block->request_queue, -1L);
        blk_queue_max_hw_segments(block->request_queue, -1L);
        /* with page sized segments we can translate each segment into
         * one idaw/tidaw
         */
        blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
        blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
        blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}
/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
        if (block->request_queue) {
                blk_cleanup_queue(block->request_queue);
                block->request_queue = NULL;
        }
}
/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
        struct request *req;

        if (!block->request_queue)
                return;

        spin_lock_irq(&block->request_queue_lock);
        while ((req = blk_fetch_request(block->request_queue)))
                __blk_end_request_all(req, -EIO);
        spin_unlock_irq(&block->request_queue_lock);
}
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
        struct dasd_block *block = bdev->bd_disk->private_data;
        struct dasd_device *base;
        int rc;

        if (!block)
                return -ENODEV;

        base = block->base;
        atomic_inc(&block->open_count);
        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
                rc = -ENODEV;
                goto unlock;
        }

        if (!try_module_get(base->discipline->owner)) {
                rc = -EINVAL;
                goto unlock;
        }

        if (dasd_probeonly) {
                dev_info(&base->cdev->dev,
                         "Accessing the DASD failed because it is in "
                         "probeonly mode\n");
                rc = -EPERM;
                goto out;
        }

        if (base->state <= DASD_STATE_BASIC) {
                DBF_DEV_EVENT(DBF_ERR, base, " %s",
                              " Cannot open unrecognized device");
                rc = -ENODEV;
                goto out;
        }

        return 0;

out:
        module_put(base->discipline->owner);
unlock:
        atomic_dec(&block->open_count);
        return rc;
}
static int dasd_release(struct gendisk *disk, fmode_t mode)
{
        struct dasd_block *block = disk->private_data;

        atomic_dec(&block->open_count);
        module_put(block->base->discipline->owner);
        return 0;
}
/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct dasd_block *block;
        struct dasd_device *base;

        block = bdev->bd_disk->private_data;
        if (!block)
                return -ENODEV;
        base = block->base;

        if (!base->discipline ||
            !base->discipline->fill_geometry)
                return -EINVAL;

        base->discipline->fill_geometry(block, geo);
        geo->start = get_start_sect(bdev) >> block->s2b_shift;
        return 0;
}
const struct block_device_operations
dasd_device_operations = {
        .owner          = THIS_MODULE,
        .open           = dasd_open,
        .release        = dasd_release,
        .ioctl          = dasd_ioctl,
        .compat_ioctl   = dasd_ioctl,
        .getgeo         = dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */

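/*
 * Illustrative sketch (assumption: this wiring lives in dasd_gendisk.c,
 * not in this file): the operations table above only takes effect once
 * it is attached to the gendisk during block device setup, roughly:
 *
 *      struct gendisk *gdp = alloc_disk(1 << DASD_PARTN_BITS);
 *      gdp->fops = &dasd_device_operations;
 *      gdp->private_data = block;
 *      gdp->queue = block->request_queue;
 *      add_disk(gdp);
 */
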
static void dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
        dasd_proc_exit();
#endif
        dasd_eer_exit();
        if (dasd_page_cache != NULL) {
                kmem_cache_destroy(dasd_page_cache);
                dasd_page_cache = NULL;
        }
        dasd_gendisk_exit();
        dasd_devmap_exit();
        if (dasd_debug_area != NULL) {
                debug_unregister(dasd_debug_area);
                dasd_debug_area = NULL;
        }
}

/*
 * SECTION: common functions for ccw_driver use
 */

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
        struct ccw_device *cdev = data;
        int ret;

        ret = ccw_device_set_online(cdev);
        if (ret)
                pr_warning("%s: Setting the DASD online failed with rc=%d\n",
                           dev_name(&cdev->dev), ret);
        else {
                struct dasd_device *device = dasd_device_from_cdev(cdev);
                wait_event(dasd_init_waitq, _wait_for_device(device));
                dasd_put_device(device);
        }
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
                       struct dasd_discipline *discipline)
{
        int ret;

        ret = dasd_add_sysfs_files(cdev);
        if (ret) {
                DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
                                "dasd_generic_probe: could not add "
                                "sysfs entries");
                return ret;
        }
        cdev->handler = &dasd_int_handler;

        /*
         * Automatically online either all DASD devices (dasd_autodetect)
         * or all devices specified with dasd= parameters during
         * initial probe.
         */
        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
            (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
                async_schedule(dasd_generic_auto_online, cdev);
        return 0;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;

        cdev->handler = NULL;

        dasd_remove_sysfs_files(cdev);
        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return;
        }
        /*
         * This device is removed unconditionally. Set the offline
         * flag to prevent dasd_open from opening it while it is
         * not quite down yet.
         */
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        device->block = NULL;
        dasd_delete_device(device);
        /*
         * life cycle of block is bound to device, so delete it after
         * the device was safely removed
         */
        if (block)
                dasd_free_block(block);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
                            struct dasd_discipline *base_discipline)
{
        struct dasd_discipline *discipline;
        struct dasd_device *device;
        int rc;

        /* first online clears initial online feature flag */
        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
        device = dasd_create_device(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);

        discipline = base_discipline;
        if (device->features & DASD_FEATURE_USEDIAG) {
                if (!dasd_diag_discipline_pointer) {
                        pr_warning("%s Setting the DASD online failed because "
                                   "of missing DIAG discipline\n",
                                   dev_name(&cdev->dev));
                        dasd_delete_device(device);
                        return -ENODEV;
                }
                discipline = dasd_diag_discipline_pointer;
        }
        if (!try_module_get(base_discipline->owner)) {
                dasd_delete_device(device);
                return -EINVAL;
        }
        if (!try_module_get(discipline->owner)) {
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return -EINVAL;
        }
        device->base_discipline = base_discipline;
        device->discipline = discipline;

        /* check_device will allocate block device if necessary */
        rc = discipline->check_device(device);
        if (rc) {
                pr_warning("%s Setting the DASD online with discipline %s "
                           "failed with rc=%i\n",
                           dev_name(&cdev->dev), discipline->name, rc);
                module_put(discipline->owner);
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return rc;
        }

        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN) {
                pr_warning("%s Setting the DASD online failed because of a "
                           "missing discipline\n", dev_name(&cdev->dev));
                rc = -ENODEV;
                dasd_set_target_state(device, DASD_STATE_NEW);
                if (device->block)
                        dasd_free_block(device->block);
                dasd_delete_device(device);
        } else
                pr_debug("dasd_generic device %s found\n",
                         dev_name(&cdev->dev));
        dasd_put_device(device);
        return rc;
}

int dasd_generic_set_offline(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;
        int max_count, open_count;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return 0;
        }
        /*
         * We must make sure that this device is currently not in use.
         * The open_count is increased for every opener, that includes
         * the blkdev_get in dasd_scan_partitions. We are only interested
         * in the other openers.
         */
        if (device->block) {
                max_count = device->block->bdev ? 0 : -1;
                open_count = atomic_read(&device->block->open_count);
                if (open_count > max_count) {
                        if (open_count > 0)
                                pr_warning("%s: The DASD cannot be set offline "
                                           "with open count %i\n",
                                           dev_name(&cdev->dev), open_count);
                        else
                                pr_warning("%s: The DASD cannot be set offline "
                                           "while it is in use\n",
                                           dev_name(&cdev->dev));
                        clear_bit(DASD_FLAG_OFFLINE, &device->flags);
                        dasd_put_device(device);
                        return -EBUSY;
                }
        }
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        device->block = NULL;
        dasd_delete_device(device);
        /*
         * life cycle of block is bound to device, so delete it after
         * the device was safely removed
         */
        if (block)
                dasd_free_block(block);
        return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
        struct dasd_device *device;
        struct dasd_ccw_req *cqr;
        int ret;

        device = dasd_device_from_cdev_locked(cdev);
        if (IS_ERR(device))
                return 0;
        ret = 0;
        switch (event) {
        case CIO_GONE:
        case CIO_BOXED:
        case CIO_NO_PATH:
                /* First of all call extended error reporting. */
                dasd_eer_write(device, NULL, DASD_EER_NOPATH);

                if (device->state < DASD_STATE_BASIC)
                        break;
                /* Device is active. We want to keep it. */
                list_for_each_entry(cqr, &device->ccw_queue, devlist)
                        if (cqr->status == DASD_CQR_IN_IO) {
                                cqr->status = DASD_CQR_QUEUED;
                                cqr->retries++;
                        }
                dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
                dasd_device_clear_timer(device);
                dasd_schedule_device_bh(device);
                ret = 1;
                break;
        case CIO_OPER:
                /* FIXME: add a sanity check. */
                dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
                if (device->stopped & DASD_UNRESUMED_PM) {
                        dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
                        dasd_restore_device(device);
                        ret = 1;
                        break;
                }
                dasd_schedule_device_bh(device);
                if (device->block)
                        dasd_schedule_block_bh(device->block);
                ret = 1;
                break;
        }
        dasd_put_device(device);
        return ret;
}

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
        struct dasd_ccw_req *cqr, *n;
        int rc;
        struct list_head freeze_queue;
        struct dasd_device *device = dasd_device_from_cdev(cdev);

        if (IS_ERR(device))
                return PTR_ERR(device);
        /* disallow new I/O */
        dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
        /* clear active requests */
        INIT_LIST_HEAD(&freeze_queue);
        spin_lock_irq(get_ccwdev_lock(cdev));
        rc = 0;
        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
                /* Check status and move request to flush_queue */
                if (cqr->status == DASD_CQR_IN_IO) {
                        rc = device->discipline->term_IO(cqr);
                        if (rc) {
                                /* unable to terminate request */
                                dev_err(&device->cdev->dev,
                                        "Unable to terminate request %p "
                                        "on suspend\n", cqr);
                                spin_unlock_irq(get_ccwdev_lock(cdev));
                                dasd_put_device(device);
                                return rc;
                        }
                }
                list_move_tail(&cqr->devlist, &freeze_queue);
        }

        spin_unlock_irq(get_ccwdev_lock(cdev));

        list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
                wait_event(dasd_flush_wq,
                           (cqr->status != DASD_CQR_CLEAR_PENDING));
                if (cqr->status == DASD_CQR_CLEARED)
                        cqr->status = DASD_CQR_QUEUED;
        }
        /* move freeze_queue to start of the ccw_queue */
        spin_lock_irq(get_ccwdev_lock(cdev));
        list_splice_tail(&freeze_queue, &device->ccw_queue);
        spin_unlock_irq(get_ccwdev_lock(cdev));

        if (device->discipline->freeze)
                rc = device->discipline->freeze(device);

        dasd_put_device(device);
        return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
        struct dasd_device *device = dasd_device_from_cdev(cdev);
        int rc = 0;

        if (IS_ERR(device))
                return PTR_ERR(device);

        /* allow new IO again */
        dasd_device_remove_stop_bits(device,
                                     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

        dasd_schedule_device_bh(device);

        /*
         * call discipline restore function
         * if device is stopped do nothing e.g. for disconnected devices
         */
        if (device->discipline->restore && !(device->stopped))
                rc = device->discipline->restore(device);
        if (rc || device->stopped)
                /*
                 * if the resume failed for the DASD we put it in
                 * an UNRESUMED stop state
                 */
                device->stopped |= DASD_UNRESUMED_PM;

        if (device->block)
                dasd_schedule_block_bh(device->block);

        dasd_put_device(device);
        return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

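/*
 * Usage sketch: how a discipline driver hands these generic helpers to
 * the common I/O layer. Loosely modeled on the ECKD discipline
 * (dasd_eckd.c); the dasd_eckd_* names are examples from that file, not
 * definitions made here:
 *
 *      static struct ccw_driver dasd_eckd_driver = {
 *              .name        = "dasd-eckd",
 *              .owner       = THIS_MODULE,
 *              .ids         = dasd_eckd_ids,
 *              .probe       = dasd_eckd_probe,
 *              .remove      = dasd_generic_remove,
 *              .set_online  = dasd_eckd_set_online,
 *              .set_offline = dasd_generic_set_offline,
 *              .notify      = dasd_generic_notify,
 *              .freeze      = dasd_generic_pm_freeze,
 *              .thaw        = dasd_generic_restore_device,
 *              .restore     = dasd_generic_restore_device,
 *      };
 *
 * where dasd_eckd_probe() and dasd_eckd_set_online() forward to
 * dasd_generic_probe() and dasd_generic_set_online() with the ECKD
 * discipline as argument.
 */
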
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
                                                   void *rdc_buffer,
                                                   int rdc_buffer_size,
                                                   int magic)
{
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        unsigned long *idaw;

        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed */
                dev_err(&device->cdev->dev,
                        "An error occurred in the DASD device driver, "
                        "reason=%s\n", "13");
                return cqr;
        }

        ccw = cqr->cpaddr;
        ccw->cmd_code = CCW_CMD_RDC;
        if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
                idaw = (unsigned long *) (cqr->data);
                ccw->cda = (__u32)(addr_t) idaw;
                ccw->flags = CCW_FLAG_IDA;
                idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
        } else {
                ccw->cda = (__u32)(addr_t) rdc_buffer;
                ccw->flags = 0;
        }

        ccw->count = rdc_buffer_size;
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->expires = 10*HZ;
        cqr->retries = 256;
        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;
        return cqr;
}

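/*
 * Background on the IDAL branch above: ccw->cda holds a 31-bit address,
 * so a data buffer that ends above 2 GB cannot be referenced directly.
 * In that case idal_is_needed() returns true and the CCW points, with
 * CCW_FLAG_IDA set, at the list of indirect data address words that
 * idal_create_words() builds in cqr->data.
 */
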
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
                                void *rdc_buffer, int rdc_buffer_size)
{
        int ret;
        struct dasd_ccw_req *cqr;

        cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
                                     magic);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);

        ret = dasd_sleep_on(cqr);
        dasd_sfree_request(cqr, cqr->memdev);
        return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

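/*
 * Usage sketch (assumption: modeled on the FBA discipline's device
 * checking step; "private" and rdc_data are that caller's names, not
 * defined here): a discipline reads the device characteristics
 * synchronously into its private area during online processing:
 *
 *      rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
 *                                       &private->rdc_data, 32);
 *      if (rc)
 *              return rc;
 */
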
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
        struct tsb *tsb = NULL;
        char *sense = NULL;

        if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
                if (irb->scsw.tm.tcw)
                        tsb = tcw_get_tsb((struct tcw *)(unsigned long)
                                          irb->scsw.tm.tcw);
                if (tsb && tsb->length == 64 && tsb->flags)
                        switch (tsb->flags & 0x07) {
                        case 1: /* tsa_iostat */
                                sense = tsb->tsa.iostat.sense;
                                break;
                        case 2: /* tsa_ddpc */
                                sense = tsb->tsa.ddpc.sense;
                                break;
                        default:
                                /* currently we don't use interrogate data */
                                break;
                        }
        } else if (irb->esw.esw0.erw.cons) {
                sense = irb->ecw;
        }
        return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

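/*
 * Usage sketch (illustrative; the sense-bit test is only an example of
 * the kind of check error-recovery code performs, not a definition made
 * here): callers need not care whether the interrupt arrived in command
 * or transport mode:
 *
 *      char *sense = dasd_get_sense(irb);
 *
 *      if (sense && (sense[0] & 0x80))
 *              ...start recovery for a command reject...
 */
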
static int __init dasd_init(void)
{
        int rc;

        init_waitqueue_head(&dasd_init_waitq);
        init_waitqueue_head(&dasd_flush_wq);
        init_waitqueue_head(&generic_waitq);

        /* register 'common' DASD debug area, used for all DBF_XXX calls */
        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
        if (dasd_debug_area == NULL) {
                rc = -ENOMEM;
                goto failed;
        }
        debug_register_view(dasd_debug_area, &debug_sprintf_view);
        debug_set_level(dasd_debug_area, DBF_WARNING);

        DBF_EVENT(DBF_EMERG, "%s", "debug area created");

        dasd_diag_discipline_pointer = NULL;

        rc = dasd_devmap_init();
        if (rc)
                goto failed;
        rc = dasd_gendisk_init();
        if (rc)
                goto failed;
        rc = dasd_parse();
        if (rc)
                goto failed;
        rc = dasd_eer_init();
        if (rc)
                goto failed;
#ifdef CONFIG_PROC_FS
        rc = dasd_proc_init();
        if (rc)
                goto failed;
#endif

        return 0;
failed:
        pr_info("The DASD device driver could not be initialized\n");
        dasd_exit();
        return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);