/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2007
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#include <linux/cdev.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *	  is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *	  is available by reading the sysfs attribute reclen. Each write()
 *	  to the device must specify an integral multiple (at most 511) of
 *	  reclen.
 */

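/*
 * Illustrative usage from user space (a sketch, not part of the kernel
 * interface; the device numbers and udev-created node names below are
 * assumptions): with a virtual punch 000d and a virtual reader 000c
 * online, records can be written and spool files read like this:
 *
 *	reclen=$(cat /sys/bus/ccw/devices/0.0.000d/reclen)
 *	dd if=payload of=/dev/vmpun-0.0.000d bs=$reclen
 *	dd if=/dev/vmrdr-0.0.000c of=spoolfile bs=4096
 *
 * Each write() must be a multiple of reclen and transfers at most 511
 * records; seeking on the reader is only possible in 4K steps.
 */
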
static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

#define PRINTK_HEADER "vmur: "

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.name		= "vmur",
	.owner		= THIS_MODULE,
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
};

/*
 * Allocation, freeing, getting and putting of urdev structures
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->cdev = cdev;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	mutex_init(&urd->open_mutex);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	kfree(urd);
}

/*
 * This is how the character device driver gets a reference to a
 * ur device. When this call returns successfully, a reference has
 * been taken (by get_device) on the underlying kobject. The recipient
 * of this urdev pointer must eventually drop it with urdev_put(urd)
 * which does the corresponding put_device().
 */
static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;

	return cdev->dev.driver_data;
}

static void urdev_put(struct urdev *urd)
{
	put_device(&urd->cdev->dev);
}

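/*
 * Typical pairing (illustration only, mirroring the use in ur_open()
 * further below):
 *
 *	urd = urdev_get_from_devno(devno);
 *	if (!urd)
 *		return -ENXIO;
 *	... use urd ...
 *	urdev_put(urd);
 */
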
/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for freeing the channel program
 * that alloc_chan_prog returned, using free_chan_prog.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
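
/*
 * Sketch of the channel program built below for n records (illustration
 * only, not an additional interface):
 *
 *	cpa[0]    WRITE  CC|SLI  count=reclen  cda -> copy of record 0
 *	  ...
 *	cpa[n-1]  WRITE  CC|SLI  count=reclen  cda -> copy of record n-1
 *	cpa[n]    NOP                          (CE and DE in one interrupt)
 */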
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
	      intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);

	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = cdev->dev.driver_data;
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd = dev->driver_data;

	return sprintf(buf, "%zu\n", urd->reclen);
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -ENOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

static int do_diag_14(unsigned long rx, unsigned long ry1,
		      unsigned long subcode)
{
	register unsigned long _ry1 asm("2") = ry1;
	register unsigned long _ry2 asm("3") = subcode;
	int rc = 0;

	asm volatile(
#ifdef CONFIG_64BIT
		"   sam31\n"
		"   diag    %2,2,0x14\n"
		"   sam64\n"
#else
		"   diag    %2,2,0x14\n"
#endif
		"   ipm     %0\n"
		"   srl     %0,28\n"
		: "=d" (rc), "+d" (_ry2)
		: "d" (rx), "d" (_ry1)
		: "cc");

	TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc);
	return rc;
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = do_diag_14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = do_diag_14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA) {
			break;
		}
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = do_diag_14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0; /* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -ENOTSUPP;
	}
}

static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -ENOTSUPP;
	}
}

static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;

	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file->f_dentry->d_inode->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd)
		return -ENXIO;

	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&urd->open_mutex)) {
			rc = -EBUSY;
			goto fail_put;
		}
	} else {
		if (mutex_lock_interruptible(&urd->open_mutex)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
	}

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	mutex_unlock(&urd->open_mutex);
fail_put:
	urdev_put(urd);
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	mutex_unlock(&urf->urd->open_mutex);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t newpos;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE; /* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE; /* only multiples of 4K allowed */
	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = offset;
		break;
	case 1: /* SEEK_CUR */
		newpos = file->f_pos + offset;
		break;
	default:
		return -EINVAL;
	}
	file->f_pos = newpos;
	return newpos;
}

static struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek  = ur_llseek,
};

/*
 * ccw_device infrastructure:
 *     ur_probe gets its own ref to the device (i.e. get_device),
 *     creates the struct urdev, the device attributes, sets up
 *     the interrupt handler and validates the virtual unit record device.
 *     ur_remove removes the device attributes, frees the struct urdev
 *     and drops (put_device) the ref to the device we got in ur_probe.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private);

	if (!get_device(&cdev->dev))
		return -ENODEV;

	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	cdev->dev.driver_data = urd;
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -ENOTSUPP;
		goto fail;
	}

	return 0;

fail:
	urdev_free(urd);
	put_device(&cdev->dev);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	struct urdev *urd = cdev->dev.driver_data;

	TRACE("ur_remove\n");
	if (cdev->online)
		ur_set_offline(cdev);
	ur_remove_attributes(&cdev->dev);
	urdev_free(urd);
	put_device(&cdev->dev);
}

static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p state=%d\n", cdev,
	      *(int *) cdev->private);

	if (!try_module_get(ur_driver.owner))
		return -EINVAL;

	urd = (struct urdev *) cdev->dev.driver_data;
	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_module_put;
	}

	cdev_init(urd->char_device, &ur_fops);
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id);
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", cdev->dev.bus_id);
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
	} else {
		rc = -ENOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
				    "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}

	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
fail_module_put:
	module_put(ur_driver.owner);

	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	struct urdev *urd;

	TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n",
	      cdev, cdev->private, *(int *) cdev->private);
	urd = (struct urdev *) cdev->dev.driver_data;
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	module_put(ur_driver.owner);

	return 0;
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		PRINT_ERR("%s is only available under z/VM.\n", ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_free_dbf;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_unregister_region;
	}
	PRINT_INFO("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_region:
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	class_destroy(vmur_class);
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	debug_unregister(vmur_dbf);
	PRINT_INFO("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);