drivers/s390/cio/css.c ([S390] cio: introduce css_eval_scheduled)

/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
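
/*
 * Example (editor's sketch, not part of the original source): counting the
 * valid subchannels on all enabled subchannel sets with
 * for_each_subchannel(). The callback count_valid_sch() and its counter
 * are hypothetical; note that returning non-zero from the callback stops
 * the iteration, so a pure visitor must return 0.
 */
#if 0
static int count_valid_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        int *count = data;

        if (!stsch_err(schid, &schib) && css_sch_is_valid(&schib))
                (*count)++;
        return 0;       /* keep iterating */
}

static void count_subchannels(void)
{
        int count = 0;

        for_each_subchannel(count_valid_sch, &count);
        pr_info("%d valid subchannels\n", count);
}
#endif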

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id, void *),
                               void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}
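
/*
 * Example (editor's sketch, not in the original source): visiting only the
 * registered subchannels by passing a NULL fn_unknown callback, which
 * skips the brute-force scan of unregistered subchannel IDs. The function
 * log_known_sch() is hypothetical.
 */
#if 0
static int log_known_sch(struct subchannel *sch, void *data)
{
        dev_info(&sch->dev, "subchannel 0.%x.%04x is registered\n",
                 sch->schid.ssid, sch->schid.sch_no);
        return 0;
}

static void log_registered_subchannels(void)
{
        for_each_subchannel_staged(log_known_sch, NULL, NULL);
}
#endif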

static struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel(sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        return sch;
}

static void css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_register(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}
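
/*
 * Editor's note (worked example): with pmcw->pim == 0xc0, only the two
 * leftmost path bits are set. ssd->path_mask then becomes 0xc0 and only
 * ssd->chpid[0] and ssd->chpid[1] are initialized, taking their IDs from
 * pmcw->chpid[0] and pmcw->chpid[1]; the remaining array slots stay zero.
 */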

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /* Console is initialized too early for functions requiring
                 * memory allocation. */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them, since they would be
         * unregistered before they could ever be used anyway. So we delay
         * the add uevent until device recognition has succeeded.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        ret = css_register_subchannel(sch);
        if (ret)
                put_device(&sch->dev);
        return ret;
}

static int check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
361
362 static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
363 {
364         struct schib schib;
365
366         if (!slow) {
367                 /* Will be done on the slow path. */
368                 return -EAGAIN;
369         }
370         if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
371                 /* Unusable - ignore. */
372                 return 0;
373         }
374         CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
375                          "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
376
377         return css_probe_device(schid);
378 }
379
380 static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
381 {
382         int ret = 0;
383
384         if (sch->driver) {
385                 if (sch->driver->sch_event)
386                         ret = sch->driver->sch_event(sch, slow);
387                 else
388                         dev_dbg(&sch->dev,
389                                 "Got subchannel machine check but "
390                                 "no sch_event handler provided.\n");
391         }
392         return ret;
393 }
394
395 static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
396 {
397         struct subchannel *sch;
398         int ret;
399
400         sch = get_subchannel_by_schid(schid);
401         if (sch) {
402                 ret = css_evaluate_known_subchannel(sch, slow);
403                 put_device(&sch->dev);
404         } else
405                 ret = css_evaluate_new_subchannel(schid, slow);
406         if (ret == -EAGAIN)
407                 css_schedule_eval(schid);
408 }
409
410 static struct idset *slow_subchannel_set;
411 static spinlock_t slow_subchannel_lock;
412 static wait_queue_head_t css_eval_wq;
413 static atomic_t css_eval_scheduled;
414
415 static int __init slow_subchannel_init(void)
416 {
417         spin_lock_init(&slow_subchannel_lock);
418         atomic_set(&css_eval_scheduled, 0);
419         init_waitqueue_head(&css_eval_wq);
420         slow_subchannel_set = idset_sch_new();
421         if (!slow_subchannel_set) {
422                 CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
423                 return -ENOMEM;
424         }
425         return 0;
426 }
427
428 static int slow_eval_known_fn(struct subchannel *sch, void *data)
429 {
430         int eval;
431         int rc;
432
433         spin_lock_irq(&slow_subchannel_lock);
434         eval = idset_sch_contains(slow_subchannel_set, sch->schid);
435         idset_sch_del(slow_subchannel_set, sch->schid);
436         spin_unlock_irq(&slow_subchannel_lock);
437         if (eval) {
438                 rc = css_evaluate_known_subchannel(sch, 1);
439                 if (rc == -EAGAIN)
440                         css_schedule_eval(sch->schid);
441         }
442         return 0;
443 }
444
445 static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
446 {
447         int eval;
448         int rc = 0;
449
450         spin_lock_irq(&slow_subchannel_lock);
451         eval = idset_sch_contains(slow_subchannel_set, schid);
452         idset_sch_del(slow_subchannel_set, schid);
453         spin_unlock_irq(&slow_subchannel_lock);
454         if (eval) {
455                 rc = css_evaluate_new_subchannel(schid, 1);
456                 switch (rc) {
457                 case -EAGAIN:
458                         css_schedule_eval(schid);
459                         rc = 0;
460                         break;
461                 case -ENXIO:
462                 case -ENOMEM:
463                 case -EIO:
464                         /* These should abort looping */
465                         break;
466                 default:
467                         rc = 0;
468                 }
469         }
470         return rc;
471 }
472
473 static void css_slow_path_func(struct work_struct *unused)
474 {
475         unsigned long flags;
476
477         CIO_TRACE_EVENT(4, "slowpath");
478         for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
479                                    NULL);
480         spin_lock_irqsave(&slow_subchannel_lock, flags);
481         if (idset_is_empty(slow_subchannel_set)) {
482                 atomic_set(&css_eval_scheduled, 0);
483                 wake_up(&css_eval_wq);
484         }
485         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
486 }
487
488 static DECLARE_WORK(slow_path_work, css_slow_path_func);
489 struct workqueue_struct *slow_path_wq;
490
491 void css_schedule_eval(struct subchannel_id schid)
492 {
493         unsigned long flags;
494
495         spin_lock_irqsave(&slow_subchannel_lock, flags);
496         idset_sch_add(slow_subchannel_set, schid);
497         atomic_set(&css_eval_scheduled, 1);
498         queue_work(slow_path_wq, &slow_path_work);
499         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
500 }
501
502 void css_schedule_eval_all(void)
503 {
504         unsigned long flags;
505
506         spin_lock_irqsave(&slow_subchannel_lock, flags);
507         idset_fill(slow_subchannel_set);
508         atomic_set(&css_eval_scheduled, 1);
509         queue_work(slow_path_wq, &slow_path_work);
510         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
511 }
512
513 void css_wait_for_slow_path(void)
514 {
515         flush_workqueue(slow_path_wq);
516 }
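
/*
 * Example (editor's sketch, not in the original source): a caller that
 * needs a full re-evaluation and must block until the slow path has
 * processed it can pair the two calls below. css_schedule_eval_all()
 * marks every subchannel and raises css_eval_scheduled before queueing
 * the work; css_wait_for_slow_path() flushes the workqueue. The wrapper
 * function name is hypothetical.
 */
#if 0
static void example_full_reevaluation(void)
{
        css_schedule_eval_all();
        css_wait_for_slow_path();
}
#endif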

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
        int ret;

        CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
                      schid.ssid, schid.sch_no);
        if (need_reprobe)
                return -EAGAIN;

        ret = css_probe_device(schid);
        switch (ret) {
        case 0:
                break;
        case -ENXIO:
        case -ENOMEM:
        case -EIO:
                /* These should abort looping */
                break;
        default:
                ret = 0;
        }

        return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
        /* Make sure initial subchannel scan is done. */
        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        if (need_reprobe)
                css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
        int ret;

        CIO_MSG_EVENT(4, "reprobe start\n");

        /* Make sure initial subchannel scan is done. */
        if (atomic_read(&ccw_device_init_count) != 0) {
                queue_work(ccw_device_work, &reprobe_idle_work);
                return;
        }
        need_reprobe = 0;
        ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

        CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
                      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        need_reprobe = 1;
        queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}
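
/*
 * Editor's note (worked example): for a subchannel-report CRW pair,
 * crw0->rsid carries the subchannel number and bits of crw1->rsid carry
 * the subchannel-set ID. E.g. crw0->rsid == 0x0017 together with
 * ((crw1->rsid >> 8) & 3) == 1 yields schid 0.1.0017.
 */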

static int __init setup_subchannel(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        ret = PTR_ERR(sch);
                else
                        ret = 0;
                switch (ret) {
                case 0:
                        break;
                case -ENOMEM:
                        panic("Out of memory in init_channel_subsystem\n");
                /* -ENXIO: no more subchannels. */
                case -ENXIO:
                        return ret;
                /* -EIO: this subchannel set not supported. */
                case -EIO:
                        return ret;
                default:
                        return 0;
                }
        }
        /*
         * We register ALL valid subchannels, even those that were already
         * present before init_channel_subsystem ran. They could not have
         * been registered earlier (kmalloc was not yet available), so we
         * do it now. This is true e.g. for the console subchannel.
         */
        if (css_register_subchannel(sch)) {
                if (!cio_is_console(schid))
                        put_device(&sch->dev);
        }
        return 0;
}

static void __init css_generate_pgid(struct channel_subsystem *css,
                                     u32 tod_high)
{
        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = stap();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
        css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
        css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        if (css->pseudo_subchannel) {
                /* Implies that it has been generated but never registered. */
                css_subchannel_release(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
        }
        kfree(css);
}

static ssize_t css_cm_enable_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t css_cm_enable_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        dev_set_name(&css->device, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        void *secm_area;
        int ret, i;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 0, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 1, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                /* search for subchannels, which appeared during hibernation */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_determine_css_characteristics();
        if (ret == -ENOMEM)
                goto out;

        ret = chsc_alloc_sei_area();
        if (ret)
                goto out;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        ret = bus_register(&css_bus_type);
        if (ret)
                goto out;

        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        switch (ret) {
        case 0: /* Success. */
                max_ssid = __MAX_SSID;
                break;
        case -ENOMEM:
                goto out_bus;
        default:
                max_ssid = 0;
        }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret) {
                        kfree(channel_subsystems[i]);
                        goto out_unregister;
                }
                ret = device_register(&css->device);
                if (ret) {
                        put_device(&css->device);
                        goto out_unregister;
                }
                if (css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret) {
                        put_device(&css->pseudo_subchannel->dev);
                        goto out_file;
                }
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_file:
        if (css_chsc_characteristics.secm)
                device_remove_file(&channel_subsystems[i]->device,
                                   &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
out_bus:
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        chsc_free_sei_area();
        idset_free(slow_subchannel_set);
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}

static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;
        int i;

        for (i = 0; i <= __MAX_CSSID; i++) {
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device, &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_SCH);
        chsc_free_sei_area();
        idset_free(slow_subchannel_set);
        isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;

        ret = io_subchannel_init();
        if (ret)
                css_bus_cleanup();

        return ret;
}
subsys_initcall(channel_subsystem_init);

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        /* Allocate and register subchannels. */
        for_each_subchannel(setup_subchannel, NULL);
        /* Wait for the evaluation of subchannels to finish. */
        wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);

        /* Wait for the initialization of ccw devices to finish. */
        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        flush_workqueue(ccw_device_work);

        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}
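
/*
 * Example (editor's sketch, not in the original source): the id table a
 * css driver exposes for matching against the subchannel type. This
 * mirrors the style of io_subchannel_ids in device.c; the table name
 * here is hypothetical.
 */
#if 0
static struct css_device_id example_subchannel_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, example_subchannel_ids);
#endif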

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.name = cdrv->name;
        cdrv->drv.bus = &css_bus_type;
        cdrv->drv.owner = cdrv->owner;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
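
/*
 * Example (editor's sketch, not in the original source): a minimal css
 * driver registration. The driver name, id table and probe callback are
 * hypothetical; css_driver_register() fills in drv.name, drv.bus and
 * drv.owner from the fields set here.
 */
#if 0
static struct css_driver example_driver = {
        .owner = THIS_MODULE,
        .name = "example",
        .subchannel_type = example_subchannel_ids,
        .probe = example_probe,         /* hypothetical callback */
};

static int __init example_driver_init(void)
{
        return css_driver_register(&example_driver);
}
#endif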

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);