]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * drivers/s390/cio/chsc.c | |
3 | * S/390 common I/O routines -- channel subsystem call | |
c63307f1 | 4 | * $Revision: 1.120 $ |
1da177e4 LT |
5 | * |
6 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | |
7 | * IBM Corporation | |
8 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | |
9 | * Cornelia Huck (cohuck@de.ibm.com) | |
10 | * Arnd Bergmann (arndb@de.ibm.com) | |
11 | */ | |
12 | ||
13 | #include <linux/module.h> | |
14 | #include <linux/config.h> | |
15 | #include <linux/slab.h> | |
16 | #include <linux/init.h> | |
17 | #include <linux/device.h> | |
18 | ||
19 | #include <asm/cio.h> | |
20 | ||
21 | #include "css.h" | |
22 | #include "cio.h" | |
23 | #include "cio_debug.h" | |
24 | #include "ioasm.h" | |
25 | #include "chsc.h" | |
26 | ||
1da177e4 LT |
27 | static void *sei_page; |
28 | ||
29 | static int new_channel_path(int chpid); | |
30 | ||
31 | static inline void | |
32 | set_chp_logically_online(int chp, int onoff) | |
33 | { | |
a28c6944 | 34 | css[0]->chps[chp]->state = onoff; |
1da177e4 LT |
35 | } |
36 | ||
37 | static int | |
38 | get_chp_status(int chp) | |
39 | { | |
a28c6944 | 40 | return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); |
1da177e4 LT |
41 | } |
42 | ||
43 | void | |
44 | chsc_validate_chpids(struct subchannel *sch) | |
45 | { | |
46 | int mask, chp; | |
47 | ||
48 | for (chp = 0; chp <= 7; chp++) { | |
49 | mask = 0x80 >> chp; | |
50 | if (!get_chp_status(sch->schib.pmcw.chpid[chp])) | |
51 | /* disable using this path */ | |
52 | sch->opm &= ~mask; | |
53 | } | |
54 | } | |
55 | ||
56 | void | |
57 | chpid_is_actually_online(int chp) | |
58 | { | |
59 | int state; | |
60 | ||
61 | state = get_chp_status(chp); | |
62 | if (state < 0) { | |
63 | need_rescan = 1; | |
64 | queue_work(slow_path_wq, &slow_path_work); | |
65 | } else | |
66 | WARN_ON(!state); | |
67 | } | |
68 | ||
69 | /* FIXME: this is _always_ called for every subchannel. shouldn't we | |
70 | * process more than one at a time? */ | |
71 | static int | |
72 | chsc_get_sch_desc_irq(struct subchannel *sch, void *page) | |
73 | { | |
74 | int ccode, j; | |
75 | ||
76 | struct { | |
77 | struct chsc_header request; | |
78 | u16 reserved1; | |
79 | u16 f_sch; /* first subchannel */ | |
80 | u16 reserved2; | |
81 | u16 l_sch; /* last subchannel */ | |
82 | u32 reserved3; | |
83 | struct chsc_header response; | |
84 | u32 reserved4; | |
85 | u8 sch_valid : 1; | |
86 | u8 dev_valid : 1; | |
87 | u8 st : 3; /* subchannel type */ | |
88 | u8 zeroes : 3; | |
89 | u8 unit_addr; /* unit address */ | |
90 | u16 devno; /* device number */ | |
91 | u8 path_mask; | |
92 | u8 fla_valid_mask; | |
93 | u16 sch; /* subchannel */ | |
94 | u8 chpid[8]; /* chpids 0-7 */ | |
95 | u16 fla[8]; /* full link addresses 0-7 */ | |
96 | } *ssd_area; | |
97 | ||
98 | ssd_area = page; | |
99 | ||
100 | ssd_area->request = (struct chsc_header) { | |
101 | .length = 0x0010, | |
102 | .code = 0x0004, | |
103 | }; | |
104 | ||
a8237fc4 CH |
105 | ssd_area->f_sch = sch->schid.sch_no; |
106 | ssd_area->l_sch = sch->schid.sch_no; | |
1da177e4 LT |
107 | |
108 | ccode = chsc(ssd_area); | |
109 | if (ccode > 0) { | |
110 | pr_debug("chsc returned with ccode = %d\n", ccode); | |
111 | return (ccode == 3) ? -ENODEV : -EBUSY; | |
112 | } | |
113 | ||
114 | switch (ssd_area->response.code) { | |
115 | case 0x0001: /* everything ok */ | |
116 | break; | |
117 | case 0x0002: | |
118 | CIO_CRW_EVENT(2, "Invalid command!\n"); | |
119 | return -EINVAL; | |
120 | case 0x0003: | |
121 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | |
122 | return -EINVAL; | |
123 | case 0x0004: | |
124 | CIO_CRW_EVENT(2, "Model does not provide ssd\n"); | |
125 | return -EOPNOTSUPP; | |
126 | default: | |
127 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | |
128 | ssd_area->response.code); | |
129 | return -EIO; | |
130 | } | |
131 | ||
132 | /* | |
133 | * ssd_area->st stores the type of the detected | |
134 | * subchannel, with the following definitions: | |
135 | * | |
136 | * 0: I/O subchannel: All fields have meaning | |
137 | * 1: CHSC subchannel: Only sch_val, st and sch | |
138 | * have meaning | |
139 | * 2: Message subchannel: All fields except unit_addr | |
140 | * have meaning | |
141 | * 3: ADM subchannel: Only sch_val, st and sch | |
142 | * have meaning | |
143 | * | |
144 | * Other types are currently undefined. | |
145 | */ | |
146 | if (ssd_area->st > 3) { /* uhm, that looks strange... */ | |
147 | CIO_CRW_EVENT(0, "Strange subchannel type %d" | |
a8237fc4 CH |
148 | " for sch %04x\n", ssd_area->st, |
149 | sch->schid.sch_no); | |
1da177e4 LT |
150 | /* |
151 | * There may have been a new subchannel type defined in the | |
152 | * time since this code was written; since we don't know which | |
153 | * fields have meaning and what to do with it we just jump out | |
154 | */ | |
155 | return 0; | |
156 | } else { | |
157 | const char *type[4] = {"I/O", "chsc", "message", "ADM"}; | |
158 | CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n", | |
a8237fc4 | 159 | sch->schid.sch_no, type[ssd_area->st]); |
1da177e4 LT |
160 | |
161 | sch->ssd_info.valid = 1; | |
162 | sch->ssd_info.type = ssd_area->st; | |
163 | } | |
164 | ||
165 | if (ssd_area->st == 0 || ssd_area->st == 2) { | |
166 | for (j = 0; j < 8; j++) { | |
167 | if (!((0x80 >> j) & ssd_area->path_mask & | |
168 | ssd_area->fla_valid_mask)) | |
169 | continue; | |
170 | sch->ssd_info.chpid[j] = ssd_area->chpid[j]; | |
171 | sch->ssd_info.fla[j] = ssd_area->fla[j]; | |
172 | } | |
173 | } | |
174 | return 0; | |
175 | } | |
176 | ||
177 | int | |
178 | css_get_ssd_info(struct subchannel *sch) | |
179 | { | |
180 | int ret; | |
181 | void *page; | |
182 | ||
183 | page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | |
184 | if (!page) | |
185 | return -ENOMEM; | |
186 | spin_lock_irq(&sch->lock); | |
187 | ret = chsc_get_sch_desc_irq(sch, page); | |
188 | if (ret) { | |
189 | static int cio_chsc_err_msg; | |
190 | ||
191 | if (!cio_chsc_err_msg) { | |
192 | printk(KERN_ERR | |
193 | "chsc_get_sch_descriptions:" | |
194 | " Error %d while doing chsc; " | |
195 | "processing some machine checks may " | |
196 | "not work\n", ret); | |
197 | cio_chsc_err_msg = 1; | |
198 | } | |
199 | } | |
200 | spin_unlock_irq(&sch->lock); | |
201 | free_page((unsigned long)page); | |
202 | if (!ret) { | |
203 | int j, chpid; | |
204 | /* Allocate channel path structures, if needed. */ | |
205 | for (j = 0; j < 8; j++) { | |
206 | chpid = sch->ssd_info.chpid[j]; | |
207 | if (chpid && (get_chp_status(chpid) < 0)) | |
208 | new_channel_path(chpid); | |
209 | } | |
210 | } | |
211 | return ret; | |
212 | } | |
213 | ||
214 | static int | |
215 | s390_subchannel_remove_chpid(struct device *dev, void *data) | |
216 | { | |
217 | int j; | |
218 | int mask; | |
219 | struct subchannel *sch; | |
a28c6944 | 220 | struct channel_path *chpid; |
1da177e4 LT |
221 | struct schib schib; |
222 | ||
223 | sch = to_subchannel(dev); | |
224 | chpid = data; | |
225 | for (j = 0; j < 8; j++) | |
a28c6944 | 226 | if (sch->schib.pmcw.chpid[j] == chpid->id) |
1da177e4 LT |
227 | break; |
228 | if (j >= 8) | |
229 | return 0; | |
230 | ||
231 | mask = 0x80 >> j; | |
232 | spin_lock(&sch->lock); | |
233 | ||
a8237fc4 | 234 | stsch(sch->schid, &schib); |
1da177e4 LT |
235 | if (!schib.pmcw.dnv) |
236 | goto out_unreg; | |
237 | memcpy(&sch->schib, &schib, sizeof(struct schib)); | |
238 | /* Check for single path devices. */ | |
239 | if (sch->schib.pmcw.pim == 0x80) | |
240 | goto out_unreg; | |
241 | if (sch->vpm == mask) | |
242 | goto out_unreg; | |
243 | ||
244 | if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND | | |
245 | SCSW_ACTL_HALT_PEND | | |
246 | SCSW_ACTL_START_PEND | | |
247 | SCSW_ACTL_RESUME_PEND)) && | |
248 | (sch->schib.pmcw.lpum == mask)) { | |
249 | int cc = cio_cancel(sch); | |
250 | ||
251 | if (cc == -ENODEV) | |
252 | goto out_unreg; | |
253 | ||
254 | if (cc == -EINVAL) { | |
255 | cc = cio_clear(sch); | |
256 | if (cc == -ENODEV) | |
257 | goto out_unreg; | |
258 | /* Call handler. */ | |
259 | if (sch->driver && sch->driver->termination) | |
260 | sch->driver->termination(&sch->dev); | |
261 | goto out_unlock; | |
262 | } | |
263 | } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && | |
264 | (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && | |
265 | (sch->schib.pmcw.lpum == mask)) { | |
266 | int cc; | |
267 | ||
268 | cc = cio_clear(sch); | |
269 | if (cc == -ENODEV) | |
270 | goto out_unreg; | |
271 | /* Call handler. */ | |
272 | if (sch->driver && sch->driver->termination) | |
273 | sch->driver->termination(&sch->dev); | |
274 | goto out_unlock; | |
275 | } | |
276 | ||
277 | /* trigger path verification. */ | |
278 | if (sch->driver && sch->driver->verify) | |
279 | sch->driver->verify(&sch->dev); | |
280 | out_unlock: | |
281 | spin_unlock(&sch->lock); | |
282 | return 0; | |
283 | out_unreg: | |
284 | spin_unlock(&sch->lock); | |
285 | sch->lpm = 0; | |
a8237fc4 | 286 | if (css_enqueue_subchannel_slow(sch->schid)) { |
1da177e4 LT |
287 | css_clear_subchannel_slow_list(); |
288 | need_rescan = 1; | |
289 | } | |
290 | return 0; | |
291 | } | |
292 | ||
293 | static inline void | |
294 | s390_set_chpid_offline( __u8 chpid) | |
295 | { | |
296 | char dbf_txt[15]; | |
a28c6944 | 297 | struct device *dev; |
1da177e4 LT |
298 | |
299 | sprintf(dbf_txt, "chpr%x", chpid); | |
300 | CIO_TRACE_EVENT(2, dbf_txt); | |
301 | ||
302 | if (get_chp_status(chpid) <= 0) | |
303 | return; | |
a28c6944 CH |
304 | dev = get_device(&css[0]->chps[chpid]->dev); |
305 | bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), | |
1da177e4 LT |
306 | s390_subchannel_remove_chpid); |
307 | ||
308 | if (need_rescan || css_slow_subchannels_exist()) | |
309 | queue_work(slow_path_wq, &slow_path_work); | |
a28c6944 | 310 | put_device(dev); |
1da177e4 LT |
311 | } |
312 | ||
f97a56fb CH |
313 | struct res_acc_data { |
314 | struct channel_path *chp; | |
315 | u32 fla_mask; | |
316 | u16 fla; | |
317 | }; | |
318 | ||
1da177e4 | 319 | static int |
f97a56fb | 320 | s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) |
1da177e4 LT |
321 | { |
322 | int found; | |
323 | int chp; | |
324 | int ccode; | |
325 | ||
326 | found = 0; | |
327 | for (chp = 0; chp <= 7; chp++) | |
328 | /* | |
329 | * check if chpid is in information updated by ssd | |
330 | */ | |
331 | if (sch->ssd_info.valid && | |
f97a56fb CH |
332 | sch->ssd_info.chpid[chp] == res_data->chp->id && |
333 | (sch->ssd_info.fla[chp] & res_data->fla_mask) | |
334 | == res_data->fla) { | |
1da177e4 LT |
335 | found = 1; |
336 | break; | |
337 | } | |
338 | ||
339 | if (found == 0) | |
340 | return 0; | |
341 | ||
342 | /* | |
343 | * Do a stsch to update our subchannel structure with the | |
344 | * new path information and eventually check for logically | |
345 | * offline chpids. | |
346 | */ | |
a8237fc4 | 347 | ccode = stsch(sch->schid, &sch->schib); |
1da177e4 LT |
348 | if (ccode > 0) |
349 | return 0; | |
350 | ||
351 | return 0x80 >> chp; | |
352 | } | |
353 | ||
f97a56fb CH |
354 | static inline int |
355 | s390_process_res_acc_new_sch(struct subchannel_id schid) | |
356 | { | |
357 | struct schib schib; | |
358 | int ret; | |
359 | /* | |
360 | * We don't know the device yet, but since a path | |
361 | * may be available now to the device we'll have | |
362 | * to do recognition again. | |
363 | * Since we don't have any idea about which chpid | |
364 | * that beast may be on we'll have to do a stsch | |
365 | * on all devices, grr... | |
366 | */ | |
367 | if (stsch(schid, &schib)) | |
368 | /* We're through */ | |
369 | return need_rescan ? -EAGAIN : -ENXIO; | |
370 | ||
371 | /* Put it on the slow path. */ | |
372 | ret = css_enqueue_subchannel_slow(schid); | |
373 | if (ret) { | |
374 | css_clear_subchannel_slow_list(); | |
375 | need_rescan = 1; | |
376 | return -EAGAIN; | |
377 | } | |
378 | return 0; | |
379 | } | |
380 | ||
1da177e4 | 381 | static int |
f97a56fb | 382 | __s390_process_res_acc(struct subchannel_id schid, void *data) |
1da177e4 | 383 | { |
f97a56fb CH |
384 | int chp_mask, old_lpm; |
385 | struct res_acc_data *res_data; | |
1da177e4 | 386 | struct subchannel *sch; |
f97a56fb CH |
387 | |
388 | res_data = (struct res_acc_data *)data; | |
389 | sch = get_subchannel_by_schid(schid); | |
390 | if (!sch) | |
391 | /* Check if a subchannel is newly available. */ | |
392 | return s390_process_res_acc_new_sch(schid); | |
393 | ||
394 | spin_lock_irq(&sch->lock); | |
395 | ||
396 | chp_mask = s390_process_res_acc_sch(res_data, sch); | |
397 | ||
398 | if (chp_mask == 0) { | |
399 | spin_unlock_irq(&sch->lock); | |
400 | return 0; | |
401 | } | |
402 | old_lpm = sch->lpm; | |
403 | sch->lpm = ((sch->schib.pmcw.pim & | |
404 | sch->schib.pmcw.pam & | |
405 | sch->schib.pmcw.pom) | |
406 | | chp_mask) & sch->opm; | |
407 | if (!old_lpm && sch->lpm) | |
408 | device_trigger_reprobe(sch); | |
409 | else if (sch->driver && sch->driver->verify) | |
410 | sch->driver->verify(&sch->dev); | |
411 | ||
412 | spin_unlock_irq(&sch->lock); | |
413 | put_device(&sch->dev); | |
414 | return (res_data->fla_mask == 0xffff) ? -ENODEV : 0; | |
415 | } | |
416 | ||
417 | ||
418 | static int | |
419 | s390_process_res_acc (struct res_acc_data *res_data) | |
420 | { | |
a8237fc4 | 421 | int rc; |
1da177e4 LT |
422 | char dbf_txt[15]; |
423 | ||
f97a56fb | 424 | sprintf(dbf_txt, "accpr%x", res_data->chp->id); |
1da177e4 | 425 | CIO_TRACE_EVENT( 2, dbf_txt); |
f97a56fb CH |
426 | if (res_data->fla != 0) { |
427 | sprintf(dbf_txt, "fla%x", res_data->fla); | |
1da177e4 LT |
428 | CIO_TRACE_EVENT( 2, dbf_txt); |
429 | } | |
430 | ||
431 | /* | |
432 | * I/O resources may have become accessible. | |
433 | * Scan through all subchannels that may be concerned and | |
434 | * do a validation on those. | |
435 | * The more information we have (info), the less scanning | |
436 | * will we have to do. | |
437 | */ | |
f97a56fb CH |
438 | rc = for_each_subchannel(__s390_process_res_acc, res_data); |
439 | if (css_slow_subchannels_exist()) | |
440 | rc = -EAGAIN; | |
441 | else if (rc != -EAGAIN) | |
442 | rc = 0; | |
1da177e4 LT |
443 | return rc; |
444 | } | |
445 | ||
446 | static int | |
447 | __get_chpid_from_lir(void *data) | |
448 | { | |
449 | struct lir { | |
450 | u8 iq; | |
451 | u8 ic; | |
452 | u16 sci; | |
453 | /* incident-node descriptor */ | |
454 | u32 indesc[28]; | |
455 | /* attached-node descriptor */ | |
456 | u32 andesc[28]; | |
457 | /* incident-specific information */ | |
458 | u32 isinfo[28]; | |
459 | } *lir; | |
460 | ||
461 | lir = (struct lir*) data; | |
462 | if (!(lir->iq&0x80)) | |
463 | /* NULL link incident record */ | |
464 | return -EINVAL; | |
465 | if (!(lir->indesc[0]&0xc0000000)) | |
466 | /* node descriptor not valid */ | |
467 | return -EINVAL; | |
468 | if (!(lir->indesc[0]&0x10000000)) | |
469 | /* don't handle device-type nodes - FIXME */ | |
470 | return -EINVAL; | |
471 | /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */ | |
472 | ||
473 | return (u16) (lir->indesc[0]&0x000000ff); | |
474 | } | |
475 | ||
476 | int | |
477 | chsc_process_crw(void) | |
478 | { | |
479 | int chpid, ret; | |
f97a56fb | 480 | struct res_acc_data res_data; |
1da177e4 LT |
481 | struct { |
482 | struct chsc_header request; | |
483 | u32 reserved1; | |
484 | u32 reserved2; | |
485 | u32 reserved3; | |
486 | struct chsc_header response; | |
487 | u32 reserved4; | |
488 | u8 flags; | |
489 | u8 vf; /* validity flags */ | |
490 | u8 rs; /* reporting source */ | |
491 | u8 cc; /* content code */ | |
492 | u16 fla; /* full link address */ | |
493 | u16 rsid; /* reporting source id */ | |
494 | u32 reserved5; | |
495 | u32 reserved6; | |
496 | u32 ccdf[96]; /* content-code dependent field */ | |
497 | /* ccdf has to be big enough for a link-incident record */ | |
498 | } *sei_area; | |
499 | ||
500 | if (!sei_page) | |
501 | return 0; | |
502 | /* | |
503 | * build the chsc request block for store event information | |
504 | * and do the call | |
505 | * This function is only called by the machine check handler thread, | |
506 | * so we don't need locking for the sei_page. | |
507 | */ | |
508 | sei_area = sei_page; | |
509 | ||
510 | CIO_TRACE_EVENT( 2, "prcss"); | |
511 | ret = 0; | |
512 | do { | |
513 | int ccode, status; | |
a28c6944 | 514 | struct device *dev; |
1da177e4 | 515 | memset(sei_area, 0, sizeof(*sei_area)); |
f97a56fb | 516 | memset(&res_data, 0, sizeof(struct res_acc_data)); |
1da177e4 LT |
517 | sei_area->request = (struct chsc_header) { |
518 | .length = 0x0010, | |
519 | .code = 0x000e, | |
520 | }; | |
521 | ||
522 | ccode = chsc(sei_area); | |
523 | if (ccode > 0) | |
524 | return 0; | |
525 | ||
526 | switch (sei_area->response.code) { | |
527 | /* for debug purposes, check for problems */ | |
528 | case 0x0001: | |
529 | CIO_CRW_EVENT(4, "chsc_process_crw: event information " | |
530 | "successfully stored\n"); | |
531 | break; /* everything ok */ | |
532 | case 0x0002: | |
533 | CIO_CRW_EVENT(2, | |
534 | "chsc_process_crw: invalid command!\n"); | |
535 | return 0; | |
536 | case 0x0003: | |
537 | CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc " | |
538 | "request block!\n"); | |
539 | return 0; | |
540 | case 0x0005: | |
541 | CIO_CRW_EVENT(2, "chsc_process_crw: no event " | |
542 | "information stored\n"); | |
543 | return 0; | |
544 | default: | |
545 | CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n", | |
546 | sei_area->response.code); | |
547 | return 0; | |
548 | } | |
549 | ||
550 | /* Check if we might have lost some information. */ | |
551 | if (sei_area->flags & 0x40) | |
552 | CIO_CRW_EVENT(2, "chsc_process_crw: Event information " | |
553 | "has been lost due to overflow!\n"); | |
554 | ||
555 | if (sei_area->rs != 4) { | |
556 | CIO_CRW_EVENT(2, "chsc_process_crw: reporting source " | |
557 | "(%04X) isn't a chpid!\n", | |
558 | sei_area->rsid); | |
559 | continue; | |
560 | } | |
561 | ||
562 | /* which kind of information was stored? */ | |
563 | switch (sei_area->cc) { | |
564 | case 1: /* link incident*/ | |
565 | CIO_CRW_EVENT(4, "chsc_process_crw: " | |
566 | "channel subsystem reports link incident," | |
567 | " reporting source is chpid %x\n", | |
568 | sei_area->rsid); | |
569 | chpid = __get_chpid_from_lir(sei_area->ccdf); | |
570 | if (chpid < 0) | |
571 | CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n", | |
572 | __FUNCTION__); | |
573 | else | |
574 | s390_set_chpid_offline(chpid); | |
575 | break; | |
576 | ||
577 | case 2: /* i/o resource accessibiliy */ | |
578 | CIO_CRW_EVENT(4, "chsc_process_crw: " | |
579 | "channel subsystem reports some I/O " | |
580 | "devices may have become accessible\n"); | |
581 | pr_debug("Data received after sei: \n"); | |
582 | pr_debug("Validity flags: %x\n", sei_area->vf); | |
583 | ||
584 | /* allocate a new channel path structure, if needed */ | |
585 | status = get_chp_status(sei_area->rsid); | |
586 | if (status < 0) | |
587 | new_channel_path(sei_area->rsid); | |
588 | else if (!status) | |
f97a56fb | 589 | break; |
a28c6944 CH |
590 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); |
591 | res_data.chp = to_channelpath(dev); | |
f97a56fb CH |
592 | pr_debug("chpid: %x", sei_area->rsid); |
593 | if ((sei_area->vf & 0xc0) != 0) { | |
594 | res_data.fla = sei_area->fla; | |
595 | if ((sei_area->vf & 0xc0) == 0xc0) { | |
596 | pr_debug(" full link addr: %x", | |
597 | sei_area->fla); | |
598 | res_data.fla_mask = 0xffff; | |
599 | } else { | |
600 | pr_debug(" link addr: %x", | |
601 | sei_area->fla); | |
602 | res_data.fla_mask = 0xff00; | |
603 | } | |
1da177e4 | 604 | } |
f97a56fb CH |
605 | ret = s390_process_res_acc(&res_data); |
606 | pr_debug("\n\n"); | |
a28c6944 | 607 | put_device(dev); |
1da177e4 LT |
608 | break; |
609 | ||
610 | default: /* other stuff */ | |
611 | CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n", | |
612 | sei_area->cc); | |
613 | break; | |
614 | } | |
615 | } while (sei_area->flags & 0x80); | |
616 | return ret; | |
617 | } | |
618 | ||
f97a56fb CH |
619 | static inline int |
620 | __chp_add_new_sch(struct subchannel_id schid) | |
621 | { | |
622 | struct schib schib; | |
623 | int ret; | |
624 | ||
625 | if (stsch(schid, &schib)) | |
626 | /* We're through */ | |
627 | return need_rescan ? -EAGAIN : -ENXIO; | |
628 | ||
629 | /* Put it on the slow path. */ | |
630 | ret = css_enqueue_subchannel_slow(schid); | |
631 | if (ret) { | |
632 | css_clear_subchannel_slow_list(); | |
633 | need_rescan = 1; | |
634 | return -EAGAIN; | |
635 | } | |
636 | return 0; | |
637 | } | |
638 | ||
639 | ||
1da177e4 | 640 | static int |
f97a56fb | 641 | __chp_add(struct subchannel_id schid, void *data) |
1da177e4 | 642 | { |
f97a56fb CH |
643 | int i; |
644 | struct channel_path *chp; | |
1da177e4 | 645 | struct subchannel *sch; |
f97a56fb CH |
646 | |
647 | chp = (struct channel_path *)data; | |
648 | sch = get_subchannel_by_schid(schid); | |
649 | if (!sch) | |
650 | /* Check if the subchannel is now available. */ | |
651 | return __chp_add_new_sch(schid); | |
652 | spin_lock(&sch->lock); | |
653 | for (i=0; i<8; i++) | |
654 | if (sch->schib.pmcw.chpid[i] == chp->id) { | |
655 | if (stsch(sch->schid, &sch->schib) != 0) { | |
656 | /* Endgame. */ | |
657 | spin_unlock(&sch->lock); | |
658 | return -ENXIO; | |
659 | } | |
660 | break; | |
661 | } | |
662 | if (i==8) { | |
663 | spin_unlock(&sch->lock); | |
664 | return 0; | |
665 | } | |
666 | sch->lpm = ((sch->schib.pmcw.pim & | |
667 | sch->schib.pmcw.pam & | |
668 | sch->schib.pmcw.pom) | |
669 | | 0x80 >> i) & sch->opm; | |
670 | ||
671 | if (sch->driver && sch->driver->verify) | |
672 | sch->driver->verify(&sch->dev); | |
673 | ||
674 | spin_unlock(&sch->lock); | |
675 | put_device(&sch->dev); | |
676 | return 0; | |
677 | } | |
678 | ||
679 | static int | |
680 | chp_add(int chpid) | |
681 | { | |
682 | int rc; | |
1da177e4 | 683 | char dbf_txt[15]; |
a28c6944 | 684 | struct device *dev; |
1da177e4 LT |
685 | |
686 | if (!get_chp_status(chpid)) | |
687 | return 0; /* no need to do the rest */ | |
688 | ||
689 | sprintf(dbf_txt, "cadd%x", chpid); | |
690 | CIO_TRACE_EVENT(2, dbf_txt); | |
691 | ||
a28c6944 CH |
692 | dev = get_device(&css[0]->chps[chpid]->dev); |
693 | rc = for_each_subchannel(__chp_add, to_channelpath(dev)); | |
f97a56fb CH |
694 | if (css_slow_subchannels_exist()) |
695 | rc = -EAGAIN; | |
696 | if (rc != -EAGAIN) | |
697 | rc = 0; | |
a28c6944 | 698 | put_device(dev); |
1da177e4 LT |
699 | return rc; |
700 | } | |
701 | ||
702 | /* | |
703 | * Handling of crw machine checks with channel path source. | |
704 | */ | |
705 | int | |
706 | chp_process_crw(int chpid, int on) | |
707 | { | |
708 | if (on == 0) { | |
709 | /* Path has gone. We use the link incident routine.*/ | |
710 | s390_set_chpid_offline(chpid); | |
711 | return 0; /* De-register is async anyway. */ | |
712 | } | |
713 | /* | |
714 | * Path has come. Allocate a new channel path structure, | |
715 | * if needed. | |
716 | */ | |
717 | if (get_chp_status(chpid) < 0) | |
718 | new_channel_path(chpid); | |
719 | /* Avoid the extra overhead in process_rec_acc. */ | |
720 | return chp_add(chpid); | |
721 | } | |
722 | ||
723 | static inline int | |
724 | __check_for_io_and_kill(struct subchannel *sch, int index) | |
725 | { | |
726 | int cc; | |
727 | ||
728 | if (!device_is_online(sch)) | |
729 | /* cio could be doing I/O. */ | |
730 | return 0; | |
a8237fc4 | 731 | cc = stsch(sch->schid, &sch->schib); |
1da177e4 LT |
732 | if (cc) |
733 | return 0; | |
734 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { | |
735 | device_set_waiting(sch); | |
736 | return 1; | |
737 | } | |
738 | return 0; | |
739 | } | |
740 | ||
741 | static inline void | |
742 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | |
743 | { | |
744 | int chp, old_lpm; | |
745 | unsigned long flags; | |
746 | ||
747 | if (!sch->ssd_info.valid) | |
748 | return; | |
749 | ||
750 | spin_lock_irqsave(&sch->lock, flags); | |
751 | old_lpm = sch->lpm; | |
752 | for (chp = 0; chp < 8; chp++) { | |
753 | if (sch->ssd_info.chpid[chp] != chpid) | |
754 | continue; | |
755 | ||
756 | if (on) { | |
757 | sch->opm |= (0x80 >> chp); | |
758 | sch->lpm |= (0x80 >> chp); | |
759 | if (!old_lpm) | |
760 | device_trigger_reprobe(sch); | |
761 | else if (sch->driver && sch->driver->verify) | |
762 | sch->driver->verify(&sch->dev); | |
763 | } else { | |
764 | sch->opm &= ~(0x80 >> chp); | |
765 | sch->lpm &= ~(0x80 >> chp); | |
766 | /* | |
767 | * Give running I/O a grace period in which it | |
768 | * can successfully terminate, even using the | |
769 | * just varied off path. Then kill it. | |
770 | */ | |
771 | if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { | |
a8237fc4 | 772 | if (css_enqueue_subchannel_slow(sch->schid)) { |
1da177e4 LT |
773 | css_clear_subchannel_slow_list(); |
774 | need_rescan = 1; | |
775 | } | |
776 | } else if (sch->driver && sch->driver->verify) | |
777 | sch->driver->verify(&sch->dev); | |
778 | } | |
779 | break; | |
780 | } | |
781 | spin_unlock_irqrestore(&sch->lock, flags); | |
782 | } | |
783 | ||
784 | static int | |
785 | s390_subchannel_vary_chpid_off(struct device *dev, void *data) | |
786 | { | |
787 | struct subchannel *sch; | |
788 | __u8 *chpid; | |
789 | ||
790 | sch = to_subchannel(dev); | |
791 | chpid = data; | |
792 | ||
793 | __s390_subchannel_vary_chpid(sch, *chpid, 0); | |
794 | return 0; | |
795 | } | |
796 | ||
797 | static int | |
798 | s390_subchannel_vary_chpid_on(struct device *dev, void *data) | |
799 | { | |
800 | struct subchannel *sch; | |
801 | __u8 *chpid; | |
802 | ||
803 | sch = to_subchannel(dev); | |
804 | chpid = data; | |
805 | ||
806 | __s390_subchannel_vary_chpid(sch, *chpid, 1); | |
807 | return 0; | |
808 | } | |
809 | ||
f97a56fb CH |
810 | static int |
811 | __s390_vary_chpid_on(struct subchannel_id schid, void *data) | |
812 | { | |
813 | struct schib schib; | |
814 | struct subchannel *sch; | |
815 | ||
816 | sch = get_subchannel_by_schid(schid); | |
817 | if (sch) { | |
818 | put_device(&sch->dev); | |
819 | return 0; | |
820 | } | |
821 | if (stsch(schid, &schib)) | |
822 | /* We're through */ | |
823 | return -ENXIO; | |
824 | /* Put it on the slow path. */ | |
825 | if (css_enqueue_subchannel_slow(schid)) { | |
826 | css_clear_subchannel_slow_list(); | |
827 | need_rescan = 1; | |
828 | return -EAGAIN; | |
829 | } | |
830 | return 0; | |
831 | } | |
832 | ||
1da177e4 LT |
833 | /* |
834 | * Function: s390_vary_chpid | |
835 | * Varies the specified chpid online or offline | |
836 | */ | |
837 | static int | |
838 | s390_vary_chpid( __u8 chpid, int on) | |
839 | { | |
840 | char dbf_text[15]; | |
f97a56fb | 841 | int status; |
1da177e4 LT |
842 | |
843 | sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); | |
844 | CIO_TRACE_EVENT( 2, dbf_text); | |
845 | ||
846 | status = get_chp_status(chpid); | |
847 | if (status < 0) { | |
848 | printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid); | |
849 | return -EINVAL; | |
850 | } | |
851 | ||
852 | if (!on && !status) { | |
853 | printk(KERN_ERR "chpid %x is already offline\n", chpid); | |
854 | return -EINVAL; | |
855 | } | |
856 | ||
857 | set_chp_logically_online(chpid, on); | |
858 | ||
859 | /* | |
860 | * Redo PathVerification on the devices the chpid connects to | |
861 | */ | |
862 | ||
863 | bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? | |
864 | s390_subchannel_vary_chpid_on : | |
865 | s390_subchannel_vary_chpid_off); | |
f97a56fb CH |
866 | if (on) |
867 | /* Scan for new devices on varied on path. */ | |
868 | for_each_subchannel(__s390_vary_chpid_on, NULL); | |
1da177e4 LT |
869 | if (need_rescan || css_slow_subchannels_exist()) |
870 | queue_work(slow_path_wq, &slow_path_work); | |
871 | return 0; | |
872 | } | |
873 | ||
874 | /* | |
875 | * Files for the channel path entries. | |
876 | */ | |
877 | static ssize_t | |
3fd3c0a5 | 878 | chp_status_show(struct device *dev, struct device_attribute *attr, char *buf) |
1da177e4 LT |
879 | { |
880 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | |
881 | ||
882 | if (!chp) | |
883 | return 0; | |
884 | return (get_chp_status(chp->id) ? sprintf(buf, "online\n") : | |
885 | sprintf(buf, "offline\n")); | |
886 | } | |
887 | ||
888 | static ssize_t | |
3fd3c0a5 | 889 | chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
1da177e4 LT |
890 | { |
891 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | |
892 | char cmd[10]; | |
893 | int num_args; | |
894 | int error; | |
895 | ||
896 | num_args = sscanf(buf, "%5s", cmd); | |
897 | if (!num_args) | |
898 | return count; | |
899 | ||
900 | if (!strnicmp(cmd, "on", 2)) | |
901 | error = s390_vary_chpid(cp->id, 1); | |
902 | else if (!strnicmp(cmd, "off", 3)) | |
903 | error = s390_vary_chpid(cp->id, 0); | |
904 | else | |
905 | error = -EINVAL; | |
906 | ||
907 | return error < 0 ? error : count; | |
908 | ||
909 | } | |
910 | ||
911 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | |
912 | ||
913 | static ssize_t | |
3fd3c0a5 | 914 | chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) |
1da177e4 LT |
915 | { |
916 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | |
917 | ||
918 | if (!chp) | |
919 | return 0; | |
920 | return sprintf(buf, "%x\n", chp->desc.desc); | |
921 | } | |
922 | ||
923 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | |
924 | ||
925 | static struct attribute * chp_attrs[] = { | |
926 | &dev_attr_status.attr, | |
927 | &dev_attr_type.attr, | |
928 | NULL, | |
929 | }; | |
930 | ||
931 | static struct attribute_group chp_attr_group = { | |
932 | .attrs = chp_attrs, | |
933 | }; | |
934 | ||
935 | static void | |
936 | chp_release(struct device *dev) | |
937 | { | |
938 | struct channel_path *cp; | |
939 | ||
940 | cp = container_of(dev, struct channel_path, dev); | |
941 | kfree(cp); | |
942 | } | |
943 | ||
944 | static int | |
945 | chsc_determine_channel_path_description(int chpid, | |
946 | struct channel_path_desc *desc) | |
947 | { | |
948 | int ccode, ret; | |
949 | ||
950 | struct { | |
951 | struct chsc_header request; | |
952 | u32 : 24; | |
953 | u32 first_chpid : 8; | |
954 | u32 : 24; | |
955 | u32 last_chpid : 8; | |
956 | u32 zeroes1; | |
957 | struct chsc_header response; | |
958 | u32 zeroes2; | |
959 | struct channel_path_desc desc; | |
960 | } *scpd_area; | |
961 | ||
962 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | |
963 | if (!scpd_area) | |
964 | return -ENOMEM; | |
965 | ||
966 | scpd_area->request = (struct chsc_header) { | |
967 | .length = 0x0010, | |
968 | .code = 0x0002, | |
969 | }; | |
970 | ||
971 | scpd_area->first_chpid = chpid; | |
972 | scpd_area->last_chpid = chpid; | |
973 | ||
974 | ccode = chsc(scpd_area); | |
975 | if (ccode > 0) { | |
976 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | |
977 | goto out; | |
978 | } | |
979 | ||
980 | switch (scpd_area->response.code) { | |
981 | case 0x0001: /* Success. */ | |
982 | memcpy(desc, &scpd_area->desc, | |
983 | sizeof(struct channel_path_desc)); | |
984 | ret = 0; | |
985 | break; | |
986 | case 0x0003: /* Invalid block. */ | |
987 | case 0x0007: /* Invalid format. */ | |
988 | case 0x0008: /* Other invalid block. */ | |
989 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | |
990 | ret = -EINVAL; | |
991 | break; | |
992 | case 0x0004: /* Command not provided in model. */ | |
993 | CIO_CRW_EVENT(2, "Model does not provide scpd\n"); | |
994 | ret = -EOPNOTSUPP; | |
995 | break; | |
996 | default: | |
997 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | |
998 | scpd_area->response.code); | |
999 | ret = -EIO; | |
1000 | } | |
1001 | out: | |
1002 | free_page((unsigned long)scpd_area); | |
1003 | return ret; | |
1004 | } | |
1005 | ||
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
1010 | static int | |
1011 | new_channel_path(int chpid) | |
1012 | { | |
1013 | struct channel_path *chp; | |
1014 | int ret; | |
1015 | ||
1016 | chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL); | |
1017 | if (!chp) | |
1018 | return -ENOMEM; | |
1019 | memset(chp, 0, sizeof(struct channel_path)); | |
1020 | ||
1021 | /* fill in status, etc. */ | |
1022 | chp->id = chpid; | |
1023 | chp->state = 1; | |
1024 | chp->dev = (struct device) { | |
a28c6944 | 1025 | .parent = &css[0]->device, |
1da177e4 LT |
1026 | .release = chp_release, |
1027 | }; | |
1028 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); | |
1029 | ||
1030 | /* Obtain channel path description and fill it in. */ | |
1031 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | |
1032 | if (ret) | |
1033 | goto out_free; | |
1034 | ||
1035 | /* make it known to the system */ | |
1036 | ret = device_register(&chp->dev); | |
1037 | if (ret) { | |
1038 | printk(KERN_WARNING "%s: could not register %02x\n", | |
1039 | __func__, chpid); | |
1040 | goto out_free; | |
1041 | } | |
1042 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | |
1043 | if (ret) { | |
1044 | device_unregister(&chp->dev); | |
1045 | goto out_free; | |
1046 | } else | |
a28c6944 | 1047 | css[0]->chps[chpid] = chp; |
1da177e4 LT |
1048 | return ret; |
1049 | out_free: | |
1050 | kfree(chp); | |
1051 | return ret; | |
1052 | } | |
1053 | ||
1054 | void * | |
1055 | chsc_get_chp_desc(struct subchannel *sch, int chp_no) | |
1056 | { | |
1057 | struct channel_path *chp; | |
1058 | struct channel_path_desc *desc; | |
1059 | ||
a28c6944 | 1060 | chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]]; |
1da177e4 LT |
1061 | if (!chp) |
1062 | return NULL; | |
1063 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | |
1064 | if (!desc) | |
1065 | return NULL; | |
1066 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | |
1067 | return desc; | |
1068 | } | |
1069 | ||
1070 | ||
1071 | static int __init | |
1072 | chsc_alloc_sei_area(void) | |
1073 | { | |
1074 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | |
1075 | if (!sei_page) | |
1076 | printk(KERN_WARNING"Can't allocate page for processing of " \ | |
1077 | "chsc machine checks!\n"); | |
1078 | return (sei_page ? 0 : -ENOMEM); | |
1079 | } | |
1080 | ||
1081 | subsys_initcall(chsc_alloc_sei_area); | |
1082 | ||
1083 | struct css_general_char css_general_characteristics; | |
1084 | struct css_chsc_char css_chsc_characteristics; | |
1085 | ||
1086 | int __init | |
1087 | chsc_determine_css_characteristics(void) | |
1088 | { | |
1089 | int result; | |
1090 | struct { | |
1091 | struct chsc_header request; | |
1092 | u32 reserved1; | |
1093 | u32 reserved2; | |
1094 | u32 reserved3; | |
1095 | struct chsc_header response; | |
1096 | u32 reserved4; | |
1097 | u32 general_char[510]; | |
1098 | u32 chsc_char[518]; | |
1099 | } *scsc_area; | |
1100 | ||
1101 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | |
1102 | if (!scsc_area) { | |
1103 | printk(KERN_WARNING"cio: Was not able to determine available" \ | |
1104 | "CHSCs due to no memory.\n"); | |
1105 | return -ENOMEM; | |
1106 | } | |
1107 | ||
1108 | scsc_area->request = (struct chsc_header) { | |
1109 | .length = 0x0010, | |
1110 | .code = 0x0010, | |
1111 | }; | |
1112 | ||
1113 | result = chsc(scsc_area); | |
1114 | if (result) { | |
1115 | printk(KERN_WARNING"cio: Was not able to determine " \ | |
1116 | "available CHSCs, cc=%i.\n", result); | |
1117 | result = -EIO; | |
1118 | goto exit; | |
1119 | } | |
1120 | ||
1121 | if (scsc_area->response.code != 1) { | |
1122 | printk(KERN_WARNING"cio: Was not able to determine " \ | |
1123 | "available CHSCs.\n"); | |
1124 | result = -EIO; | |
1125 | goto exit; | |
1126 | } | |
1127 | memcpy(&css_general_characteristics, scsc_area->general_char, | |
1128 | sizeof(css_general_characteristics)); | |
1129 | memcpy(&css_chsc_characteristics, scsc_area->chsc_char, | |
1130 | sizeof(css_chsc_characteristics)); | |
1131 | exit: | |
1132 | free_page ((unsigned long) scsc_area); | |
1133 | return result; | |
1134 | } | |
1135 | ||
1136 | EXPORT_SYMBOL_GPL(css_general_characteristics); | |
1137 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); |