]> pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/s390/cio/chsc.c
[S390] stp support.
[linux-2.6-omap-h63xx.git] / drivers / s390 / cio / chsc.c
1 /*
2  *  drivers/s390/cio/chsc.c
3  *   S/390 common I/O routines -- channel subsystem call
4  *
5  *    Copyright IBM Corp. 1999,2008
6  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
7  *               Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Arnd Bergmann (arndb@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/device.h>
15
16 #include <asm/cio.h>
17 #include <asm/chpid.h>
18
19 #include "../s390mach.h"
20 #include "css.h"
21 #include "cio.h"
22 #include "cio_debug.h"
23 #include "ioasm.h"
24 #include "chp.h"
25 #include "chsc.h"
26
/* Single DMA-capable page used for store-event-information (SEI) CHSC
 * requests; allocated in chsc_alloc_sei_area(). Access is serialized
 * through the machine-check handler thread (see chsc_process_crw()). */
static void *sei_page;
28
/*
 * Translate a CHSC response code into a Linux errno value.
 * 0x0001 signals success; a handful of codes indicate an invalid
 * request (-EINVAL); 0x0004 means the command is not supported
 * (-EOPNOTSUPP); anything else is reported as a generic I/O error.
 */
static int chsc_error_from_response(int response)
{
	if (response == 0x0001)
		return 0;
	if (response == 0x0004)
		return -EOPNOTSUPP;
	if (response == 0x0002 || response == 0x0003 ||
	    (response >= 0x0006 && response <= 0x0008) ||
	    response == 0x000a)
		return -EINVAL;
	return -EIO;
}
47
/* Request/response area for the store-subchannel-description CHSC
 * command (code 0x0004). Layout is dictated by the hardware interface
 * and must not be changed; unnamed bitfields are reserved bits. */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
71
/*
 * chsc_get_ssd_info - retrieve subchannel description data via CHSC
 * @schid: subchannel to query (first == last subchannel in the request)
 * @ssd: output buffer, zeroed and filled on success
 *
 * Returns 0 on success, -ENOMEM if no DMA page is available, -ENODEV/
 * -EBUSY on condition codes 3/1-2 from the chsc instruction, or a
 * negative errno derived from the CHSC response code.
 */
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	/* The command area must be addressable by the channel subsystem
	 * (GFP_DMA, i.e. below 2GB on s390). */
	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		/* cc 3: command not available; cc 1/2: busy, retryable. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	/* Only I/O and message subchannels carry path information;
	 * other types leave the zeroed ssd as-is and still succeed. */
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	/* Copy only chpids/flas whose bit is set in the respective mask;
	 * mask bit 0x80 corresponds to array index 0. */
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}
129
/*
 * Per-subchannel callback for channel-path removal: notify the
 * subchannel driver that a chpid went offline. If the driver reports
 * a non-zero result, clear the logical path mask and schedule the
 * subchannel for re-evaluation. Always returns 0 so the iteration
 * over all subchannels continues.
 */
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	/* Drop the lock before scheduling the slow-path evaluation. */
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}
145
/*
 * chsc_chp_offline - propagate a channel-path-offline event
 * @chpid: the channel path that went offline
 *
 * Does nothing if the channel path is not known or not configured
 * (chp_get_status() <= 0); otherwise notifies all staged subchannels
 * via s390_subchannel_remove_chpid().
 */
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}
159
/*
 * Callback for subchannels not yet registered: if the subchannel
 * exists according to stsch, queue it for slow-path evaluation so
 * device recognition can pick it up. Returns -ENXIO to stop the
 * iteration once stsch fails (no more subchannels).
 */
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
179
/*
 * Per-subchannel callback for a resource-accessibility event: forward
 * CHP_ONLINE to the subchannel driver under the subchannel lock.
 * Always returns 0 so the iteration continues over all subchannels.
 */
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}
189
/*
 * Handle an I/O-resource-accessibility event for the channel path /
 * link address described by @res_data: notify registered subchannels
 * (CHP_ONLINE) and schedule evaluation of so-far unknown ones.
 */
static void s390_process_res_acc (struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	/* Trace the full link address too, if one was supplied. */
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, res_data);
}
213
/*
 * Extract the channel-path ID from a link-incident record (LIR).
 * Returns the chpid (0-255) on success or -EINVAL for NULL records,
 * invalid node descriptors, or device-type nodes (unhandled).
 */
static int
__get_chpid_from_lir(void *data)
{
	/* Layout of a link-incident record as delivered in the
	 * content-code dependent field of an SEI response. */
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}
243
/* Request/response area for the store-event-information CHSC command
 * (code 0x000e). One hardware-defined page-sized layout; do not alter
 * field order or sizes. */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
262
/*
 * Handle an SEI link-incident event (content code 1): parse the LIR
 * from the content-code dependent field and take the affected channel
 * path offline. Only reporting source 4 is handled.
 */
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}
281
/*
 * Handle an SEI resource-accessibility event (content code 2): the
 * reporting-source ID is taken as the channel-path ID. A channel-path
 * structure is created if none exists yet; if the path is known but
 * not configured, the event is ignored. The validity flags select
 * whether a full link address (0xffff mask) or only a link address
 * (0xff00 mask) accompanies the event.
 */
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}
313
/* Channel-path-configuration notification payload: a bitmap of
 * affected chpids plus the requested operation and (unused here)
 * a pc field. Overlaid onto the SEI ccdf; layout is fixed. */
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};
319
/*
 * Handle an SEI channel-path-configuration notification (content
 * code 8): for every chpid set in the bitmap, schedule the requested
 * operation — op 0: configure, op 1: deconfigure, op 2: cancel a
 * pending deconfigure. Other op values are silently ignored.
 * Only reporting source 0 is handled.
 */
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}
350
/*
 * Dispatch one store-event-information response to the handler for
 * its content code. If the overflow flag (0x40) is set, events may
 * have been lost, so all subchannels are scheduled for re-evaluation
 * before dispatching.
 */
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibiliy */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}
375
/*
 * CRW (channel report word) handler for the channel subsystem: drain
 * pending events with repeated store-event-information requests and
 * process each one. On CRW overflow, skip draining and re-evaluate
 * all subchannels instead. crw1 is unused for this reporting source.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	/* Bail out if the SEI page was never allocated. */
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	/* Keep fetching events while flags bit 0x80 indicates that more
	 * event information is pending. */
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}
412
/*
 * chsc_chp_online - propagate a channel-path-online event
 * @chpid: the channel path that came online
 *
 * Notifies all registered subchannels (CHP_ONLINE) for a configured
 * channel path; does nothing if the path status is 0.
 */
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct res_acc_data res_data;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&res_data, 0, sizeof(struct res_acc_data));
		res_data.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &res_data);
	}
}
430
/*
 * Forward a vary-on/off event for @chpid to one subchannel's driver
 * under its lock. Uses irqsave locking since callers may run in
 * differing contexts.
 */
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct res_acc_data res_data;

	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &res_data,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}
445
/* Iterator callback: vary the given channel path offline for @sch.
 * Always returns 0 to continue the iteration. */
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}
453
/* Iterator callback: vary the given channel path online for @sch.
 * Always returns 0 to continue the iteration. */
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
461
/*
 * Iterator callback for not-yet-registered subchannels during vary-on:
 * schedule any existing subchannel for slow-path evaluation. Returns
 * -ENXIO to stop the iteration once stsch fails (no such subchannel).
 */
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
474
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channl-path ID
 * @on: non-zero for vary online, zero for vary offline
 *
 * Returns 0 unconditionally; the work itself is delegated to the
 * per-subchannel callbacks and the slow-path evaluation machinery.
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	/* Vary-on additionally scans for subchannels that may have become
	 * visible; vary-off only touches registered subchannels. */
	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
497
498 static void
499 chsc_remove_cmg_attr(struct channel_subsystem *css)
500 {
501         int i;
502
503         for (i = 0; i <= __MAX_CHPID; i++) {
504                 if (!css->chps[i])
505                         continue;
506                 chp_remove_cmg_attr(css->chps[i]);
507         }
508 }
509
/*
 * Add channel-measurement attributes to every channel path known to
 * @css. On failure, roll back the attributes added so far (in reverse
 * order) and return the error; returns 0 on success.
 */
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	/* Undo only the entries added before the failing index i. */
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
532
533 static int
534 __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
535 {
536         struct {
537                 struct chsc_header request;
538                 u32 operation_code : 2;
539                 u32 : 30;
540                 u32 key : 4;
541                 u32 : 28;
542                 u32 zeroes1;
543                 u32 cub_addr1;
544                 u32 zeroes2;
545                 u32 cub_addr2;
546                 u32 reserved[13];
547                 struct chsc_header response;
548                 u32 status : 8;
549                 u32 : 4;
550                 u32 fmt : 4;
551                 u32 : 16;
552         } __attribute__ ((packed)) *secm_area;
553         int ret, ccode;
554
555         secm_area = page;
556         secm_area->request.length = 0x0050;
557         secm_area->request.code = 0x0016;
558
559         secm_area->key = PAGE_DEFAULT_KEY;
560         secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
561         secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
562
563         secm_area->operation_code = enable ? 0 : 1;
564
565         ccode = chsc(secm_area);
566         if (ccode > 0)
567                 return (ccode == 3) ? -ENODEV : -EBUSY;
568
569         switch (secm_area->response.code) {
570         case 0x0102:
571         case 0x0103:
572                 ret = -EINVAL;
573         default:
574                 ret = chsc_error_from_response(secm_area->response.code);
575         }
576         if (ret != 0)
577                 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
578                               secm_area->response.code);
579         return ret;
580 }
581
/*
 * chsc_secm - enable or disable channel measurement for a css
 * @css: the channel subsystem
 * @enable: non-zero to enable, zero to disable
 *
 * Allocates the channel-utilization blocks (cub_addr1/2) on first
 * enable, issues the secm command, and keeps the sysfs measurement
 * attributes in sync with the resulting state. If attribute creation
 * fails after enabling, measurement is switched off again. The cub
 * pages are freed whenever measurement ends up disabled.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void  *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL |  GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		/* free_page(0) is a no-op, so partial allocation is safe
		 * to unwind unconditionally. */
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Attribute setup failed: roll measurement
				 * back off with a fresh command area. */
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}
622
/*
 * chsc_determine_channel_path_description - fetch a channel-path
 * description via the store-channel-path-description CHSC command
 * (code 0x0002) for a single chpid (first == last).
 * @chpid: channel path to query
 * @desc: output buffer filled on success
 *
 * Returns 0 on success, -ENOMEM/-ENODEV/-EBUSY, or an errno derived
 * from the CHSC response code.
 */
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	/* Hardware-defined command area layout. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
668
/*
 * Store the measurement characteristics reported for a channel path.
 * Only cmg types 2 and 3 carry cmg-dependent data; for those, copy
 * each value whose validity bit is set in @cmcv (bit 0x10 downward,
 * i.e. mask 0x80 >> (i + 3)) and zero the rest. Allocation failure is
 * tolerated silently — chp->cmg_chars simply stays NULL.
 */
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}
697
/*
 * chsc_get_channel_measurement_chars - read measurement characteristics
 * for one channel path via the store-channel-measurement-characteristics
 * CHSC command (code 0x0022).
 * @chp: channel path; cmg, shared and cmg_chars are updated on success.
 *
 * If the response marks the data as not valid, cmg and shared are set
 * to -1. Returns 0 on success, -ENOMEM/-ENODEV/-EBUSY, or an errno
 * derived from the CHSC response code.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	/* Hardware-defined command area layout. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
760
761 int __init chsc_alloc_sei_area(void)
762 {
763         int ret;
764
765         sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
766         if (!sei_page) {
767                 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
768                               "chsc machine checks!\n");
769                 return -ENOMEM;
770         }
771         ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
772         if (ret)
773                 kfree(sei_page);
774         return ret;
775 }
776
777 void __init chsc_free_sei_area(void)
778 {
779         s390_unregister_crw_handler(CRW_RSC_CSS);
780         kfree(sei_page);
781 }
782
/*
 * chsc_enable_facility - enable a dynamic CHSC facility via the
 * set-domain-attributes command (code 0x0031).
 * @operation_code: facility-specific operation code
 *
 * Returns 0 on success, -ENOMEM/-ENODEV/-EBUSY, -EOPNOTSUPP for
 * response code 0x0101 (facility not available), or an errno derived
 * from the CHSC response code.
 */
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	/* Hardware-defined command area layout (one full page). */
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	/* ret first holds the chsc condition code, then the errno. */
	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
 out:
	free_page((unsigned long)sda_area);
	return ret;
}
829
/* Global characteristic data filled in once at init time by
 * chsc_determine_css_characteristics() and exported below. */
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
832
/*
 * chsc_determine_css_characteristics - read channel-subsystem
 * characteristics via the store-channel-subsystem-characteristics
 * command (code 0x0010) and cache them in the global variables above.
 *
 * Returns 0 on success, -ENOMEM/-ENODEV/-EBUSY, or an errno derived
 * from the CHSC response code.
 */
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	/* Hardware-defined command area layout. */
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		/* Only the leading bytes that fit the global structures
		 * are kept; the rest of the response is discarded. */
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}
874
875 EXPORT_SYMBOL_GPL(css_general_characteristics);
876 EXPORT_SYMBOL_GPL(css_chsc_characteristics);
877
/*
 * chsc_sstpc - issue a set-STP-control CHSC command (code 0x0033)
 * @page: caller-supplied DMA-capable page used as the command area
 * @op: operation code for the request
 * @ctrl: control parameter for the request
 *
 * Returns 0 on success, -EIO if the chsc instruction or the response
 * code (anything but 0x0001) indicates failure.
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	/* Hardware-defined command area layout. */
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}
904
/*
 * chsc_sstpi - issue a store-STP-information CHSC command (code 0x0038)
 * @page: caller-supplied DMA-capable page used as the command area
 * @result: buffer receiving @size bytes of response data
 * @size: number of data bytes to copy out
 *
 * NOTE: uses a GNU C variable-length array inside the packed struct to
 * overlay the data area; @size must be small enough that the struct
 * fits within the page (not checked here).
 *
 * Returns 0 on success, -EIO if the chsc instruction or the response
 * code (anything but 0x0001) indicates failure. @result is written
 * even on a bad response code; callers rely on the return value.
 */
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}
925