/*
 * drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include "cio_debug.h"

static void *sei_page;

static int new_channel_path(int chpid);

set_chp_logically_online(int chp, int onoff)
        css[0]->chps[chp]->state = onoff;

get_chp_status(int chp)
        return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
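/*
 * Walk the eight chpids of a subchannel and disable use of any path
 * whose channel path is not logically online.
 */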
chsc_validate_chpids(struct subchannel *sch)
        for (chp = 0; chp <= 7; chp++) {
                if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
                        /* disable using this path */

chpid_is_actually_online(int chp)
        state = get_chp_status(chp);
                queue_work(slow_path_wq, &slow_path_work);
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
                struct chsc_header request;
                u16 f_sch;        /* first subchannel */
                u16 l_sch;        /* last subchannel */
                struct chsc_header response;
                u8 st : 3;        /* subchannel type */
                u8 unit_addr;     /* unit address */
                u16 devno;        /* device number */
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */

        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (ssd_area->response.code) {
        case 0x0001:   /* everything ok */
                CIO_CRW_EVENT(2, "Invalid command!\n");
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         * 2: Message subchannel: All fields except unit_addr
         * 3: ADM subchannel:     Only sch_val, st and sch
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,

        sch->ssd_info.valid = 1;
        sch->ssd_info.type = ssd_area->st;

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j] = ssd_area->fla[j];
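/*
 * Obtain the subchannel description (ssd) for @sch and allocate channel
 * path structures for any chpids found there that are not known yet.
 */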
css_get_ssd_info(struct subchannel *sch)

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        spin_lock_irq(&sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                        cio_chsc_err_msg = 1;
        spin_unlock_irq(&sch->lock);
        free_page((unsigned long)page);

        /* Allocate channel path structures, if needed. */
        for (j = 0; j < 8; j++) {
                chpid = sch->ssd_info.chpid[j];
                if (chpid && (get_chp_status(chpid) < 0))
                        new_channel_path(chpid);
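/*
 * Callback for bus_for_each_dev, run for every subchannel when a channel
 * path goes away: refresh the schib, terminate any I/O still running over
 * the dead path and trigger path verification on the remaining paths.
 */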
s390_subchannel_remove_chpid(struct device *dev, void *data)

        struct subchannel *sch;
        struct channel_path *chpid;

        sch = to_subchannel(dev);
        for (j = 0; j < 8; j++)
                if (sch->schib.pmcw.chpid[j] == chpid->id)

        spin_lock_irq(&sch->lock);

        stsch(sch->schid, &schib);
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
        if (sch->vpm == mask)

        if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
                                     SCSW_ACTL_HALT_PEND |
                                     SCSW_ACTL_START_PEND |
                                     SCSW_ACTL_RESUME_PEND)) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc = cio_cancel(sch);

                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);

        } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
                   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
                   (sch->schib.pmcw.lpum == mask)) {

                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(&sch->lock);

        spin_unlock_irq(&sch->lock);
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
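/*
 * Called when a channel path is no longer available: walk all devices on
 * the css bus and remove the chpid from each affected subchannel, then
 * kick the slow-path work queue if re-evaluation is needed.
 */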
s390_set_chpid_offline( __u8 chpid)

        sprintf(dbf_txt, "chpr%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (get_chp_status(chpid) <= 0)

        dev = get_device(&css[0]->chps[chpid]->dev);
        bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);

struct res_acc_data {
        struct channel_path *chp;

s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)

        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if the chpid is in the information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chp->id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and possibly check for logically
         */
        ccode = stsch(sch->schid, &sch->schib);
s390_process_res_acc_new_sch(struct subchannel_id schid)

        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
                css_clear_subchannel_slow_list();

__s390_process_res_acc(struct subchannel_id schid, void *data)

        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = (struct res_acc_data *)data;
        sch = get_subchannel_by_schid(schid);
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(&sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);
                spin_unlock_irq(&sch->lock);

        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(&sch->lock);
        put_device(&sch->dev);
        return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
s390_process_res_acc (struct res_acc_data *res_data)

        sprintf(dbf_txt, "accpr%x", res_data->chp->id);
        CIO_TRACE_EVENT( 2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT( 2, dbf_txt);

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
        else if (rc != -EAGAIN)
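/*
 * Extract the chpid from a link incident record (LIR). Records that are
 * empty, have an invalid node descriptor or describe a device-type node
 * are not usable.
 */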
__get_chpid_from_lir(void *data)

                /* incident-node descriptor */
                /* attached-node descriptor */
                /* incident-specific information */

        lir = (struct lir*) data;
                /* NULL link incident record */
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
        return (u16) (lir->indesc[0]&0x000000ff);
chsc_process_crw(void)

        struct res_acc_data res_data;
                struct chsc_header request;
                struct chsc_header response;
                u8 vf;          /* validity flags */
                u8 rs;          /* reporting source */
                u8 cc;          /* content code */
                u16 fla;        /* full link address */
                u16 rsid;       /* reporting source id */
                u32 ccdf[96];   /* content-code dependent field */
                /* ccdf has to be big enough for a link-incident record */

        /*
         * build the chsc request block for store event information
         * This function is only called by the machine check handler thread,
         * so we don't need locking for the sei_page.
         */
        CIO_TRACE_EVENT( 2, "prcss");

                memset(sei_area, 0, sizeof(*sei_area));
                memset(&res_data, 0, sizeof(struct res_acc_data));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;

                ccode = chsc(sei_area);

                switch (sei_area->response.code) {
                /* for debug purposes, check for problems */
                        CIO_CRW_EVENT(4, "chsc_process_crw: event information "
                                      "successfully stored\n");
                        break; /* everything ok */
                                      "chsc_process_crw: invalid command!\n");
                        CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
                        CIO_CRW_EVENT(2, "chsc_process_crw: no event "
                                      "information stored\n");
                        CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
                                      sei_area->response.code);

                /* Check if we might have lost some information. */
                if (sei_area->flags & 0x40)
                        CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
                                      "has been lost due to overflow!\n");

                if (sei_area->rs != 4) {
                        CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
                                      "(%04X) isn't a chpid!\n",
                /* which kind of information was stored? */
                switch (sei_area->cc) {
                case 1: /* link incident */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports link incident,"
                                      " reporting source is chpid %x\n",
                        chpid = __get_chpid_from_lir(sei_area->ccdf);
                                CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
                        s390_set_chpid_offline(chpid);

                case 2: /* I/O resource accessibility */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports some I/O "
                                      "devices may have become accessible\n");
                        pr_debug("Data received after sei:\n");
                        pr_debug("Validity flags: %x\n", sei_area->vf);

                        /* allocate a new channel path structure, if needed */
                        status = get_chp_status(sei_area->rsid);
                                new_channel_path(sei_area->rsid);
                        dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
                        res_data.chp = to_channelpath(dev);
                        pr_debug("chpid: %x", sei_area->rsid);
                        if ((sei_area->vf & 0xc0) != 0) {
                                res_data.fla = sei_area->fla;
                                if ((sei_area->vf & 0xc0) == 0xc0) {
                                        pr_debug(" full link addr: %x",
                                        res_data.fla_mask = 0xffff;
                                        pr_debug(" link addr: %x",
                                        res_data.fla_mask = 0xff00;

                        ret = s390_process_res_acc(&res_data);

                default: /* other stuff */
                        CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
        } while (sei_area->flags & 0x80);
__chp_add_new_sch(struct subchannel_id schid)

        if (stsch(schid, &schib))
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
                css_clear_subchannel_slow_list();

__chp_add(struct subchannel_id schid, void *data)

        struct channel_path *chp;
        struct subchannel *sch;

        chp = (struct channel_path *)data;
        sch = get_subchannel_by_schid(schid);
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);

        spin_lock_irq(&sch->lock);
                if (sch->schib.pmcw.chpid[i] == chp->id) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                spin_unlock(&sch->lock);

        spin_unlock(&sch->lock);

        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                    | 0x80 >> i) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(&sch->lock);
        put_device(&sch->dev);

        if (!get_chp_status(chpid))
                return 0; /* no need to do the rest */

        sprintf(dbf_txt, "cadd%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        dev = get_device(&css[0]->chps[chpid]->dev);
        rc = for_each_subchannel(__chp_add, to_channelpath(dev));
        if (css_slow_subchannels_exist())
/*
 * Handling of crw machine checks with channel path source.
 */
chp_process_crw(int chpid, int on)

                /* Path has gone. We use the link incident routine. */
                s390_set_chpid_offline(chpid);
                return 0; /* De-register is async anyway. */

        /*
         * Path has come. Allocate a new channel path structure,
         */
        if (get_chp_status(chpid) < 0)
                new_channel_path(chpid);
        /* Avoid the extra overhead in process_res_acc. */
        return chp_add(chpid);
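/*
 * Check whether the subchannel still has I/O active on the path given by
 * @index; if so, mark the device as waiting so the I/O can be killed off
 * after a grace period (see the caller in __s390_subchannel_vary_chpid).
 */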
__check_for_io_and_kill(struct subchannel *sch, int index)

        if (!device_is_online(sch))
                /* cio could be doing I/O. */

        cc = stsch(sch->schid, &sch->schib);
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
                device_set_waiting(sch);
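/*
 * Set or clear the bit for @chpid in the subchannel's opm and lpm path
 * masks, depending on @on, and trigger device reprobe or path
 * verification as appropriate.
 */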
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)

        if (!sch->ssd_info.valid)

        spin_lock_irqsave(&sch->lock, flags);
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid)

                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);

                        sch->opm &= ~(0x80 >> chp);
                        sch->lpm &= ~(0x80 >> chp);
                        /*
                         * Give running I/O a grace period in which it
                         * can successfully terminate, even using the
                         * just varied off path. Then kill it.
                         */
                        if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                        } else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);

        spin_unlock_irqrestore(&sch->lock, flags);
s390_subchannel_vary_chpid_off(struct device *dev, void *data)

        struct subchannel *sch;

        sch = to_subchannel(dev);
        __s390_subchannel_vary_chpid(sch, *chpid, 0);

s390_subchannel_vary_chpid_on(struct device *dev, void *data)

        struct subchannel *sch;

        sch = to_subchannel(dev);
        __s390_subchannel_vary_chpid(sch, *chpid, 1);

__s390_vary_chpid_on(struct subchannel_id schid, void *data)

        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
                put_device(&sch->dev);

        if (stsch_err(schid, &schib))

        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
s390_vary_chpid( __u8 chpid, int on)

        sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
        CIO_TRACE_EVENT( 2, dbf_text);

        status = get_chp_status(chpid);
                printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);

        if (!on && !status) {
                printk(KERN_ERR "chpid %x is already offline\n", chpid);

        set_chp_logically_online(chpid, on);

        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);

                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
/*
 * Channel measurement related functions
 */
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,

        struct channel_path *chp;

        chp = to_channelpath(container_of(kobj, struct device, kobj));

        size = sizeof(struct cmg_chars);

        if (off + count > size)
        memcpy(buf, chp->cmg_chars + off, count);

static struct bin_attribute chp_measurement_chars_attr = {
                .name = "measurement_chars",
                .owner = THIS_MODULE,
        .size = sizeof(struct cmg_chars),
        .read = chp_measurement_chars_read,
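/*
 * Copy the measurement block for @chpid out of the channel-measurement
 * area; reread until two consecutive reads return the same first word so
 * that a block being updated concurrently is not copied in a half-updated
 * state.
 */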
chp_measurement_copy_block(struct cmg_entry *buf,
                           struct channel_subsystem *css, int chpid)

        struct cmg_entry *entry, reference_buf;

                area = css->cub_addr1;
                area = css->cub_addr2;
        entry = area + (idx * sizeof(struct cmg_entry));
                memcpy(buf, entry, sizeof(*entry));
                memcpy(&reference_buf, entry, sizeof(*entry));
        } while (reference_buf.values[0] != buf->values[0]);

chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)

        struct channel_path *chp;
        struct channel_subsystem *css;

        chp = to_channelpath(container_of(kobj, struct device, kobj));
        css = to_css(chp->dev.parent);

        size = sizeof(struct cmg_chars);

        /* Only allow single reads. */
        if (off || count < size)

        chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);

static struct bin_attribute chp_measurement_attr = {
                .name = "measurement",
                .owner = THIS_MODULE,
        .size = sizeof(struct cmg_entry),
        .read = chp_measurement_read,
chsc_remove_chp_cmg_attr(struct channel_path *chp)

        sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
        sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);

chsc_add_chp_cmg_attr(struct channel_path *chp)

        ret = sysfs_create_bin_file(&chp->dev.kobj,
                                    &chp_measurement_chars_attr);
        ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
                sysfs_remove_bin_file(&chp->dev.kobj,
                                      &chp_measurement_chars_attr);

chsc_remove_cmg_attr(struct channel_subsystem *css)

        for (i = 0; i <= __MAX_CHPID; i++) {
                chsc_remove_chp_cmg_attr(css->chps[i]);

chsc_add_cmg_attr(struct channel_subsystem *css)

        for (i = 0; i <= __MAX_CHPID; i++) {
                ret = chsc_add_chp_cmg_attr(css->chps[i]);

        for (--i; i >= 0; i--) {
                chsc_remove_chp_cmg_attr(css->chps[i]);
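/*
 * Build and issue the secm chsc request that switches channel measurement
 * on or off (operation_code 0 enables, 1 disables), passing the two cub
 * addresses stored in @css.
 */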
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)

                struct chsc_header request;
                u32 operation_code : 2;
                struct chsc_header response;

        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001:    /* Success. */
        case 0x0003:    /* Invalid block. */
        case 0x0007:    /* Invalid format. */
        case 0x0008:    /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
        case 0x0004:    /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
        case 0x0102:    /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
        case 0x0103:    /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
        case 0x0105:    /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
chsc_secm(struct channel_subsystem *css, int enable)

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);

        ret = __chsc_do_secm(css, enable, secm_area);
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        chsc_remove_cmg_attr(css);
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
/*
 * Files for the channel path entries.
 */
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)

        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
                sprintf(buf, "offline\n"));

chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)

        struct channel_path *cp = container_of(dev, struct channel_path, dev);

        num_args = sscanf(buf, "%5s", cmd);

        if (!strnicmp(cmd, "on", 2))
                error = s390_vary_chpid(cp->id, 1);
        else if (!strnicmp(cmd, "off", 3))
                error = s390_vary_chpid(cp->id, 0);

        return error < 0 ? error : count;

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)

        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        return sprintf(buf, "%x\n", chp->desc.desc);

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)

        struct channel_path *chp = to_channelpath(dev);

        if (chp->cmg == -1) /* channel measurements not available */
                return sprintf(buf, "unknown\n");
        return sprintf(buf, "%x\n", chp->cmg);

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)

        struct channel_path *chp = to_channelpath(dev);

        if (chp->shared == -1) /* channel measurements not available */
                return sprintf(buf, "unknown\n");
        return sprintf(buf, "%x\n", chp->shared);

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute * chp_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_type.attr,
        &dev_attr_shared.attr,

static struct attribute_group chp_attr_group = {

chp_release(struct device *dev)

        struct channel_path *cp;

        cp = container_of(dev, struct channel_path, dev);
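/*
 * Issue the store-channel-path-description chsc (request code 0x0002) for
 * a single chpid and copy the returned description into @desc.
 */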
chsc_determine_channel_path_description(int chpid,
                                        struct channel_path_desc *desc)

                struct chsc_header request;
                u32 first_chpid : 8;
                struct chsc_header response;
                struct channel_path_desc desc;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid;
        scpd_area->last_chpid = chpid;

        ccode = chsc(scpd_area);
                ret = (ccode == 3) ? -ENODEV : -EBUSY;

        switch (scpd_area->response.code) {
        case 0x0001:    /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
        case 0x0003:    /* Invalid block. */
        case 0x0007:    /* Invalid format. */
        case 0x0008:    /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
        case 0x0004:    /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);

        free_page((unsigned long)scpd_area);
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)

        chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
        if (chp->cmg_chars) {
                struct cmg_chars *cmg_chars;

                cmg_chars = chp->cmg_chars;
                for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                        mask = 0x80 >> (i + 3);
                                cmg_chars->values[i] = chars->values[i];
                                cmg_chars->values[i] = 0;
                /* No cmg-dependent data. */
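/*
 * Issue the scmc chsc (request code 0x0022) for a single chpid and, if the
 * response is valid, store the cmg, shared state and cmg-dependent
 * characteristics in the channel_path structure.
 */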
chsc_get_channel_measurement_chars(struct channel_path *chp)

                struct chsc_header request;
                u32 first_chpid : 8;
                struct chsc_header response;
                u32 data[NR_MEASUREMENT_CHARS];

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->id;
        scmc_area->last_chpid = chp->id;

        ccode = chsc(scmc_area);
                ret = (ccode == 3) ? -ENODEV : -EBUSY;

        switch (scmc_area->response.code) {
        case 0x0001:    /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
        case 0x0003:    /* Invalid block. */
        case 0x0007:    /* Invalid format. */
        case 0x0008:    /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
        case 0x0004:    /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);

        free_page((unsigned long)scmc_area);
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
new_channel_path(int chpid)

        struct channel_path *chp;

        chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
        memset(chp, 0, sizeof(struct channel_path));

        /* fill in status, etc. */
        chp->dev = (struct device) {
                .parent = &css[0]->device,
                .release = chp_release,
        snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

        /* Obtain channel path description and fill it in. */
        ret = chsc_determine_channel_path_description(chpid, &chp->desc);

        /* Get channel-measurement characteristics. */
        if (css_characteristics_avail && css_chsc_characteristics.scmc
            && css_chsc_characteristics.secm) {
                ret = chsc_get_channel_measurement_chars(chp);
                static int msg_done;

                        printk(KERN_WARNING "cio: Channel measurements not "
                               "available, continuing.\n");

        /* make it known to the system */
        ret = device_register(&chp->dev);
                printk(KERN_WARNING "%s: could not register %02x\n",

        ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
                device_unregister(&chp->dev);

        mutex_lock(&css[0]->mutex);
        if (css[0]->cm_enabled) {
                ret = chsc_add_chp_cmg_attr(chp);
                        sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
                        device_unregister(&chp->dev);
                        mutex_unlock(&css[0]->mutex);

        css[0]->chps[chpid] = chp;
        mutex_unlock(&css[0]->mutex);
chsc_get_chp_desc(struct subchannel *sch, int chp_no)

        struct channel_path *chp;
        struct channel_path_desc *desc;

        chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
        desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
        memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));

chsc_alloc_sei_area(void)

        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
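/*
 * Issue the sda chsc (request code 0x0031) to enable the facility
 * selected by @operation_code.
 */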
chsc_enable_facility(int operation_code)

                struct chsc_header request;
                u32 operation_data_area[252];
                struct chsc_header response;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);

        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
                ret = (ret == 3) ? -ENODEV : -EBUSY;

        switch (sda_area->response.code) {
        case 0x0001:    /* everything ok */
        case 0x0003:    /* invalid request block */
        case 0x0004:    /* command not provided */
        case 0x0101:    /* facility not provided */
        default:        /* something went wrong */

        free_page((unsigned long)sda_area);

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
chsc_determine_css_characteristics(void)

                struct chsc_header request;
                struct chsc_header response;
                u32 general_char[510];

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");

        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));

        free_page ((unsigned long) scsc_area);

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);