author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-23 10:02:14 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-23 10:02:14 -0700
commit     9fd815b55f31be48dbb3dd23922587d247a4e497
tree       63814130acf3e472cc660ae71208c146f16dc5d6  /drivers/s390
parent     31bbb9b58d1e8ebcf2b28c95c2250a9f8e31e397
parent     ed87b27e00d2ca240f62f3903583a2f1541fb9ef
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (22 commits)
[S390] Update default configuration.
[S390] hibernate: Do real CPU swap at resume time
[S390] dasd: tolerate devices that have no feature codes
[S390] zcrypt: Do not add/remove devices in s/r callbacks
[S390] hibernate: make sure pfn_is_nosave handles lowcore pages
[S390] smp: introduce LC_ORDER and simplify lowcore handling
[S390] ptrace: use common code for simple peek/poke operations
[S390] fix disabled_wait inline assembly clobber list
[S390] Change kernel_page_present coding style.
[S390] hibernation: reset system after resume
[S390] hibernation: fix guest page hinting related crash
[S390] Get rid of init_module/delete_module compat functions.
[S390] Convert sys_execve to function with parameters.
[S390] Convert sys_clone to function with parameters.
[S390] qdio: change state of all primed input buffers
[S390] qdio: reduce per device debug messages
[S390] cio: introduce consistent subchannel scanning
[S390] cio: idset use actual number of ssids
[S390] cio: dont kfree vmalloced memory
[S390] cio: introduce css_settle
...
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd_eckd.c |  13
-rw-r--r--  drivers/s390/cio/css.c         | 252
-rw-r--r--  drivers/s390/cio/css.h         |   3
-rw-r--r--  drivers/s390/cio/device.c      |  38
-rw-r--r--  drivers/s390/cio/device.h      |   1
-rw-r--r--  drivers/s390/cio/idset.c       |  22
-rw-r--r--  drivers/s390/cio/idset.h       |   2
-rw-r--r--  drivers/s390/cio/qdio_main.c   |  32
-rw-r--r--  drivers/s390/crypto/ap_bus.c   |  40
9 files changed, 219 insertions, 184 deletions
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bd9fe2e36dce..ab3521755588 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -935,6 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
 	struct dasd_eckd_private *private;
 
 	private = (struct dasd_eckd_private *) device->private;
+	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
 				   (sizeof(struct dasd_psf_prssd_data) +
 				    sizeof(struct dasd_rssd_features)),
@@ -982,7 +983,9 @@ static int dasd_eckd_read_features(struct dasd_device *device)
 		features = (struct dasd_rssd_features *) (prssdp + 1);
 		memcpy(&private->features, features,
 		       sizeof(struct dasd_rssd_features));
-	}
+	} else
+		dev_warn(&device->cdev->dev, "Reading device feature codes"
+			 " failed with rc=%d\n", rc);
 	dasd_sfree_request(cqr, cqr->memdev);
 	return rc;
 }
@@ -1144,9 +1147,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	}
 
 	/* Read Feature Codes */
-	rc = dasd_eckd_read_features(device);
-	if (rc)
-		goto out_err3;
+	dasd_eckd_read_features(device);
 
 	/* Read Device Characteristics */
 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -3241,9 +3242,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
 	}
 
 	/* Read Feature Codes */
-	rc = dasd_eckd_read_features(device);
-	if (rc)
-		goto out_err;
+	dasd_eckd_read_features(device);
 
 	/* Read Device Characteristics */
 	memset(&private->rdc_data, 0, sizeof(private->rdc_data));
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 393c73c47f87..91c25706fa83 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -31,8 +31,7 @@
 #include "chp.h"
 
 int css_init_done = 0;
-static int need_reprobe = 0;
-static int max_ssid = 0;
+int max_ssid;
 
 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
 
@@ -315,12 +314,18 @@ int css_probe_device(struct subchannel_id schid)
 	int ret;
 	struct subchannel *sch;
 
-	sch = css_alloc_subchannel(schid);
-	if (IS_ERR(sch))
-		return PTR_ERR(sch);
+	if (cio_is_console(schid))
+		sch = cio_get_console_subchannel();
+	else {
+		sch = css_alloc_subchannel(schid);
+		if (IS_ERR(sch))
+			return PTR_ERR(sch);
+	}
 	ret = css_register_subchannel(sch);
-	if (ret)
-		put_device(&sch->dev);
+	if (ret) {
+		if (!cio_is_console(schid))
+			put_device(&sch->dev);
+	}
 	return ret;
 }
 
@@ -409,10 +414,14 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
 
 static int __init slow_subchannel_init(void)
 {
 	spin_lock_init(&slow_subchannel_lock);
+	atomic_set(&css_eval_scheduled, 0);
+	init_waitqueue_head(&css_eval_wq);
 	slow_subchannel_set = idset_sch_new();
 	if (!slow_subchannel_set) {
 		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
@@ -468,9 +477,17 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 
 static void css_slow_path_func(struct work_struct *unused)
 {
+	unsigned long flags;
+
 	CIO_TRACE_EVENT(4, "slowpath");
 	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
 				   NULL);
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	if (idset_is_empty(slow_subchannel_set)) {
+		atomic_set(&css_eval_scheduled, 0);
+		wake_up(&css_eval_wq);
+	}
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
@@ -482,6 +499,7 @@ void css_schedule_eval(struct subchannel_id schid)
 
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
+	atomic_set(&css_eval_scheduled, 1);
 	queue_work(slow_path_wq, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
@@ -492,80 +510,53 @@ void css_schedule_eval_all(void)
 
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
+	atomic_set(&css_eval_scheduled, 1);
 	queue_work(slow_path_wq, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-void css_wait_for_slow_path(void)
+static int __unset_registered(struct device *dev, void *data)
 {
-	flush_workqueue(slow_path_wq);
-}
-
-/* Reprobe subchannel if unregistered. */
-static int reprobe_subchannel(struct subchannel_id schid, void *data)
-{
-	int ret;
-
-	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
-		      schid.ssid, schid.sch_no);
-	if (need_reprobe)
-		return -EAGAIN;
-
-	ret = css_probe_device(schid);
-	switch (ret) {
-	case 0:
-		break;
-	case -ENXIO:
-	case -ENOMEM:
-	case -EIO:
-		/* These should abort looping */
-		break;
-	default:
-		ret = 0;
-	}
-
-	return ret;
-}
+	struct idset *set = data;
+	struct subchannel *sch = to_subchannel(dev);
 
-static void reprobe_after_idle(struct work_struct *unused)
-{
-	/* Make sure initial subchannel scan is done. */
-	wait_event(ccw_device_init_wq,
-		   atomic_read(&ccw_device_init_count) == 0);
-	if (need_reprobe)
-		css_schedule_reprobe();
+	idset_sch_del(set, sch->schid);
+	return 0;
 }
 
-static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
-
-/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(struct work_struct *unused)
+void css_schedule_eval_all_unreg(void)
 {
-	int ret;
-
-	CIO_MSG_EVENT(4, "reprobe start\n");
+	unsigned long flags;
+	struct idset *unreg_set;
 
-	/* Make sure initial subchannel scan is done. */
-	if (atomic_read(&ccw_device_init_count) != 0) {
-		queue_work(ccw_device_work, &reprobe_idle_work);
+	/* Find unregistered subchannels. */
+	unreg_set = idset_sch_new();
+	if (!unreg_set) {
+		/* Fallback. */
+		css_schedule_eval_all();
 		return;
 	}
-	need_reprobe = 0;
-	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
-
-	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
-		      need_reprobe);
+	idset_fill(unreg_set);
+	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+	/* Apply to slow_subchannel_set. */
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_add_set(slow_subchannel_set, unreg_set);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_work(slow_path_wq, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+	idset_free(unreg_set);
 }
 
-static DECLARE_WORK(css_reprobe_work, reprobe_all);
+void css_wait_for_slow_path(void)
+{
+	flush_workqueue(slow_path_wq);
+}
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-	need_reprobe = 1;
-	queue_work(slow_path_wq, &css_reprobe_work);
+	css_schedule_eval_all_unreg();
 }
-
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
@@ -601,49 +592,6 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 	css_evaluate_subchannel(mchk_schid, 0);
 }
 
-static int __init
-__init_channel_subsystem(struct subchannel_id schid, void *data)
-{
-	struct subchannel *sch;
-	int ret;
-
-	if (cio_is_console(schid))
-		sch = cio_get_console_subchannel();
-	else {
-		sch = css_alloc_subchannel(schid);
-		if (IS_ERR(sch))
-			ret = PTR_ERR(sch);
-		else
-			ret = 0;
-		switch (ret) {
-		case 0:
-			break;
-		case -ENOMEM:
-			panic("Out of memory in init_channel_subsystem\n");
-		/* -ENXIO: no more subchannels. */
-		case -ENXIO:
-			return ret;
-		/* -EIO: this subchannel set not supported. */
-		case -EIO:
-			return ret;
-		default:
-			return 0;
-		}
-	}
-	/*
-	 * We register ALL valid subchannels in ioinfo, even those
-	 * that have been present before init_channel_subsystem.
-	 * These subchannels can't have been registered yet (kmalloc
-	 * not working) so we do it now. This is true e.g. for the
-	 * console subchannel.
-	 */
-	if (css_register_subchannel(sch)) {
-		if (!cio_is_console(schid))
-			put_device(&sch->dev);
-	}
-	return 0;
-}
-
 static void __init
 css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 {
@@ -854,19 +802,30 @@ static struct notifier_block css_power_notifier = {
  * The struct subchannel's are created during probing (except for the
  * static console subchannel).
  */
-static int __init
-init_channel_subsystem (void)
+static int __init css_bus_init(void)
 {
 	int ret, i;
 
 	ret = chsc_determine_css_characteristics();
 	if (ret == -ENOMEM)
-		goto out; /* No need to continue. */
+		goto out;
 
 	ret = chsc_alloc_sei_area();
 	if (ret)
 		goto out;
 
+	/* Try to enable MSS. */
+	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+	switch (ret) {
+	case 0: /* Success. */
+		max_ssid = __MAX_SSID;
+		break;
+	case -ENOMEM:
+		goto out;
+	default:
+		max_ssid = 0;
+	}
+
 	ret = slow_subchannel_init();
 	if (ret)
 		goto out;
@@ -878,17 +837,6 @@ init_channel_subsystem (void)
 	if ((ret = bus_register(&css_bus_type)))
 		goto out;
 
-	/* Try to enable MSS. */
-	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
-	switch (ret) {
-	case 0: /* Success. */
-		max_ssid = __MAX_SSID;
-		break;
-	case -ENOMEM:
-		goto out_bus;
-	default:
-		max_ssid = 0;
-	}
 	/* Setup css structure. */
 	for (i = 0; i <= __MAX_CSSID; i++) {
 		struct channel_subsystem *css;
@@ -934,7 +882,6 @@ init_channel_subsystem (void)
 
 	/* Enable default isc for I/O subchannels. */
 	isc_register(IO_SCH_ISC);
-	for_each_subchannel(__init_channel_subsystem, NULL);
 	return 0;
 out_file:
 	if (css_chsc_characteristics.secm)
@@ -955,17 +902,76 @@ out_unregister:
 						  &dev_attr_cm_enable);
 		device_unregister(&css->device);
 	}
-out_bus:
 	bus_unregister(&css_bus_type);
 out:
 	crw_unregister_handler(CRW_RSC_CSS);
 	chsc_free_sei_area();
-	kfree(slow_subchannel_set);
+	idset_free(slow_subchannel_set);
 	pr_alert("The CSS device driver initialization failed with "
 		 "errno=%d\n", ret);
 	return ret;
 }
 
+static void __init css_bus_cleanup(void)
+{
+	struct channel_subsystem *css;
+	int i;
+
+	for (i = 0; i <= __MAX_CSSID; i++) {
+		css = channel_subsystems[i];
+		device_unregister(&css->pseudo_subchannel->dev);
+		css->pseudo_subchannel = NULL;
+		if (css_chsc_characteristics.secm)
+			device_remove_file(&css->device, &dev_attr_cm_enable);
+		device_unregister(&css->device);
+	}
+	bus_unregister(&css_bus_type);
+	crw_unregister_handler(CRW_RSC_CSS);
+	chsc_free_sei_area();
+	idset_free(slow_subchannel_set);
+	isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+	int ret;
+
+	ret = css_bus_init();
+	if (ret)
+		return ret;
+
+	ret = io_subchannel_init();
+	if (ret)
+		css_bus_cleanup();
+
+	return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+	struct css_driver *cssdrv = to_cssdriver(drv);
+
+	if (cssdrv->settle)
+		cssdrv->settle();
+	return 0;
+}
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup if the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+	/* Start initial subchannel evaluation. */
+	css_schedule_eval_all();
+	/* Wait for the evaluation of subchannels to finish. */
+	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+	/* Wait for the subchannel type specific initialization to finish */
+	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
 	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
@@ -1135,7 +1141,5 @@ void css_driver_unregister(struct css_driver *cdrv)
 }
 EXPORT_SYMBOL_GPL(css_driver_unregister);
 
-subsys_initcall(init_channel_subsystem);
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 9763eeec7458..68d6b0bf151c 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -75,6 +75,7 @@ struct chp_link;
  * @freeze: callback for freezing during hibernation snapshotting
  * @thaw: undo work done in @freeze
  * @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
  * @name: name of the device driver
  */
 struct css_driver {
@@ -92,6 +93,7 @@ struct css_driver {
 	int (*freeze)(struct subchannel *);
 	int (*thaw) (struct subchannel *);
 	int (*restore)(struct subchannel *);
+	void (*settle)(void);
 	const char *name;
 };
 
@@ -109,6 +111,7 @@ extern void css_sch_device_unregister(struct subchannel *);
 extern int css_probe_device(struct subchannel_id);
 extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
+extern int max_ssid;
 int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6527f3f34493..f780bdd3a04e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -131,6 +131,10 @@ static void io_subchannel_shutdown(struct subchannel *);
 static int io_subchannel_sch_event(struct subchannel *, int);
 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
+static void recovery_func(unsigned long data);
+struct workqueue_struct *ccw_device_work;
+wait_queue_head_t ccw_device_init_wq;
+atomic_t ccw_device_init_count;
 
 static struct css_device_id io_subchannel_ids[] = {
 	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -151,6 +155,13 @@ static int io_subchannel_prepare(struct subchannel *sch)
 	return 0;
 }
 
+static void io_subchannel_settle(void)
+{
+	wait_event(ccw_device_init_wq,
+		   atomic_read(&ccw_device_init_count) == 0);
+	flush_workqueue(ccw_device_work);
+}
+
 static struct css_driver io_subchannel_driver = {
 	.owner = THIS_MODULE,
 	.subchannel_type = io_subchannel_ids,
@@ -162,16 +173,10 @@ static struct css_driver io_subchannel_driver = {
 	.remove = io_subchannel_remove,
 	.shutdown = io_subchannel_shutdown,
 	.prepare = io_subchannel_prepare,
+	.settle = io_subchannel_settle,
 };
 
-struct workqueue_struct *ccw_device_work;
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;
-
-static void recovery_func(unsigned long data);
-
-static int __init
-init_ccw_bus_type (void)
+int __init io_subchannel_init(void)
 {
 	int ret;
 
@@ -181,10 +186,10 @@ init_ccw_bus_type (void)
 
 	ccw_device_work = create_singlethread_workqueue("cio");
 	if (!ccw_device_work)
-		return -ENOMEM; /* FIXME: better errno ? */
+		return -ENOMEM;
 	slow_path_wq = create_singlethread_workqueue("kslowcrw");
 	if (!slow_path_wq) {
-		ret = -ENOMEM; /* FIXME: better errno ? */
+		ret = -ENOMEM;
 		goto out_err;
 	}
 	if ((ret = bus_register (&ccw_bus_type)))
@@ -194,9 +199,6 @@ init_ccw_bus_type (void)
 	if (ret)
 		goto out_err;
 
-	wait_event(ccw_device_init_wq,
-		   atomic_read(&ccw_device_init_count) == 0);
-	flush_workqueue(ccw_device_work);
 	return 0;
 out_err:
 	if (ccw_device_work)
@@ -206,16 +208,6 @@ out_err:
 	return ret;
 }
 
-static void __exit
-cleanup_ccw_bus_type (void)
-{
-	css_driver_unregister(&io_subchannel_driver);
-	bus_unregister(&ccw_bus_type);
-	destroy_workqueue(ccw_device_work);
-}
-
-subsys_initcall(init_ccw_bus_type);
-module_exit(cleanup_ccw_bus_type);
 
 /************************ device handling **************************/
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index e3975107a578..ed39a2caaf47 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ dev_fsm_final_state(struct ccw_device *cdev)
 extern struct workqueue_struct *ccw_device_work;
 extern wait_queue_head_t ccw_device_init_wq;
 extern atomic_t ccw_device_init_count;
+int __init io_subchannel_init(void);
 
 void io_subchannel_recog_done(struct ccw_device *cdev);
 void io_subchannel_init_config(struct subchannel *sch);
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index cf8f24a4b5eb..4d10981c7cc1 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -78,7 +78,7 @@ static inline int idset_get_first(struct idset *set, int *ssid, int *id)
 
 struct idset *idset_sch_new(void)
 {
-	return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+	return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
 }
 
 void idset_sch_add(struct idset *set, struct subchannel_id schid)
@@ -110,3 +110,23 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
 	}
 	return rc;
 }
+
+int idset_is_empty(struct idset *set)
+{
+	int bitnum;
+
+	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+	if (bitnum >= set->num_ssid * set->num_id)
+		return 1;
+	return 0;
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+	unsigned long i, len;
+
+	len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
+		  __BITOPS_WORDS(from->num_ssid * from->num_id));
+	for (i = 0; i < len ; i++)
+		to->bitmap[i] |= from->bitmap[i];
+}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 528065cb5021..7543da4529f9 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -21,5 +21,7 @@ void idset_sch_add(struct idset *set, struct subchannel_id id);
 void idset_sch_del(struct idset *set, struct subchannel_id id);
 int idset_sch_contains(struct idset *set, struct subchannel_id id);
 int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
 
 #endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9aef402a5f1b..4be6e84b9599 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -401,7 +401,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
 	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
 		qdio_perf_stat_inc(&perf_stats.outbound_target_full);
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
 		return;
 	}
@@ -418,7 +418,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 {
 	int new;
 
-	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
 
 	/* for QEBSM the ACK was already set by EQBS */
 	if (is_qebsm(q)) {
@@ -455,6 +455,8 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 	count--;
 	if (!count)
 		return;
+	/* need to change ALL buffers to get more interrupts */
+	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
 }
 
 static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -545,7 +547,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	 * has (probably) not moved (see qdio_inbound_processing).
 	 */
 	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
 		return 1;
 	} else
@@ -565,11 +567,10 @@ static void qdio_kick_handler(struct qdio_q *q)
 
 	if (q->is_input_q) {
 		qdio_perf_stat_inc(&perf_stats.inbound_handler);
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
-	} else {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
-	}
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
+	} else
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
 
 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);
@@ -633,7 +634,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	switch (state) {
 	case SLSB_P_OUTPUT_EMPTY:
 		/* the adapter got it */
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
@@ -1481,10 +1482,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		get_buf_state(q, prev_buf(bufnr), &state, 0);
 		if (state != SLSB_CU_OUTPUT_PRIMED)
 			rc = qdio_kick_outbound_q(q);
-		else {
-			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
+		else
 			qdio_perf_stat_inc(&perf_stats.fast_requeue);
-		}
+
 out:
 	tasklet_schedule(&q->tasklet);
 	return rc;
@@ -1510,12 +1510,8 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 	if (!irq_ptr)
 		return -ENODEV;
 
-	if (callflags & QDIO_FLAG_SYNC_INPUT)
-		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
-	else
-		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
-	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
-	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);
+	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
 
 	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
 		return -EBUSY;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 090b32a339c6..1294876bf7b4 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -60,6 +60,7 @@ static int ap_device_probe(struct device *dev);
 static void ap_interrupt_handler(void *unused1, void *unused2);
 static void ap_reset(struct ap_device *ap_dev);
 static void ap_config_timeout(unsigned long ptr);
+static int ap_select_domain(void);
 
 /*
  * Module description.
@@ -109,6 +110,10 @@ static unsigned long long poll_timeout = 250000;
 
 /* Suspend flag */
 static int ap_suspend_flag;
+/* Flag to check if domain was set through module parameter domain=. This is
+ * important when supsend and resume is done in a z/VM environment where the
+ * domain might change. */
+static int user_set_domain = 0;
 static struct bus_type ap_bus_type;
 
 /**
@@ -643,6 +648,7 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
 			destroy_workqueue(ap_work_queue);
 			ap_work_queue = NULL;
 		}
+		tasklet_disable(&ap_tasklet);
 	}
 	/* Poll on the device until all requests are finished. */
 	do {
@@ -653,7 +659,10 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
 		spin_unlock_bh(&ap_dev->lock);
 	} while ((flags & 1) || (flags & 2));
 
-	ap_device_remove(dev);
+	spin_lock_bh(&ap_dev->lock);
+	ap_dev->unregistered = 1;
+	spin_unlock_bh(&ap_dev->lock);
+
 	return 0;
 }
 
@@ -666,11 +675,10 @@ static int ap_bus_resume(struct device *dev)
 		ap_suspend_flag = 0;
 		if (!ap_interrupts_available())
 			ap_interrupt_indicator = NULL;
-		ap_device_probe(dev);
-		ap_reset(ap_dev);
-		setup_timer(&ap_dev->timeout, ap_request_timeout,
-			    (unsigned long) ap_dev);
-		ap_scan_bus(NULL);
+		if (!user_set_domain) {
+			ap_domain_index = -1;
+			ap_select_domain();
+		}
 		init_timer(&ap_config_timer);
 		ap_config_timer.function = ap_config_timeout;
 		ap_config_timer.data = 0;
@@ -686,12 +694,14 @@ static int ap_bus_resume(struct device *dev)
 		tasklet_schedule(&ap_tasklet);
 		if (ap_thread_flag)
 			rc = ap_poll_thread_start();
-	} else {
-		ap_device_probe(dev);
-		ap_reset(ap_dev);
-		setup_timer(&ap_dev->timeout, ap_request_timeout,
-			    (unsigned long) ap_dev);
 	}
+	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
+		spin_lock_bh(&ap_dev->lock);
+		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
+				       ap_domain_index);
+		spin_unlock_bh(&ap_dev->lock);
+	}
+	queue_work(ap_work_queue, &ap_config_work);
 
 	return rc;
 }
@@ -1079,6 +1089,8 @@ static void ap_scan_bus(struct work_struct *unused)
 		spin_lock_bh(&ap_dev->lock);
 		if (rc || ap_dev->unregistered) {
 			spin_unlock_bh(&ap_dev->lock);
+			if (ap_dev->unregistered)
+				i--;
 			device_unregister(dev);
 			put_device(dev);
 			continue;
@@ -1586,6 +1598,12 @@ int __init ap_module_init(void)
 			ap_domain_index);
 		return -EINVAL;
 	}
+	/* In resume callback we need to know if the user had set the domain.
+	 * If so, we can not just reset it.
+	 */
+	if (ap_domain_index >= 0)
+		user_set_domain = 1;
+
 	if (ap_instructions_available() != 0) {
 		pr_warning("The hardware system does not support "
			   "AP instructions\n");
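
The cio changes above replace the old reprobe machinery with a scheduled flag (css_eval_scheduled), a wait queue (css_eval_wq) and per-driver settle callbacks, so that channel_subsystem_init_sync() can block until the initial subchannel evaluation has drained before the root device is looked for. Below is a minimal user-space sketch of that synchronization pattern, with pthreads standing in for the kernel's workqueue and wait-queue primitives; the names and the simple counter are illustrative only and are not taken from the patch.

#include <pthread.h>
#include <stdio.h>

/* Illustrative model only: a worker drains a pending set, clears a
 * "scheduled" flag under the lock and wakes anyone waiting on it. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t eval_done = PTHREAD_COND_INITIALIZER;
static int eval_scheduled;	/* plays the role of css_eval_scheduled */
static int pending;		/* plays the role of slow_subchannel_set */

static void schedule_eval(int count)
{
	pthread_mutex_lock(&lock);
	pending += count;
	eval_scheduled = 1;	/* mark work as outstanding */
	pthread_mutex_unlock(&lock);
}

static void *slow_path_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (pending > 0)
		pending--;	/* "evaluate" one pending subchannel */
	eval_scheduled = 0;	/* set is empty: clear the flag ... */
	pthread_cond_broadcast(&eval_done);	/* ... and wake waiters */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	schedule_eval(16);	/* like css_schedule_eval_all() */
	pthread_create(&worker, NULL, slow_path_worker, NULL);

	/* Like channel_subsystem_init_sync(): wait until evaluation drains. */
	pthread_mutex_lock(&lock);
	while (eval_scheduled)
		pthread_cond_wait(&eval_done, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(worker, NULL);
	printf("initial evaluation finished\n");
	return 0;
}

The kernel version keeps the pending work in an idset and runs the worker on slow_path_wq, but the ordering guarantee it relies on is the same: the waiter only proceeds once the flag has been cleared under the lock by the worker that emptied the set.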