author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-01 11:49:56 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-01 11:49:56 -0700
commit     6c09931b3f987898f5c581d267ef269f5e2e9575 (patch)
tree       27cc7d7304e189505e53b25c6284b1aa9a4f31d3 /drivers
parent     b3eda8d05c1afe722dc19be3fee7eeadc75e25e2 (diff)
parent     c397031f58f9a0a5d808dc998105781b1945b6fe (diff)
download   linux-6c09931b3f987898f5c581d267ef269f5e2e9575.tar.bz2
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The main new feature is machine support for System zEC12 including
  transactional memory, runtime instrumentation, support for scm block
  devices via eadm subchannels, and support for CEX4 crypto cards.

  In addition there are some nice improvements: bpf jit compiler, arch
  backend for cmpxchg_double, relative exception table entries, dasd
  partition detection independent from the dasd driver ioctls, and cpu
  cache information in /proc/cpuinfo and /sys/device/cpu.

  And last but not least a series of cleanup patches from Heiko."

Fix up trivial add-add conflict in arch/s390/Kconfig due to commit
b952741c8079 ("cputime: Generalize CONFIG_VIRT_CPU_ACCOUNTING")

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (76 commits)
  s390: update defconfig
  s390/jump label,nss: let shared kernel support depend on !JUMP_LABEL
  s390/disassembler: fix decoding of risblg instruction
  s390/bpf,jit: add support for BPF_S_ANC_ALU_XOR_X instruction
  s390/traps: move call to print_modules() out of show_regs()
  s390/mm: mark free_initrd_mem() as __init
  s390/dasd: check count address during online setting
  drivers/s390/char/monreader.c: fix error return code
  s390/cmpxchg,percpu: implement cmpxchg_double()
  s390/percpu: implement this_cpu_add_return()
  s390/percpu: implement this_cpu_xchg()
  s390/kexec: remove CONFIG_KEXEC
  s390/irq: use designated initializers for irq class array
  s390: add uninitialized_var() to suppress false positive compiler warnings
  s390/crashdump: move fill_cpu_elf_notes() prototype to header file
  s390/process: add missing header include
  s390/ptrace: add missing ifdef
  s390/ipl,decrompressor: disable branch profiling
  s390/perf_events: compile only for CONFIG_64BIT
  s390/tape: remove even more tape block leftovers
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/block/Kconfig | 18
-rw-r--r--  drivers/s390/block/Makefile | 6
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 21
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 4
-rw-r--r--  drivers/s390/block/scm_blk.c | 445
-rw-r--r--  drivers/s390/block/scm_blk.h | 117
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c | 228
-rw-r--r--  drivers/s390/block/scm_drv.c | 81
-rw-r--r--  drivers/s390/char/con3270.c | 1
-rw-r--r--  drivers/s390/char/monreader.c | 5
-rw-r--r--  drivers/s390/char/sclp.c | 2
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/char/tape.h | 1
-rw-r--r--  drivers/s390/char/tape_std.h | 4
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 2
-rw-r--r--  drivers/s390/cio/Makefile | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 52
-rw-r--r--  drivers/s390/cio/chsc.h | 43
-rw-r--r--  drivers/s390/cio/cio.c | 2
-rw-r--r--  drivers/s390/cio/css.c | 1
-rw-r--r--  drivers/s390/cio/eadm_sch.c | 401
-rw-r--r--  drivers/s390/cio/eadm_sch.h | 20
-rw-r--r--  drivers/s390/cio/orb.h | 24
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 38
-rw-r--r--  drivers/s390/cio/scm.c | 317
-rw-r--r--  drivers/s390/crypto/Makefile | 3
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 209
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 35
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 187
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 19
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 371
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 149
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.h | 12
-rw-r--r--  drivers/s390/crypto/zcrypt_debug.h | 59
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 13
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 531
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.h | 39
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 856
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.h | 169
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 781
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.h | 3
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 15
42 files changed, 4064 insertions(+), 1224 deletions(-)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 8e477bb1f3f6..4a3b62326183 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -70,3 +70,21 @@ config DASD_EER
This driver provides a character device interface to the
DASD extended error reporting. This is only needed if you want to
use applications written for the EER facility.
+
+config SCM_BLOCK
+ def_tristate m
+ prompt "Support for Storage Class Memory"
+ depends on S390 && BLOCK && EADM_SCH && SCM_BUS
+ help
+ Block device driver for Storage Class Memory (SCM). This driver
+ provides a block device interface for each available SCM increment.
+
+ To compile this driver as a module, choose M here: the
+ module will be called scm_block.
+
+config SCM_BLOCK_CLUSTER_WRITE
+ def_bool y
+ prompt "SCM force cluster writes"
+ depends on SCM_BLOCK
+ help
+	  Force writes to Storage Class Memory (SCM) to be done in clusters.
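
As a hedged illustration only (it is not part of this patch), a kernel .config that enables the new SCM stack might contain the lines below. The SCM_BUS and EADM_SCH values are assumptions inferred from the "depends on" line above and from the cio Makefile change later in this diff, not from this hunk:

CONFIG_SCM_BUS=y
CONFIG_EADM_SCH=m
CONFIG_SCM_BLOCK=m
CONFIG_SCM_BLOCK_CLUSTER_WRITE=y

Building SCM_BLOCK as "m" matches the def_tristate default above, and the resulting module is called scm_block, as the help text notes.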
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 0a89e080b389..c2f4e673e031 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -17,3 +17,9 @@ obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
+
+scm_block-objs := scm_drv.o scm_blk.o
+ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+scm_block-objs += scm_blk_cluster.o
+endif
+obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c48c72abbefc..108332b44d98 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -20,6 +20,7 @@
#include <linux/compat.h>
#include <linux/init.h>
+#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
@@ -31,8 +32,6 @@
#include "dasd_int.h"
#include "dasd_eckd.h"
-#include "../cio/chsc.h"
-
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
@@ -140,6 +139,10 @@ dasd_eckd_set_online(struct ccw_device *cdev)
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
+/* head and record addresses of count_area read in analysis ccw */
+static const int count_area_head[] = { 0, 0, 0, 0, 2 };
+static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
+
static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
@@ -212,7 +215,7 @@ check_XRC (struct ccw1 *de_ccw,
rc = get_sync_clock(&data->ep_sys_time);
/* Ignore return code if sync clock is switched off. */
- if (rc == -ENOSYS || rc == -EACCES)
+ if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
de_ccw->count = sizeof(struct DE_eckd_data);
@@ -323,7 +326,7 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
/* Ignore return code if sync clock is switched off. */
- if (rc == -ENOSYS || rc == -EACCES)
+ if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
return rc;
}
@@ -1940,7 +1943,10 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
count_area = NULL;
for (i = 0; i < 3; i++) {
if (private->count_area[i].kl != 4 ||
- private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
+ private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
+ private->count_area[i].cyl != 0 ||
+ private->count_area[i].head != count_area_head[i] ||
+ private->count_area[i].record != count_area_rec[i]) {
private->uses_cdl = 0;
break;
}
@@ -1952,7 +1958,10 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
for (i = 0; i < 5; i++) {
if ((private->count_area[i].kl != 0) ||
(private->count_area[i].dl !=
- private->count_area[0].dl))
+ private->count_area[0].dl) ||
+ private->count_area[i].cyl != 0 ||
+ private->count_area[i].head != count_area_head[i] ||
+ private->count_area[i].record != count_area_rec[i])
break;
}
if (i == 5)
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 654c6921a6d4..8252f37d04ed 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -292,12 +292,12 @@ out:
#else
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
- return -ENOSYS;
+ return -ENOTTY;
}
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
- return -ENOSYS;
+ return -ENOTTY;
}
#endif
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
new file mode 100644
index 000000000000..9978ad4433cb
--- /dev/null
+++ b/drivers/s390/block/scm_blk.c
@@ -0,0 +1,445 @@
+/*
+ * Block driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "scm_block"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
+debug_info_t *scm_debug;
+static int scm_major;
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(inactive_requests);
+static unsigned int nr_requests = 64;
+static atomic_t nr_devices = ATOMIC_INIT(0);
+module_param(nr_requests, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
+
+MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("scm:scmdev*");
+
+static void __scm_free_rq(struct scm_request *scmrq)
+{
+ struct aob_rq_header *aobrq = to_aobrq(scmrq);
+
+ free_page((unsigned long) scmrq->aob);
+ free_page((unsigned long) scmrq->aidaw);
+ __scm_free_rq_cluster(scmrq);
+ kfree(aobrq);
+}
+
+static void scm_free_rqs(void)
+{
+ struct list_head *iter, *safe;
+ struct scm_request *scmrq;
+
+ spin_lock_irq(&list_lock);
+ list_for_each_safe(iter, safe, &inactive_requests) {
+ scmrq = list_entry(iter, struct scm_request, list);
+ list_del(&scmrq->list);
+ __scm_free_rq(scmrq);
+ }
+ spin_unlock_irq(&list_lock);
+}
+
+static int __scm_alloc_rq(void)
+{
+ struct aob_rq_header *aobrq;
+ struct scm_request *scmrq;
+
+ aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
+ if (!aobrq)
+ return -ENOMEM;
+
+ scmrq = (void *) aobrq->data;
+ scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
+ scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
+ if (!scmrq->aob || !scmrq->aidaw) {
+ __scm_free_rq(scmrq);
+ return -ENOMEM;
+ }
+
+ if (__scm_alloc_rq_cluster(scmrq)) {
+ __scm_free_rq(scmrq);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&scmrq->list);
+ spin_lock_irq(&list_lock);
+ list_add(&scmrq->list, &inactive_requests);
+ spin_unlock_irq(&list_lock);
+
+ return 0;
+}
+
+static int scm_alloc_rqs(unsigned int nrqs)
+{
+ int ret = 0;
+
+ while (nrqs-- && !ret)
+ ret = __scm_alloc_rq();
+
+ return ret;
+}
+
+static struct scm_request *scm_request_fetch(void)
+{
+ struct scm_request *scmrq = NULL;
+
+ spin_lock(&list_lock);
+ if (list_empty(&inactive_requests))
+ goto out;
+ scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
+ list_del(&scmrq->list);
+out:
+ spin_unlock(&list_lock);
+ return scmrq;
+}
+
+static void scm_request_done(struct scm_request *scmrq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_add(&scmrq->list, &inactive_requests);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static int scm_open(struct block_device *blkdev, fmode_t mode)
+{
+ return scm_get_ref();
+}
+
+static int scm_release(struct gendisk *gendisk, fmode_t mode)
+{
+ scm_put_ref();
+ return 0;
+}
+
+static const struct block_device_operations scm_blk_devops = {
+ .owner = THIS_MODULE,
+ .open = scm_open,
+ .release = scm_release,
+};
+
+static void scm_request_prepare(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ struct scm_device *scmdev = bdev->gendisk->private_data;
+ struct aidaw *aidaw = scmrq->aidaw;
+ struct msb *msb = &scmrq->aob->msb[0];
+ struct req_iterator iter;
+ struct bio_vec *bv;
+
+ msb->bs = MSB_BS_4K;
+ scmrq->aob->request.msb_count = 1;
+ msb->scm_addr = scmdev->address +
+ ((u64) blk_rq_pos(scmrq->request) << 9);
+ msb->oc = (rq_data_dir(scmrq->request) == READ) ?
+ MSB_OC_READ : MSB_OC_WRITE;
+ msb->flags |= MSB_FLAG_IDA;
+ msb->data_addr = (u64) aidaw;
+
+ rq_for_each_segment(bv, scmrq->request, iter) {
+ WARN_ON(bv->bv_offset);
+ msb->blk_count += bv->bv_len >> 12;
+ aidaw->data_addr = (u64) page_address(bv->bv_page);
+ aidaw++;
+ }
+}
+
+static inline void scm_request_init(struct scm_blk_dev *bdev,
+ struct scm_request *scmrq,
+ struct request *req)
+{
+ struct aob_rq_header *aobrq = to_aobrq(scmrq);
+ struct aob *aob = scmrq->aob;
+
+ memset(aob, 0, sizeof(*aob));
+ memset(scmrq->aidaw, 0, PAGE_SIZE);
+ aobrq->scmdev = bdev->scmdev;
+ aob->request.cmd_code = ARQB_CMD_MOVE;
+ aob->request.data = (u64) aobrq;
+ scmrq->request = req;
+ scmrq->bdev = bdev;
+ scmrq->retries = 4;
+ scmrq->error = 0;
+ scm_request_cluster_init(scmrq);
+}
+
+static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
+{
+ if (atomic_read(&bdev->queued_reqs)) {
+ /* Queue restart is triggered by the next interrupt. */
+ return;
+ }
+ blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
+}
+
+void scm_request_requeue(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+
+ scm_release_cluster(scmrq);
+ blk_requeue_request(bdev->rq, scmrq->request);
+ scm_request_done(scmrq);
+ scm_ensure_queue_restart(bdev);
+}
+
+void scm_request_finish(struct scm_request *scmrq)
+{
+ scm_release_cluster(scmrq);
+ blk_end_request_all(scmrq->request, scmrq->error);
+ scm_request_done(scmrq);
+}
+
+static void scm_blk_request(struct request_queue *rq)
+{
+ struct scm_device *scmdev = rq->queuedata;
+ struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+ struct scm_request *scmrq;
+ struct request *req;
+ int ret;
+
+ while ((req = blk_peek_request(rq))) {
+ if (req->cmd_type != REQ_TYPE_FS)
+ continue;
+
+ scmrq = scm_request_fetch();
+ if (!scmrq) {
+ SCM_LOG(5, "no request");
+ scm_ensure_queue_restart(bdev);
+ return;
+ }
+ scm_request_init(bdev, scmrq, req);
+ if (!scm_reserve_cluster(scmrq)) {
+ SCM_LOG(5, "cluster busy");
+ scm_request_done(scmrq);
+ return;
+ }
+ if (scm_need_cluster_request(scmrq)) {
+ blk_start_request(req);
+ scm_initiate_cluster_request(scmrq);
+ return;
+ }
+ scm_request_prepare(scmrq);
+ blk_start_request(req);
+
+ ret = scm_start_aob(scmrq->aob);
+ if (ret) {
+ SCM_LOG(5, "no subchannel");
+ scm_request_requeue(scmrq);
+ return;
+ }
+ atomic_inc(&bdev->queued_reqs);
+ }
+}
+
+static void __scmrq_log_error(struct scm_request *scmrq)
+{
+ struct aob *aob = scmrq->aob;
+
+ if (scmrq->error == -ETIMEDOUT)
+ SCM_LOG(1, "Request timeout");
+ else {
+ SCM_LOG(1, "Request error");
+ SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
+ }
+ if (scmrq->retries)
+ SCM_LOG(1, "Retry request");
+ else
+ pr_err("An I/O operation to SCM failed with rc=%d\n",
+ scmrq->error);
+}
+
+void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
+{
+ struct scm_request *scmrq = data;
+ struct scm_blk_dev *bdev = scmrq->bdev;
+
+ scmrq->error = error;
+ if (error)
+ __scmrq_log_error(scmrq);
+
+ spin_lock(&bdev->lock);
+ list_add_tail(&scmrq->list, &bdev->finished_requests);
+ spin_unlock(&bdev->lock);
+ tasklet_hi_schedule(&bdev->tasklet);
+}
+
+static void scm_blk_tasklet(struct scm_blk_dev *bdev)
+{
+ struct scm_request *scmrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdev->lock, flags);
+ while (!list_empty(&bdev->finished_requests)) {
+ scmrq = list_first_entry(&bdev->finished_requests,
+ struct scm_request, list);
+ list_del(&scmrq->list);
+ spin_unlock_irqrestore(&bdev->lock, flags);
+
+ if (scmrq->error && scmrq->retries-- > 0) {
+ if (scm_start_aob(scmrq->aob)) {
+ spin_lock_irqsave(&bdev->rq_lock, flags);
+ scm_request_requeue(scmrq);
+ spin_unlock_irqrestore(&bdev->rq_lock, flags);
+ }
+ /* Request restarted or requeued, handle next. */
+ spin_lock_irqsave(&bdev->lock, flags);
+ continue;
+ }
+
+ if (scm_test_cluster_request(scmrq)) {
+ scm_cluster_request_irq(scmrq);
+ spin_lock_irqsave(&bdev->lock, flags);
+ continue;
+ }
+
+ scm_request_finish(scmrq);
+ atomic_dec(&bdev->queued_reqs);
+ spin_lock_irqsave(&bdev->lock, flags);
+ }
+ spin_unlock_irqrestore(&bdev->lock, flags);
+ /* Look out for more requests. */
+ blk_run_queue(bdev->rq);
+}
+
+int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
+{
+ struct request_queue *rq;
+ int len, ret = -ENOMEM;
+ unsigned int devindex, nr_max_blk;
+
+ devindex = atomic_inc_return(&nr_devices) - 1;
+ /* scma..scmz + scmaa..scmzz */
+ if (devindex > 701) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bdev->scmdev = scmdev;
+ spin_lock_init(&bdev->rq_lock);
+ spin_lock_init(&bdev->lock);
+ INIT_LIST_HEAD(&bdev->finished_requests);
+ atomic_set(&bdev->queued_reqs, 0);
+ tasklet_init(&bdev->tasklet,
+ (void (*)(unsigned long)) scm_blk_tasklet,
+ (unsigned long) bdev);
+
+ rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
+ if (!rq)
+ goto out;
+
+ bdev->rq = rq;
+ nr_max_blk = min(scmdev->nr_max_block,
+ (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
+
+ blk_queue_logical_block_size(rq, 1 << 12);
+ blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
+ blk_queue_max_segments(rq, nr_max_blk);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+ scm_blk_dev_cluster_setup(bdev);
+
+ bdev->gendisk = alloc_disk(SCM_NR_PARTS);
+ if (!bdev->gendisk)
+ goto out_queue;
+
+ rq->queuedata = scmdev;
+ bdev->gendisk->driverfs_dev = &scmdev->dev;
+ bdev->gendisk->private_data = scmdev;
+ bdev->gendisk->fops = &scm_blk_devops;
+ bdev->gendisk->queue = rq;
+ bdev->gendisk->major = scm_major;
+ bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
+
+ len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
+ if (devindex > 25) {
+ len += snprintf(bdev->gendisk->disk_name + len,
+ DISK_NAME_LEN - len, "%c",
+ 'a' + (devindex / 26) - 1);
+ devindex = devindex % 26;
+ }
+ snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
+ 'a' + devindex);
+
+ /* 512 byte sectors */
+ set_capacity(bdev->gendisk, scmdev->size >> 9);
+ add_disk(bdev->gendisk);
+ return 0;
+
+out_queue:
+ blk_cleanup_queue(rq);
+out:
+ atomic_dec(&nr_devices);
+ return ret;
+}
+
+void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
+{
+ tasklet_kill(&bdev->tasklet);
+ del_gendisk(bdev->gendisk);
+ blk_cleanup_queue(bdev->gendisk->queue);
+ put_disk(bdev->gendisk);
+}
+
+static int __init scm_blk_init(void)
+{
+ int ret = -EINVAL;
+
+ if (!scm_cluster_size_valid())
+ goto out;
+
+ ret = register_blkdev(0, "scm");
+ if (ret < 0)
+ goto out;
+
+ scm_major = ret;
+ if (scm_alloc_rqs(nr_requests))
+ goto out_unreg;
+
+ scm_debug = debug_register("scm_log", 16, 1, 16);
+ if (!scm_debug)
+ goto out_free;
+
+ debug_register_view(scm_debug, &debug_hex_ascii_view);
+ debug_set_level(scm_debug, 2);
+
+ ret = scm_drv_init();
+ if (ret)
+ goto out_dbf;
+
+ return ret;
+
+out_dbf:
+ debug_unregister(scm_debug);
+out_free:
+ scm_free_rqs();
+out_unreg:
+ unregister_blkdev(scm_major, "scm");
+out:
+ return ret;
+}
+module_init(scm_blk_init);
+
+static void __exit scm_blk_cleanup(void)
+{
+ scm_drv_cleanup();
+ debug_unregister(scm_debug);
+ scm_free_rqs();
+ unregister_blkdev(scm_major, "scm");
+}
+module_exit(scm_blk_cleanup);
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
new file mode 100644
index 000000000000..7ac6bad919ef
--- /dev/null
+++ b/drivers/s390/block/scm_blk.h
@@ -0,0 +1,117 @@
+#ifndef SCM_BLK_H
+#define SCM_BLK_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/list.h>
+
+#include <asm/debug.h>
+#include <asm/eadm.h>
+
+#define SCM_NR_PARTS 8
+#define SCM_QUEUE_DELAY 5
+
+struct scm_blk_dev {
+ struct tasklet_struct tasklet;
+ struct request_queue *rq;
+ struct gendisk *gendisk;
+ struct scm_device *scmdev;
+ spinlock_t rq_lock; /* guard the request queue */
+ spinlock_t lock; /* guard the rest of the blockdev */
+ atomic_t queued_reqs;
+ struct list_head finished_requests;
+#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+ struct list_head cluster_list;
+#endif
+};
+
+struct scm_request {
+ struct scm_blk_dev *bdev;
+ struct request *request;
+ struct aidaw *aidaw;
+ struct aob *aob;
+ struct list_head list;
+ u8 retries;
+ int error;
+#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+ struct {
+ enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
+ struct list_head list;
+ void **buf;
+ } cluster;
+#endif
+};
+
+#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
+
+int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
+void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_irq(struct scm_device *, void *, int);
+
+void scm_request_finish(struct scm_request *);
+void scm_request_requeue(struct scm_request *);
+
+int scm_drv_init(void);
+void scm_drv_cleanup(void);
+
+#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+void __scm_free_rq_cluster(struct scm_request *);
+int __scm_alloc_rq_cluster(struct scm_request *);
+void scm_request_cluster_init(struct scm_request *);
+bool scm_reserve_cluster(struct scm_request *);
+void scm_release_cluster(struct scm_request *);
+void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
+bool scm_need_cluster_request(struct scm_request *);
+void scm_initiate_cluster_request(struct scm_request *);
+void scm_cluster_request_irq(struct scm_request *);
+bool scm_test_cluster_request(struct scm_request *);
+bool scm_cluster_size_valid(void);
+#else
+#define __scm_free_rq_cluster(scmrq) {}
+#define __scm_alloc_rq_cluster(scmrq) 0
+#define scm_request_cluster_init(scmrq) {}
+#define scm_reserve_cluster(scmrq) true
+#define scm_release_cluster(scmrq) {}
+#define scm_blk_dev_cluster_setup(bdev) {}
+#define scm_need_cluster_request(scmrq) false
+#define scm_initiate_cluster_request(scmrq) {}
+#define scm_cluster_request_irq(scmrq) {}
+#define scm_test_cluster_request(scmrq) false
+#define scm_cluster_size_valid() true
+#endif
+
+extern debug_info_t *scm_debug;
+
+#define SCM_LOG(imp, txt) do { \
+ debug_text_event(scm_debug, imp, txt); \
+ } while (0)
+
+static inline void SCM_LOG_HEX(int level, void *data, int length)
+{
+ if (level > scm_debug->level)
+ return;
+ while (length > 0) {
+ debug_event(scm_debug, level, data, length);
+ length -= scm_debug->buf_size;
+ data += scm_debug->buf_size;
+ }
+}
+
+static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
+{
+ struct {
+ u64 address;
+ u8 oper_state;
+ u8 rank;
+ } __packed data = {
+ .address = scmdev->address,
+ .oper_state = scmdev->attrs.oper_state,
+ .rank = scmdev->attrs.rank,
+ };
+
+ SCM_LOG_HEX(level, &data, sizeof(data));
+}
+
+#endif /* SCM_BLK_H */
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
new file mode 100644
index 000000000000..f4bb61b0cea1
--- /dev/null
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -0,0 +1,228 @@
+/*
+ * Block driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
+static unsigned int write_cluster_size = 64;
+module_param(write_cluster_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_cluster_size,
+ "Number of pages used for contiguous writes.");
+
+#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
+
+void __scm_free_rq_cluster(struct scm_request *scmrq)
+{
+ int i;
+
+ if (!scmrq->cluster.buf)
+ return;
+
+ for (i = 0; i < 2 * write_cluster_size; i++)
+ free_page((unsigned long) scmrq->cluster.buf[i]);
+
+ kfree(scmrq->cluster.buf);
+}
+
+int __scm_alloc_rq_cluster(struct scm_request *scmrq)
+{
+ int i;
+
+ scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
+ GFP_KERNEL);
+ if (!scmrq->cluster.buf)
+ return -ENOMEM;
+
+ for (i = 0; i < 2 * write_cluster_size; i++) {
+ scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
+ if (!scmrq->cluster.buf[i])
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&scmrq->cluster.list);
+ return 0;
+}
+
+void scm_request_cluster_init(struct scm_request *scmrq)
+{
+ scmrq->cluster.state = CLUSTER_NONE;
+}
+
+static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+{
+ unsigned long firstA, lastA, firstB, lastB;
+
+ firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
+ lastA = (((u64) blk_rq_pos(A->request) << 9) +
+ blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+
+ firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
+ lastB = (((u64) blk_rq_pos(B->request) << 9) +
+ blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+
+ return (firstB <= lastA && firstA <= lastB);
+}
+
+bool scm_reserve_cluster(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ struct scm_request *iter;
+
+ if (write_cluster_size == 0)
+ return true;
+
+ spin_lock(&bdev->lock);
+ list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
+ if (clusters_intersect(scmrq, iter) &&
+ (rq_data_dir(scmrq->request) == WRITE ||
+ rq_data_dir(iter->request) == WRITE)) {
+ spin_unlock(&bdev->lock);
+ return false;
+ }
+ }
+ list_add(&scmrq->cluster.list, &bdev->cluster_list);
+ spin_unlock(&bdev->lock);
+
+ return true;
+}
+
+void scm_release_cluster(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ unsigned long flags;
+
+ if (write_cluster_size == 0)
+ return;
+
+ spin_lock_irqsave(&bdev->lock, flags);
+ list_del(&scmrq->cluster.list);
+ spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
+void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
+{
+ INIT_LIST_HEAD(&bdev->cluster_list);
+ blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
+}
+
+static void scm_prepare_cluster_request(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ struct scm_device *scmdev = bdev->gendisk->private_data;
+ struct request *req = scmrq->request;
+ struct aidaw *aidaw = scmrq->aidaw;
+ struct msb *msb = &scmrq->aob->msb[0];
+ struct req_iterator iter;
+ struct bio_vec *bv;
+ int i = 0;
+ u64 addr;
+
+ switch (scmrq->cluster.state) {
+ case CLUSTER_NONE:
+ scmrq->cluster.state = CLUSTER_READ;
+ /* fall through */
+ case CLUSTER_READ:
+ scmrq->aob->request.msb_count = 1;
+ msb->bs = MSB_BS_4K;
+ msb->oc = MSB_OC_READ;
+ msb->flags = MSB_FLAG_IDA;
+ msb->data_addr = (u64) aidaw;
+ msb->blk_count = write_cluster_size;
+
+ addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+ msb->scm_addr = round_down(addr, CLUSTER_SIZE);
+
+ if (msb->scm_addr !=
+ round_down(addr + (u64) blk_rq_bytes(req) - 1,
+ CLUSTER_SIZE))
+ msb->blk_count = 2 * write_cluster_size;
+
+ for (i = 0; i < msb->blk_count; i++) {
+ aidaw->data_addr = (u64) scmrq->cluster.buf[i];
+ aidaw++;
+ }
+
+ break;
+ case CLUSTER_WRITE:
+ msb->oc = MSB_OC_WRITE;
+
+ for (addr = msb->scm_addr;
+ addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
+ addr += PAGE_SIZE) {
+ aidaw->data_addr = (u64) scmrq->cluster.buf[i];
+ aidaw++;
+ i++;
+ }
+ rq_for_each_segment(bv, req, iter) {
+ aidaw->data_addr = (u64) page_address(bv->bv_page);
+ aidaw++;
+ i++;
+ }
+ for (; i < msb->blk_count; i++) {
+ aidaw->data_addr = (u64) scmrq->cluster.buf[i];
+ aidaw++;
+ }
+ break;
+ }
+}
+
+bool scm_need_cluster_request(struct scm_request *scmrq)
+{
+ if (rq_data_dir(scmrq->request) == READ)
+ return false;
+
+ return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+}
+
+/* Called with queue lock held. */
+void scm_initiate_cluster_request(struct scm_request *scmrq)
+{
+ scm_prepare_cluster_request(scmrq);
+ if (scm_start_aob(scmrq->aob))
+ scm_request_requeue(scmrq);
+}
+
+bool scm_test_cluster_request(struct scm_request *scmrq)
+{
+ return scmrq->cluster.state != CLUSTER_NONE;
+}
+
+void scm_cluster_request_irq(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ unsigned long flags;
+
+ switch (scmrq->cluster.state) {
+ case CLUSTER_NONE:
+ BUG();
+ break;
+ case CLUSTER_READ:
+ if (scmrq->error) {
+ scm_request_finish(scmrq);
+ break;
+ }
+ scmrq->cluster.state = CLUSTER_WRITE;
+ spin_lock_irqsave(&bdev->rq_lock, flags);
+ scm_initiate_cluster_request(scmrq);
+ spin_unlock_irqrestore(&bdev->rq_lock, flags);
+ break;
+ case CLUSTER_WRITE:
+ scm_request_finish(scmrq);
+ break;
+ }
+}
+
+bool scm_cluster_size_valid(void)
+{
+ return write_cluster_size == 0 || write_cluster_size == 32 ||
+ write_cluster_size == 64 || write_cluster_size == 128;
+}
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
new file mode 100644
index 000000000000..9fa0a908607b
--- /dev/null
+++ b/drivers/s390/block/scm_drv.c
@@ -0,0 +1,81 @@
+/*
+ * Device driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "scm_block"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
+static void notify(struct scm_device *scmdev)
+{
+ pr_info("%lu: The capabilities of the SCM increment changed\n",
+ (unsigned long) scmdev->address);
+ SCM_LOG(2, "State changed");
+ SCM_LOG_STATE(2, scmdev);
+}
+
+static int scm_probe(struct scm_device *scmdev)
+{
+ struct scm_blk_dev *bdev;
+ int ret;
+
+ SCM_LOG(2, "probe");
+ SCM_LOG_STATE(2, scmdev);
+
+ if (scmdev->attrs.oper_state != OP_STATE_GOOD)
+ return -EINVAL;
+
+ bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&scmdev->dev, bdev);
+ ret = scm_blk_dev_setup(bdev, scmdev);
+ if (ret) {
+ dev_set_drvdata(&scmdev->dev, NULL);
+ kfree(bdev);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int scm_remove(struct scm_device *scmdev)
+{
+ struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+ scm_blk_dev_cleanup(bdev);
+ dev_set_drvdata(&scmdev->dev, NULL);
+ kfree(bdev);
+
+ return 0;
+}
+
+static struct scm_driver scm_drv = {
+ .drv = {
+ .name = "scm_block",
+ .owner = THIS_MODULE,
+ },
+ .notify = notify,
+ .probe = scm_probe,
+ .remove = scm_remove,
+ .handler = scm_blk_irq,
+};
+
+int __init scm_drv_init(void)
+{
+ return scm_driver_register(&scm_drv);
+}
+
+void scm_drv_cleanup(void)
+{
+ scm_driver_unregister(&scm_drv);
+}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb07577e8fd4..699fd3e363df 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -35,7 +35,6 @@ static struct raw3270_fn con3270_fn;
*/
struct con3270 {
struct raw3270_view view;
- spinlock_t lock;
struct list_head freemem; /* list of free memory for strings. */
/* Output stuff. */
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 5b8b8592d311..f4ff515db251 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -571,8 +571,11 @@ static int __init mon_init(void)
if (rc)
goto out_iucv;
monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!monreader_device)
+ if (!monreader_device) {
+ rc = -ENOMEM;
goto out_driver;
+ }
+
dev_set_name(monreader_device, "monreader-dev");
monreader_device->bus = &iucv_bus;
monreader_device->parent = iucv_root;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 3fcc000efc53..4fa21f7e2308 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -334,7 +334,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
reg->receiver_fn(evbuf);
spin_lock_irqsave(&sclp_lock, flags);
} else if (reg == NULL)
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
}
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 4be63be73445..3b13d58fe87b 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -463,7 +463,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
/* Use write priority message */
sccb->msg_buf.header.type = EVTYP_PMSGCMD;
else
- return -ENOSYS;
+ return -EOPNOTSUPP;
buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
buffer->request.status = SCLP_REQ_FILLED;
buffer->request.callback = sclp_writedata_callback;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index c06be6cc2fc3..ea664dd4f56d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -15,7 +15,6 @@
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/idals.h>
-#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtio.h>
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index c5816ad9ed7d..8c760c036832 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -100,11 +100,7 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device,
struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t);
-struct tape_request *tape_std_bread(struct tape_device *, struct request *);
-void tape_std_free_bread(struct tape_request *);
void tape_std_check_locate(struct tape_device *, struct tape_request *);
-struct tape_request *tape_std_bwrite(struct request *,
- struct tape_device *, int);
/* Some non-mtop commands. */
int tape_std_assign(struct tape_device *);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c131bc40f962..9b3a24e8d3a0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -321,7 +321,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
* only allow for blocking reads to be open
*/
if (filp->f_flags & O_NONBLOCK)
- return -ENOSYS;
+ return -EOPNOTSUPP;
/* Be sure this device hasn't already been opened */
spin_lock_bh(&logptr->priv_lock);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index e1b700a19648..8c4a386e97f6 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -8,6 +8,8 @@ ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
+obj-$(CONFIG_EADM_SCH) += eadm_sch.o
+obj-$(CONFIG_SCM_BUS) += scm.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index cfe0c087fe5c..4d51a7c4eb8b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -52,6 +52,11 @@ int chsc_error_from_response(int response)
return -EINVAL;
case 0x0004:
return -EOPNOTSUPP;
+ case 0x000b:
+ return -EBUSY;
+ case 0x0100:
+ case 0x0102:
+ return -ENOMEM;
default:
return -EIO;
}
@@ -393,6 +398,20 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
}
}
+static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm change notification\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_update_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: updating change notification"
+ " failed (rc=%d).\n", ret);
+}
+
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
/* Check if we might have lost some information. */
@@ -414,6 +433,9 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
case 8: /* channel-path-configuration notification */
chsc_process_sei_chp_config(sei_area);
break;
+ case 12: /* scm change notification */
+ chsc_process_sei_scm_change(sei_area);
+ break;
default: /* other stuff */
CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
sei_area->cc);
@@ -1047,3 +1069,33 @@ out:
return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
+
+/**
+ * chsc_scm_info() - store SCM information (SSI)
+ * @scm_area: request and response block for SSI
+ * @token: continuation token
+ *
+ * Returns 0 on success.
+ */
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
+{
+ int ccode, ret;
+
+ memset(scm_area, 0, sizeof(*scm_area));
+ scm_area->request.length = 0x0020;
+ scm_area->request.code = 0x004C;
+ scm_area->reqtok = token;
+
+ ccode = chsc(scm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(scm_area->response.code);
+ if (ret != 0)
+ CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
+ scm_area->response.code);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scm_info);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 3f15b2aaeaea..662dab4b93e6 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <asm/css_chars.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/schid.h>
@@ -118,4 +119,46 @@ int chsc_error_from_response(int response);
int chsc_siosl(struct subchannel_id schid);
+/* Functions and definitions to query storage-class memory. */
+struct sale {
+ u64 sa;
+ u32 p:4;
+ u32 op_state:4;
+ u32 data_state:4;
+ u32 rank:4;
+ u32 r:1;
+ u32:7;
+ u32 rid:8;
+ u32:32;
+} __packed;
+
+struct chsc_scm_info {
+ struct chsc_header request;
+ u32:32;
+ u64 reqtok;
+ u32 reserved1[4];
+ struct chsc_header response;
+ u64:56;
+ u8 rq;
+ u32 mbc;
+ u64 msa;
+ u16 is;
+ u16 mmc;
+ u32 mci;
+ u64 nr_scm_ini;
+ u64 nr_scm_unini;
+ u32 reserved2[10];
+ u64 restok;
+ struct sale scmal[248];
+} __packed;
+
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+
+#ifdef CONFIG_SCM_BUS
+int scm_update_information(void);
+#else /* CONFIG_SCM_BUS */
+#define scm_update_information() 0
+#endif /* CONFIG_SCM_BUS */
+
+
#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 33d1ef703593..8e927b9f285f 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1029,7 +1029,7 @@ extern void do_reipl_asm(__u32 schid);
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
- struct subchannel_id schid;
+ struct subchannel_id uninitialized_var(schid);
s390_reset_system(NULL, NULL);
if (reipl_find_schid(devid, &schid) != 0)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 21908e67bf67..b4d572f65f07 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -445,6 +445,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
put_device(&sch->dev);
}
}
+EXPORT_SYMBOL_GPL(css_sched_sch_todo);
static void css_sch_todo(struct work_struct *work)
{
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
new file mode 100644
index 000000000000..6c9673400464
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.c
@@ -0,0 +1,401 @@
+/*
+ * Driver for s390 eadm subchannels
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/isc.h>
+#include <asm/cio.h>
+#include <asm/scsw.h>
+#include <asm/eadm.h>
+
+#include "eadm_sch.h"
+#include "ioasm.h"
+#include "cio.h"
+#include "css.h"
+#include "orb.h"
+
+MODULE_DESCRIPTION("driver for s390 eadm subchannels");
+MODULE_LICENSE("GPL");
+
+#define EADM_TIMEOUT (5 * HZ)
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(eadm_list);
+
+static debug_info_t *eadm_debug;
+
+#define EADM_LOG(imp, txt) do { \
+ debug_text_event(eadm_debug, imp, txt); \
+ } while (0)
+
+static void EADM_LOG_HEX(int level, void *data, int length)
+{
+ if (level > eadm_debug->level)
+ return;
+ while (length > 0) {
+ debug_event(eadm_debug, level, data, length);
+ length -= eadm_debug->buf_size;
+ data += eadm_debug->buf_size;
+ }
+}
+
+static void orb_init(union orb *orb)
+{
+ memset(orb, 0, sizeof(union orb));
+ orb->eadm.compat1 = 1;
+ orb->eadm.compat2 = 1;
+ orb->eadm.fmt = 1;
+ orb->eadm.x = 1;
+}
+
+static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
+{
+ union orb *orb = &get_eadm_private(sch)->orb;
+ int cc;
+
+ orb_init(orb);
+ orb->eadm.aob = (u32)__pa(aob);
+ orb->eadm.intparm = (u32)(addr_t)sch;
+ orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
+
+ EADM_LOG(6, "start");
+ EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
+
+ cc = ssch(sch->schid, orb);
+ switch (cc) {
+ case 0:
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
+ break;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ case 3: /* not operational */
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int eadm_subchannel_clear(struct subchannel *sch)
+{
+ int cc;
+
+ cc = csch(sch->schid);
+ if (cc)
+ return -ENODEV;
+
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
+ return 0;
+}
+
+static void eadm_subchannel_timeout(unsigned long data)
+{
+ struct subchannel *sch = (struct subchannel *) data;
+
+ spin_lock_irq(sch->lock);
+ EADM_LOG(1, "timeout");
+ EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
+ if (eadm_subchannel_clear(sch))
+ EADM_LOG(0, "clear failed");
+ spin_unlock_irq(sch->lock);
+}
+
+static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ if (expires == 0) {
+ del_timer(&private->timer);
+ return;
+ }
+ if (timer_pending(&private->timer)) {
+ if (mod_timer(&private->timer, jiffies + expires))
+ return;
+ }
+ private->timer.function = eadm_subchannel_timeout;
+ private->timer.data = (unsigned long) sch;
+ private->timer.expires = jiffies + expires;
+ add_timer(&private->timer);
+}
+
+static void eadm_subchannel_irq(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
+ struct irb *irb = (struct irb *)&S390_lowcore.irb;
+ int error = 0;
+
+ EADM_LOG(6, "irq");
+ EADM_LOG_HEX(6, irb, sizeof(*irb));
+
+ kstat_cpu(smp_processor_id()).irqs[IOINT_ADM]++;
+
+ if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
+ && scsw->eswf == 1 && irb->esw.eadm.erw.r)
+ error = -EIO;
+
+ if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
+ error = -ETIMEDOUT;
+
+ eadm_subchannel_set_timeout(sch, 0);
+
+ if (private->state != EADM_BUSY) {
+ EADM_LOG(1, "irq unsol");
+ EADM_LOG_HEX(1, irb, sizeof(*irb));
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ return;
+ }
+ scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
+ private->state = EADM_IDLE;
+}
+
+static struct subchannel *eadm_get_idle_sch(void)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(private, &eadm_list, head) {
+ sch = private->sch;
+ spin_lock(sch->lock);
+ if (private->state == EADM_IDLE) {
+ private->state = EADM_BUSY;
+ list_move_tail(&private->head, &eadm_list);
+ spin_unlock(sch->lock);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return sch;
+ }
+ spin_unlock(sch->lock);
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return NULL;
+}
+
+static int eadm_start_aob(struct aob *aob)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+ int ret;
+
+ sch = eadm_get_idle_sch();
+ if (!sch)
+ return -EBUSY;
+
+ spin_lock_irqsave(sch->lock, flags);
+ eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
+ ret = eadm_subchannel_start(sch, aob);
+ if (!ret)
+ goto out_unlock;
+
+ /* Handle start subchannel failure. */
+ eadm_subchannel_set_timeout(sch, 0);
+ private = get_eadm_private(sch);
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+
+static int eadm_subchannel_probe(struct subchannel *sch)
+{
+ struct eadm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&private->head);
+ init_timer(&private->timer);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, private);
+ private->state = EADM_IDLE;
+ private->sch = sch;
+ sch->isc = EADM_SCH_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret) {
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ kfree(private);
+ goto out;
+ }
+ spin_unlock_irq(sch->lock);
+
+ spin_lock_irq(&list_lock);
+ list_add(&private->head, &eadm_list);
+ spin_unlock_irq(&list_lock);
+
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+out:
+ return ret;
+}
+
+static void eadm_quiesce(struct subchannel *sch)
+{
+ int ret;
+
+ do {
+ spin_lock_irq(sch->lock);
+ ret = cio_disable_subchannel(sch);
+ spin_unlock_irq(sch->lock);
+ } while (ret == -EBUSY);
+}
+
+static int eadm_subchannel_remove(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ spin_lock_irq(&list_lock);
+ list_del(&private->head);
+ spin_unlock_irq(&list_lock);
+
+ eadm_quiesce(sch);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+
+ kfree(private);
+
+ return 0;
+}
+
+static void eadm_subchannel_shutdown(struct subchannel *sch)
+{
+ eadm_quiesce(sch);
+}
+
+static int eadm_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int eadm_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+/**
+ * eadm_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ struct eadm_private *private;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+
+ if (cio_update_schib(sch)) {
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ goto out_unlock;
+ }
+ private = get_eadm_private(sch);
+ if (private->state == EADM_NOT_OPER)
+ private->state = EADM_IDLE;
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+
+static struct css_device_id eadm_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
+
+static struct css_driver eadm_subchannel_driver = {
+ .drv = {
+ .name = "eadm_subchannel",
+ .owner = THIS_MODULE,
+ },
+ .subchannel_type = eadm_subchannel_ids,
+ .irq = eadm_subchannel_irq,
+ .probe = eadm_subchannel_probe,
+ .remove = eadm_subchannel_remove,
+ .shutdown = eadm_subchannel_shutdown,
+ .sch_event = eadm_subchannel_sch_event,
+ .freeze = eadm_subchannel_freeze,
+ .thaw = eadm_subchannel_restore,
+ .restore = eadm_subchannel_restore,
+};
+
+static struct eadm_ops eadm_ops = {
+ .eadm_start = eadm_start_aob,
+ .owner = THIS_MODULE,
+};
+
+static int __init eadm_sch_init(void)
+{
+ int ret;
+
+ if (!css_general_characteristics.eadm)
+ return -ENXIO;
+
+ eadm_debug = debug_register("eadm_log", 16, 1, 16);
+ if (!eadm_debug)
+ return -ENOMEM;
+
+ debug_register_view(eadm_debug, &debug_hex_ascii_view);
+ debug_set_level(eadm_debug, 2);
+
+ isc_register(EADM_SCH_ISC);
+ ret = css_driver_register(&eadm_subchannel_driver);
+ if (ret)
+ goto cleanup;
+
+ register_eadm_ops(&eadm_ops);
+ return ret;
+
+cleanup:
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+ return ret;
+}
+
+static void __exit eadm_sch_exit(void)
+{
+ unregister_eadm_ops(&eadm_ops);
+ css_driver_unregister(&eadm_subchannel_driver);
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+}
+module_init(eadm_sch_init);
+module_exit(eadm_sch_exit);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
new file mode 100644
index 000000000000..2779be093982
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.h
@@ -0,0 +1,20 @@
+#ifndef EADM_SCH_H
+#define EADM_SCH_H
+
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include "orb.h"
+
+struct eadm_private {
+ union orb orb;
+ enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct timer_list timer;
+ struct list_head head;
+ struct subchannel *sch;
+} __aligned(8);
+
+#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
+#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
+
+#endif
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
index 45a9865c2b36..7a640530e7f5 100644
--- a/drivers/s390/cio/orb.h
+++ b/drivers/s390/cio/orb.h
@@ -59,9 +59,33 @@ struct tm_orb {
u32:32;
} __packed __aligned(4);
+/*
+ * eadm operation request block
+ */
+struct eadm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:4;
+ u32 compat1:1;
+ u32 compat2:1;
+ u32:21;
+ u32 x:1;
+ u32 aob;
+ u32 css_prio:8;
+ u32:8;
+ u32 scm_prio:8;
+ u32:8;
+ u32:29;
+ u32 fmt:3;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
union orb {
struct cmd_orb cmd;
struct tm_orb tm;
+ struct eadm_orb eadm;
} __packed __aligned(4);
#endif /* S390_ORB_H */
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index e1f646800ddb..7f8b973da298 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -37,10 +37,14 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
} while (0)
-#define DBF_HEX(addr, len) \
- do { \
- debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \
- } while (0)
+static inline void DBF_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
+ len -= qdio_dbf_setup->buf_size;
+ addr += qdio_dbf_setup->buf_size;
+ }
+}
#define DBF_ERROR(text...) \
do { \
@@ -49,11 +53,14 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
} while (0)
-#define DBF_ERROR_HEX(addr, len) \
- do { \
- debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \
- } while (0)
-
+static inline void DBF_ERROR_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
+ len -= qdio_dbf_error->buf_size;
+ addr += qdio_dbf_error->buf_size;
+ }
+}
#define DBF_DEV_EVENT(level, device, text...) \
do { \
@@ -64,10 +71,15 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
} \
} while (0)
-#define DBF_DEV_HEX(level, device, addr, len) \
- do { \
- debug_event(device->debug_area, level, (void*)(addr), len); \
- } while (0)
+static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
+ int len, int level)
+{
+ while (len > 0) {
+ debug_event(dev->debug_area, level, addr, len);
+ len -= dev->debug_area->buf_size;
+ addr += dev->debug_area->buf_size;
+ }
+}
void qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
new file mode 100644
index 000000000000..bcf20f3aa51b
--- /dev/null
+++ b/drivers/s390/cio/scm.c
@@ -0,0 +1,317 @@
+/*
+ * Recognize and maintain s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/eadm.h>
+#include "chsc.h"
+
+static struct device *scm_root;
+static struct eadm_ops *eadm_ops;
+static DEFINE_MUTEX(eadm_ops_mutex);
+
+#define to_scm_dev(n) container_of(n, struct scm_device, dev)
+#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
+
+static int scmdev_probe(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
+}
+
+static int scmdev_remove(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+}
+
+static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=scm:scmdev");
+}
+
+static struct bus_type scm_bus_type = {
+ .name = "scm",
+ .probe = scmdev_probe,
+ .remove = scmdev_remove,
+ .uevent = scmdev_uevent,
+};
+
+/**
+ * scm_driver_register() - register a scm driver
+ * @scmdrv: driver to be registered
+ */
+int scm_driver_register(struct scm_driver *scmdrv)
+{
+ struct device_driver *drv = &scmdrv->drv;
+
+ drv->bus = &scm_bus_type;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_register);
+
+/**
+ * scm_driver_unregister() - deregister a scm driver
+ * @scmdrv: driver to be deregistered
+ */
+void scm_driver_unregister(struct scm_driver *scmdrv)
+{
+ driver_unregister(&scmdrv->drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_unregister);
+
+int scm_get_ref(void)
+{
+ int ret = 0;
+
+ mutex_lock(&eadm_ops_mutex);
+ if (!eadm_ops || !try_module_get(eadm_ops->owner))
+ ret = -ENOENT;
+ mutex_unlock(&eadm_ops_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scm_get_ref);
+
+void scm_put_ref(void)
+{
+ mutex_lock(&eadm_ops_mutex);
+ module_put(eadm_ops->owner);
+ mutex_unlock(&eadm_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(scm_put_ref);
+
+void register_eadm_ops(struct eadm_ops *ops)
+{
+ mutex_lock(&eadm_ops_mutex);
+ eadm_ops = ops;
+ mutex_unlock(&eadm_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(register_eadm_ops);
+
+void unregister_eadm_ops(struct eadm_ops *ops)
+{
+ mutex_lock(&eadm_ops_mutex);
+ eadm_ops = NULL;
+ mutex_unlock(&eadm_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_eadm_ops);
+
+int scm_start_aob(struct aob *aob)
+{
+ return eadm_ops->eadm_start(aob);
+}
+EXPORT_SYMBOL_GPL(scm_start_aob);
+
+void scm_irq_handler(struct aob *aob, int error)
+{
+ struct aob_rq_header *aobrq = (void *) aob->request.data;
+ struct scm_device *scmdev = aobrq->scmdev;
+ struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
+
+ scmdrv->handler(scmdev, aobrq->data, error);
+}
+EXPORT_SYMBOL_GPL(scm_irq_handler);
+
+#define scm_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scm_device *scmdev = to_scm_dev(dev); \
+ int ret; \
+ \
+ device_lock(dev); \
+ ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
+ device_unlock(dev); \
+ \
+ return ret; \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+scm_attr(persistence);
+scm_attr(oper_state);
+scm_attr(data_state);
+scm_attr(rank);
+scm_attr(release);
+scm_attr(res_id);
+
+static struct attribute *scmdev_attrs[] = {
+ &dev_attr_persistence.attr,
+ &dev_attr_oper_state.attr,
+ &dev_attr_data_state.attr,
+ &dev_attr_rank.attr,
+ &dev_attr_release.attr,
+ &dev_attr_res_id.attr,
+ NULL,
+};
+
+static struct attribute_group scmdev_attr_group = {
+ .attrs = scmdev_attrs,
+};
+
+static const struct attribute_group *scmdev_attr_groups[] = {
+ &scmdev_attr_group,
+ NULL,
+};
+
+static void scmdev_release(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ kfree(scmdev);
+}
+
+static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
+ unsigned int size, unsigned int max_blk_count)
+{
+ dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
+ scmdev->nr_max_block = max_blk_count;
+ scmdev->address = sale->sa;
+ scmdev->size = 1UL << size;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.persistence = sale->p;
+ scmdev->attrs.oper_state = sale->op_state;
+ scmdev->attrs.data_state = sale->data_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.release = sale->r;
+ scmdev->attrs.res_id = sale->rid;
+ scmdev->dev.parent = scm_root;
+ scmdev->dev.bus = &scm_bus_type;
+ scmdev->dev.release = scmdev_release;
+ scmdev->dev.groups = scmdev_attr_groups;
+}
+
+/*
+ * Check for state-changes, notify the driver and userspace.
+ */
+static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
+{
+ struct scm_driver *scmdrv;
+ bool changed;
+
+ device_lock(&scmdev->dev);
+ changed = scmdev->attrs.rank != sale->rank ||
+ scmdev->attrs.oper_state != sale->op_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.oper_state = sale->op_state;
+ if (!scmdev->dev.driver)
+ goto out;
+ scmdrv = to_scm_drv(scmdev->dev.driver);
+ if (changed && scmdrv->notify)
+ scmdrv->notify(scmdev);
+out:
+ device_unlock(&scmdev->dev);
+ if (changed)
+ kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int check_address(struct device *dev, void *data)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct sale *sale = data;
+
+ return scmdev->address == sale->sa;
+}
+
+static struct scm_device *scmdev_find(struct sale *sale)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
+
+ return dev ? to_scm_dev(dev) : NULL;
+}
+
+static int scm_add(struct chsc_scm_info *scm_info, size_t num)
+{
+ struct sale *sale, *scmal = scm_info->scmal;
+ struct scm_device *scmdev;
+ int ret;
+
+ for (sale = scmal; sale < scmal + num; sale++) {
+ scmdev = scmdev_find(sale);
+ if (scmdev) {
+ scmdev_update(scmdev, sale);
+ /* Release reference from scmdev_find(). */
+ put_device(&scmdev->dev);
+ continue;
+ }
+ scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
+ if (!scmdev)
+ return -ENODEV;
+ scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
+ ret = device_register(&scmdev->dev);
+ if (ret) {
+ /* Release reference from device_initialize(). */
+ put_device(&scmdev->dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int scm_update_information(void)
+{
+ struct chsc_scm_info *scm_info;
+ u64 token = 0;
+ size_t num;
+ int ret;
+
+ scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!scm_info)
+ return -ENOMEM;
+
+ do {
+ ret = chsc_scm_info(scm_info, token);
+ if (ret)
+ break;
+
+ num = (scm_info->response.length -
+ (offsetof(struct chsc_scm_info, scmal) -
+ offsetof(struct chsc_scm_info, response))
+ ) / sizeof(struct sale);
+
+ ret = scm_add(scm_info, num);
+ if (ret)
+ break;
+
+ token = scm_info->restok;
+ } while (token);
+
+ free_page((unsigned long)scm_info);
+
+ return ret;
+}
+
+static int __init scm_init(void)
+{
+ int ret;
+
+ ret = bus_register(&scm_bus_type);
+ if (ret)
+ return ret;
+
+ scm_root = root_device_register("scm");
+ if (IS_ERR(scm_root)) {
+ bus_unregister(&scm_bus_type);
+ return PTR_ERR(scm_root);
+ }
+
+ scm_update_information();
+ return 0;
+}
+subsys_initcall_sync(scm_init);
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index af3c7f16ea88..771faf7094d6 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -4,4 +4,5 @@
ap-objs := ap_bus.o
obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o zcrypt_cex4.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ae258a4b4e5e..7b865a7300e6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2012
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -62,13 +62,14 @@ static void ap_interrupt_handler(void *unused1, void *unused2);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
+static void ap_query_configuration(void);
/*
* Module description.
*/
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
- "Copyright IBM Corp. 2006");
+MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
+ "Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
/*
@@ -84,6 +85,7 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
static struct device *ap_root_device = NULL;
+static struct ap_config_info *ap_configuration;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);
@@ -158,6 +160,19 @@ static int ap_interrupts_available(void)
}
/**
+ * ap_configuration_available(): Test if AP configuration
+ * information is available.
+ *
+ * Returns 1 if AP configuration information is available.
+ */
+#ifdef CONFIG_64BIT
+static int ap_configuration_available(void)
+{
+ return test_facility(2) && test_facility(12);
+}
+#endif
+
+/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @queue_depth: Pointer to queue depth value
@@ -242,6 +257,26 @@ __ap_query_functions(ap_qid_t qid, unsigned int *functions)
}
#endif
+#ifdef CONFIG_64BIT
+static inline int __ap_query_configuration(struct ap_config_info *config)
+{
+ register unsigned long reg0 asm ("0") = 0x04000000UL;
+ register unsigned long reg1 asm ("1") = -EINVAL;
+ register unsigned char *reg2 asm ("2") = (unsigned char *)config;
+
+ asm volatile(
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: la %1,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg0), "+d" (reg1), "+d" (reg2)
+ :
+ : "cc");
+
+ return reg1;
+}
+#endif
+
/**
* ap_query_functions(): Query supported functions.
* @qid: The AP queue number
@@ -292,25 +327,6 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
}
/**
- * ap_4096_commands_availablen(): Check for availability of 4096 bit RSA
- * support.
- * @qid: The AP queue number
- *
- * Returns 1 if 4096 bit RSA keys are support fo the AP, returns 0 if not.
- */
-int ap_4096_commands_available(ap_qid_t qid)
-{
- unsigned int functions;
-
- if (ap_query_functions(qid, &functions))
- return 0;
-
- return test_ap_facility(functions, 1) &&
- test_ap_facility(functions, 2);
-}
-EXPORT_SYMBOL(ap_4096_commands_available);
-
-/**
* ap_queue_enable_interruption(): Enable interruption on an AP.
* @qid: The AP queue number
* @ind: the notification indicator byte
@@ -657,6 +673,34 @@ static ssize_t ap_request_count_show(struct device *dev,
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+static ssize_t ap_requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ int rc;
+
+ spin_lock_bh(&ap_dev->lock);
+ rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
+ spin_unlock_bh(&ap_dev->lock);
+ return rc;
+}
+
+static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+
+static ssize_t ap_pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ int rc;
+
+ spin_lock_bh(&ap_dev->lock);
+ rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
+ spin_unlock_bh(&ap_dev->lock);
+ return rc;
+}
+
+static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+
static ssize_t ap_modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -665,11 +709,23 @@ static ssize_t ap_modalias_show(struct device *dev,
static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
+static ssize_t ap_functions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
+}
+
+static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
+
static struct attribute *ap_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_depth.attr,
&dev_attr_request_count.attr,
+ &dev_attr_requestq_count.attr,
+ &dev_attr_pendingq_count.attr,
&dev_attr_modalias.attr,
+ &dev_attr_ap_functions.attr,
NULL
};
static struct attribute_group ap_dev_attr_group = {
@@ -772,6 +828,7 @@ static int ap_bus_resume(struct device *dev)
ap_suspend_flag = 0;
if (!ap_interrupts_available())
ap_interrupt_indicator = NULL;
+ ap_query_configuration();
if (!user_set_domain) {
ap_domain_index = -1;
ap_select_domain();
@@ -895,6 +952,20 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);
+void ap_bus_force_rescan(void)
+{
+ /* Delete the AP bus rescan timer. */
+ del_timer(&ap_config_timer);
+
+ /* Process a synchronous bus rescan. */
+ ap_scan_bus(NULL);
+
+ /* Set up the AP bus rescan timer again. */
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+}
+EXPORT_SYMBOL(ap_bus_force_rescan);
+
/*
* AP bus attributes.
*/
@@ -997,6 +1068,65 @@ static struct bus_attribute *const ap_bus_attrs[] = {
NULL,
};
+static inline int ap_test_config(unsigned int *field, unsigned int nr)
+{
+ if (nr > 0xFFu)
+ return 0;
+ return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
+}
+
+/*
+ * ap_test_config_card_id(): Test whether an AP card ID is configured.
+ * @id AP card ID
+ *
+ * Returns 0 if the card is not configured
+ * 1 if the card is configured or
+ * if the configuration information is not available
+ */
+static inline int ap_test_config_card_id(unsigned int id)
+{
+ if (!ap_configuration)
+ return 1;
+ return ap_test_config(ap_configuration->apm, id);
+}
+
+/*
+ * ap_test_config_domain(): Test whether an AP usage domain is configured.
+ * @domain AP usage domain ID
+ *
+ * Returns 0 if the usage domain is not configured
+ * 1 if the usage domain is configured or
+ * if the configuration information is not available
+ */
+static inline int ap_test_config_domain(unsigned int domain)
+{
+ if (!ap_configuration)
+ return 1;
+ return ap_test_config(ap_configuration->aqm, domain);
+}
+
+/**
+ * ap_query_configuration(): Query AP configuration information.
+ *
+ * Query information of installed cards and configured domains from AP.
+ */
+static void ap_query_configuration(void)
+{
+#ifdef CONFIG_64BIT
+ if (ap_configuration_available()) {
+ if (!ap_configuration)
+ ap_configuration =
+ kzalloc(sizeof(struct ap_config_info),
+ GFP_KERNEL);
+ if (ap_configuration)
+ __ap_query_configuration(ap_configuration);
+ } else
+ ap_configuration = NULL;
+#else
+ ap_configuration = NULL;
+#endif
+}
+
/**
* ap_select_domain(): Select an AP domain.
*
@@ -1005,6 +1135,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
static int ap_select_domain(void)
{
int queue_depth, device_type, count, max_count, best_domain;
+ ap_qid_t qid;
int rc, i, j;
/*
@@ -1018,9 +1149,13 @@ static int ap_select_domain(void)
best_domain = -1;
max_count = 0;
for (i = 0; i < AP_DOMAINS; i++) {
+ if (!ap_test_config_domain(i))
+ continue;
count = 0;
for (j = 0; j < AP_DEVICES; j++) {
- ap_qid_t qid = AP_MKQID(j, i);
+ if (!ap_test_config_card_id(j))
+ continue;
+ qid = AP_MKQID(j, i);
rc = ap_query_queue(qid, &queue_depth, &device_type);
if (rc)
continue;
@@ -1169,6 +1304,7 @@ static void ap_scan_bus(struct work_struct *unused)
unsigned int device_functions;
int rc, i;
+ ap_query_configuration();
if (ap_select_domain() != 0)
return;
for (i = 0; i < AP_DEVICES; i++) {
@@ -1176,7 +1312,10 @@ static void ap_scan_bus(struct work_struct *unused)
dev = bus_find_device(&ap_bus_type, NULL,
(void *)(unsigned long)qid,
__ap_scan_bus);
- rc = ap_query_queue(qid, &queue_depth, &device_type);
+ if (ap_test_config_card_id(i))
+ rc = ap_query_queue(qid, &queue_depth, &device_type);
+ else
+ rc = -ENODEV;
if (dev) {
if (rc == -EBUSY) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1217,29 +1356,22 @@ static void ap_scan_bus(struct work_struct *unused)
(unsigned long) ap_dev);
switch (device_type) {
case 0:
+ /* device type probing for old cards */
if (ap_probe_device_type(ap_dev)) {
kfree(ap_dev);
continue;
}
break;
- case 10:
- if (ap_query_functions(qid, &device_functions)) {
- kfree(ap_dev);
- continue;
- }
- if (test_ap_facility(device_functions, 3))
- ap_dev->device_type = AP_DEVICE_TYPE_CEX3C;
- else if (test_ap_facility(device_functions, 4))
- ap_dev->device_type = AP_DEVICE_TYPE_CEX3A;
- else {
- kfree(ap_dev);
- continue;
- }
- break;
default:
ap_dev->device_type = device_type;
}
+ rc = ap_query_functions(qid, &device_functions);
+ if (!rc)
+ ap_dev->functions = device_functions;
+ else
+ ap_dev->functions = 0u;
+
ap_dev->device.bus = &ap_bus_type;
ap_dev->device.parent = ap_root_device;
if (dev_set_name(&ap_dev->device, "card%02x",
@@ -1785,6 +1917,7 @@ int __init ap_module_init(void)
goto out_root;
}
+ ap_query_configuration();
if (ap_select_domain() == 0)
ap_scan_bus(NULL);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 52d61995af88..685f6cc022f9 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2012
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -83,13 +83,12 @@ int ap_queue_status_invalid_test(struct ap_queue_status *status)
return !(memcmp(status, &invalid, sizeof(struct ap_queue_status)));
}
-#define MAX_AP_FACILITY 31
-
-static inline int test_ap_facility(unsigned int function, unsigned int nr)
+#define AP_MAX_BITS 31
+static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
{
- if (nr > MAX_AP_FACILITY)
+ if (nr > AP_MAX_BITS)
return 0;
- return function & (unsigned int)(0x80000000 >> nr);
+ return (*ptr & (0x80000000u >> nr)) != 0;
}
#define AP_RESPONSE_NORMAL 0x00
@@ -117,6 +116,15 @@ static inline int test_ap_facility(unsigned int function, unsigned int nr)
#define AP_DEVICE_TYPE_CEX2C 7
#define AP_DEVICE_TYPE_CEX3A 8
#define AP_DEVICE_TYPE_CEX3C 9
+#define AP_DEVICE_TYPE_CEX4 10
+
+/*
+ * Known function facilities
+ */
+#define AP_FUNC_MEX4K 1
+#define AP_FUNC_CRT4K 2
+#define AP_FUNC_COPRO 3
+#define AP_FUNC_ACCEL 4
/*
* AP reset flag states
@@ -151,6 +159,7 @@ struct ap_device {
ap_qid_t qid; /* AP queue id. */
int queue_depth; /* AP queue depth.*/
int device_type; /* AP device type. */
+ unsigned int functions; /* AP device function bitfield. */
int unregistered; /* marks AP device as unregistered */
struct timer_list timeout; /* Timer for request timeouts. */
int reset; /* Reset required after req. timeout. */
@@ -183,6 +192,17 @@ struct ap_message {
struct ap_message *);
};
+struct ap_config_info {
+ unsigned int special_command:1;
+ unsigned int ap_extended:1;
+ unsigned char reserved1:6;
+ unsigned char reserved2[15];
+ unsigned int apm[8]; /* AP ID mask */
+ unsigned int aqm[8]; /* AP queue mask */
+ unsigned int adm[8]; /* AP domain mask */
+ unsigned char reserved4[16];
+} __packed;
+
#define AP_DEVICE(dt) \
.dev_type=(dt), \
.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
@@ -211,10 +231,9 @@ int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_device *ap_dev);
+void ap_bus_force_rescan(void);
int ap_module_init(void);
void ap_module_exit(void);
-int ap_4096_commands_available(ap_qid_t qid);
-
#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 2f94132246a1..31cfaa556072 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1,7 +1,7 @@
/*
* zcrypt 2.1.0
*
- * Copyright IBM Corp. 2001, 2006
+ * Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -9,6 +9,7 @@
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,25 +38,39 @@
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+#include "zcrypt_debug.h"
#include "zcrypt_api.h"
/*
* Module description.
*/
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
- "Copyright IBM Corp. 2001, 2006");
+MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
+ "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
+static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
+
+atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
+EXPORT_SYMBOL(zcrypt_rescan_req);
static int zcrypt_rng_device_add(void);
static void zcrypt_rng_device_remove(void);
+static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
+static LIST_HEAD(zcrypt_ops_list);
+
+static debug_info_t *zcrypt_dbf_common;
+static debug_info_t *zcrypt_dbf_devices;
+static struct dentry *debugfs_root;
+
/*
* Device attributes common for all crypto devices.
*/
@@ -85,6 +100,8 @@ static ssize_t zcrypt_online_store(struct device *dev,
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
zdev->online = online;
+ ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
+ zdev->online);
if (!online)
ap_flush_queue(zdev->ap_dev);
return count;
@@ -103,6 +120,24 @@ static struct attribute_group zcrypt_device_attr_group = {
};
/**
+ * Process a rescan of the transport layer.
+ *
+ * Returns 1 if the rescan has been processed, otherwise 0.
+ */
+static inline int zcrypt_process_rescan(void)
+{
+ if (atomic_read(&zcrypt_rescan_req)) {
+ atomic_set(&zcrypt_rescan_req, 0);
+ atomic_inc(&zcrypt_rescan_count);
+ ap_bus_force_rescan();
+ ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
+ atomic_inc_return(&zcrypt_rescan_count));
+ return 1;
+ }
+ return 0;
+}
+
+/**
* __zcrypt_increase_preference(): Increase preference of a crypto device.
* @zdev: Pointer the crypto device
*
@@ -190,6 +225,7 @@ struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
zdev->reply.length = max_response_size;
spin_lock_init(&zdev->lock);
INIT_LIST_HEAD(&zdev->list);
+ zdev->dbf_area = zcrypt_dbf_devices;
return zdev;
out_free:
@@ -215,6 +251,8 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
{
int rc;
+ if (!zdev->ops)
+ return -ENODEV;
rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
&zcrypt_device_attr_group);
if (rc)
@@ -223,6 +261,8 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
kref_init(&zdev->refcount);
spin_lock_bh(&zcrypt_device_lock);
zdev->online = 1; /* New devices are online by default. */
+ ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
+ zdev->online);
list_add_tail(&zdev->list, &zcrypt_device_list);
__zcrypt_increase_preference(zdev);
zcrypt_device_count++;
@@ -269,6 +309,67 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev)
}
EXPORT_SYMBOL(zcrypt_device_unregister);
+void zcrypt_msgtype_register(struct zcrypt_ops *zops)
+{
+ if (zops->owner) {
+ spin_lock_bh(&zcrypt_ops_list_lock);
+ list_add_tail(&zops->list, &zcrypt_ops_list);
+ spin_unlock_bh(&zcrypt_ops_list_lock);
+ }
+}
+EXPORT_SYMBOL(zcrypt_msgtype_register);
+
+void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
+{
+ spin_lock_bh(&zcrypt_ops_list_lock);
+ list_del_init(&zops->list);
+ spin_unlock_bh(&zcrypt_ops_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_msgtype_unregister);
+
+static inline
+struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
+{
+ struct zcrypt_ops *zops;
+ int found = 0;
+
+ spin_lock_bh(&zcrypt_ops_list_lock);
+ list_for_each_entry(zops, &zcrypt_ops_list, list) {
+ if ((zops->variant == variant) &&
+ (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&zcrypt_ops_list_lock);
+
+ if (!found)
+ return NULL;
+ return zops;
+}
+
+struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
+{
+ struct zcrypt_ops *zops = NULL;
+
+ zops = __ops_lookup(name, variant);
+ if (!zops) {
+ request_module(name);
+ zops = __ops_lookup(name, variant);
+ }
+ if ((!zops) || (!try_module_get(zops->owner)))
+ return NULL;
+ return zops;
+}
+EXPORT_SYMBOL(zcrypt_msgtype_request);
+
+void zcrypt_msgtype_release(struct zcrypt_ops *zops)
+{
+ if (zops)
+ module_put(zops->owner);
+}
+EXPORT_SYMBOL(zcrypt_msgtype_release);
+
/**
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
@@ -640,6 +741,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
do {
rc = zcrypt_rsa_modexpo(&mex);
} while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_modexpo(&mex);
+ } while (rc == -EAGAIN);
if (rc)
return rc;
return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -652,6 +758,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
do {
rc = zcrypt_rsa_crt(&crt);
} while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_crt(&crt);
+ } while (rc == -EAGAIN);
if (rc)
return rc;
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -664,6 +775,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
do {
rc = zcrypt_send_cprb(&xcRB);
} while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_send_cprb(&xcRB);
+ } while (rc == -EAGAIN);
if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
return -EFAULT;
return rc;
@@ -770,10 +886,15 @@ static long trans_modexpo32(struct file *filp, unsigned int cmd,
do {
rc = zcrypt_rsa_modexpo(&mex64);
} while (rc == -EAGAIN);
- if (!rc)
- rc = put_user(mex64.outputdatalength,
- &umex32->outputdatalength);
- return rc;
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_modexpo(&mex64);
+ } while (rc == -EAGAIN);
+ if (rc)
+ return rc;
+ return put_user(mex64.outputdatalength,
+ &umex32->outputdatalength);
}
struct compat_ica_rsa_modexpo_crt {
@@ -810,10 +931,15 @@ static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
do {
rc = zcrypt_rsa_crt(&crt64);
} while (rc == -EAGAIN);
- if (!rc)
- rc = put_user(crt64.outputdatalength,
- &ucrt32->outputdatalength);
- return rc;
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_crt(&crt64);
+ } while (rc == -EAGAIN);
+ if (rc)
+ return rc;
+ return put_user(crt64.outputdatalength,
+ &ucrt32->outputdatalength);
}
struct compat_ica_xcRB {
@@ -869,6 +995,11 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
do {
rc = zcrypt_send_cprb(&xcRB64);
} while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_send_cprb(&xcRB64);
+ } while (rc == -EAGAIN);
xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
xcRB32.reply_data_length = xcRB64.reply_data_length;
xcRB32.status = xcRB64.status;
@@ -1126,6 +1257,9 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
*/
if (zcrypt_rng_buffer_index == 0) {
rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ rc = zcrypt_rng((char *) zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
zcrypt_rng_buffer_index = rc / sizeof *data;
@@ -1178,6 +1312,30 @@ static void zcrypt_rng_device_remove(void)
mutex_unlock(&zcrypt_rng_mutex);
}
+int __init zcrypt_debug_init(void)
+{
+ debugfs_root = debugfs_create_dir("zcrypt", NULL);
+
+ zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
+ debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
+ debug_set_level(zcrypt_dbf_common, DBF_ERR);
+
+ zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
+ debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
+ debug_set_level(zcrypt_dbf_devices, DBF_ERR);
+
+ return 0;
+}
+
+void zcrypt_debug_exit(void)
+{
+ debugfs_remove(debugfs_root);
+ if (zcrypt_dbf_common)
+ debug_unregister(zcrypt_dbf_common);
+ if (zcrypt_dbf_devices)
+ debug_unregister(zcrypt_dbf_devices);
+}
+
/**
* zcrypt_api_init(): Module initialization.
*
@@ -1187,6 +1345,12 @@ int __init zcrypt_api_init(void)
{
int rc;
+ rc = zcrypt_debug_init();
+ if (rc)
+ goto out;
+
+ atomic_set(&zcrypt_rescan_req, 0);
+
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
if (rc < 0)
@@ -1216,6 +1380,7 @@ void zcrypt_api_exit(void)
{
remove_proc_entry("driver/z90crypt", NULL);
misc_deregister(&zcrypt_misc_device);
+ zcrypt_debug_exit();
}
module_init(zcrypt_api_init);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 7a32c4bc8ef9..89632919c993 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,7 +1,7 @@
/*
* zcrypt 2.1.0
*
- * Copyright IBM Corp. 2001, 2006
+ * Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -9,6 +9,7 @@
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,8 +29,10 @@
#ifndef _ZCRYPT_API_H_
#define _ZCRYPT_API_H_
-#include "ap_bus.h"
+#include <linux/atomic.h>
+#include <asm/debug.h>
#include <asm/zcrypt.h>
+#include "ap_bus.h"
/* deprecated status calls */
#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
@@ -87,6 +90,9 @@ struct zcrypt_ops {
struct ica_rsa_modexpo_crt *);
long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
long (*rng)(struct zcrypt_device *, char *);
+ struct list_head list; /* zcrypt ops list. */
+ struct module *owner;
+ int variant;
};
struct zcrypt_device {
@@ -108,14 +114,23 @@ struct zcrypt_device {
struct ap_message reply; /* Per-device reply structure. */
int max_exp_bit_length;
+
+ debug_info_t *dbf_area; /* debugging */
};
+/* transport layer rescanning */
+extern atomic_t zcrypt_rescan_req;
+
struct zcrypt_device *zcrypt_device_alloc(size_t);
void zcrypt_device_free(struct zcrypt_device *);
void zcrypt_device_get(struct zcrypt_device *);
int zcrypt_device_put(struct zcrypt_device *);
int zcrypt_device_register(struct zcrypt_device *);
void zcrypt_device_unregister(struct zcrypt_device *);
+void zcrypt_msgtype_register(struct zcrypt_ops *);
+void zcrypt_msgtype_unregister(struct zcrypt_ops *);
+struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int);
+void zcrypt_msgtype_release(struct zcrypt_ops *);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 744c668f586c..1e849d6e1dfe 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,13 +1,14 @@
/*
* zcrypt 2.1.0
*
- * Copyright IBM Corp. 2001, 2006
+ * Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -35,6 +36,7 @@
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_cex2a.h"
+#include "zcrypt_msgtype50.h"
#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
@@ -63,14 +65,12 @@ static struct ap_device_id zcrypt_cex2a_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
- "Copyright IBM Corp. 2001, 2006");
+MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
+ "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
-static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
- struct ap_message *);
static struct ap_driver zcrypt_cex2a_driver = {
.probe = zcrypt_cex2a_probe,
@@ -80,344 +80,6 @@ static struct ap_driver zcrypt_cex2a_driver = {
};
/**
- * Convert a ICAMEX message to a type50 MEX message.
- *
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
- * @mex: pointer to user input data
- *
- * Returns 0 on success or -EFAULT.
- */
-static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ica_rsa_modexpo *mex)
-{
- unsigned char *mod, *exp, *inp;
- int mod_len;
-
- mod_len = mex->inputdatalength;
-
- if (mod_len <= 128) {
- struct type50_meb1_msg *meb1 = ap_msg->message;
- memset(meb1, 0, sizeof(*meb1));
- ap_msg->length = sizeof(*meb1);
- meb1->header.msg_type_code = TYPE50_TYPE_CODE;
- meb1->header.msg_len = sizeof(*meb1);
- meb1->keyblock_type = TYPE50_MEB1_FMT;
- mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
- exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
- inp = meb1->message + sizeof(meb1->message) - mod_len;
- } else if (mod_len <= 256) {
- struct type50_meb2_msg *meb2 = ap_msg->message;
- memset(meb2, 0, sizeof(*meb2));
- ap_msg->length = sizeof(*meb2);
- meb2->header.msg_type_code = TYPE50_TYPE_CODE;
- meb2->header.msg_len = sizeof(*meb2);
- meb2->keyblock_type = TYPE50_MEB2_FMT;
- mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
- exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
- inp = meb2->message + sizeof(meb2->message) - mod_len;
- } else {
- /* mod_len > 256 = 4096 bit RSA Key */
- struct type50_meb3_msg *meb3 = ap_msg->message;
- memset(meb3, 0, sizeof(*meb3));
- ap_msg->length = sizeof(*meb3);
- meb3->header.msg_type_code = TYPE50_TYPE_CODE;
- meb3->header.msg_len = sizeof(*meb3);
- meb3->keyblock_type = TYPE50_MEB3_FMT;
- mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
- exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
- inp = meb3->message + sizeof(meb3->message) - mod_len;
- }
-
- if (copy_from_user(mod, mex->n_modulus, mod_len) ||
- copy_from_user(exp, mex->b_key, mod_len) ||
- copy_from_user(inp, mex->inputdata, mod_len))
- return -EFAULT;
- return 0;
-}
-
-/**
- * Convert a ICACRT message to a type50 CRT message.
- *
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
- * @crt: pointer to user input data
- *
- * Returns 0 on success or -EFAULT.
- */
-static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ica_rsa_modexpo_crt *crt)
-{
- int mod_len, short_len, long_len, long_offset, limit;
- unsigned char *p, *q, *dp, *dq, *u, *inp;
-
- mod_len = crt->inputdatalength;
- short_len = mod_len / 2;
- long_len = mod_len / 2 + 8;
-
- /*
- * CEX2A cannot handle p, dp, or U > 128 bytes.
- * If we have one of these, we need to do extra checking.
- * For CEX3A the limit is 256 bytes.
- */
- if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
- limit = 256;
- else
- limit = 128;
-
- if (long_len > limit) {
- /*
- * zcrypt_rsa_crt already checked for the leading
- * zeroes of np_prime, bp_key and u_mult_inc.
- */
- long_offset = long_len - limit;
- long_len = limit;
- } else
- long_offset = 0;
-
- /*
- * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
- * the larger message structure.
- */
- if (long_len <= 64) {
- struct type50_crb1_msg *crb1 = ap_msg->message;
- memset(crb1, 0, sizeof(*crb1));
- ap_msg->length = sizeof(*crb1);
- crb1->header.msg_type_code = TYPE50_TYPE_CODE;
- crb1->header.msg_len = sizeof(*crb1);
- crb1->keyblock_type = TYPE50_CRB1_FMT;
- p = crb1->p + sizeof(crb1->p) - long_len;
- q = crb1->q + sizeof(crb1->q) - short_len;
- dp = crb1->dp + sizeof(crb1->dp) - long_len;
- dq = crb1->dq + sizeof(crb1->dq) - short_len;
- u = crb1->u + sizeof(crb1->u) - long_len;
- inp = crb1->message + sizeof(crb1->message) - mod_len;
- } else if (long_len <= 128) {
- struct type50_crb2_msg *crb2 = ap_msg->message;
- memset(crb2, 0, sizeof(*crb2));
- ap_msg->length = sizeof(*crb2);
- crb2->header.msg_type_code = TYPE50_TYPE_CODE;
- crb2->header.msg_len = sizeof(*crb2);
- crb2->keyblock_type = TYPE50_CRB2_FMT;
- p = crb2->p + sizeof(crb2->p) - long_len;
- q = crb2->q + sizeof(crb2->q) - short_len;
- dp = crb2->dp + sizeof(crb2->dp) - long_len;
- dq = crb2->dq + sizeof(crb2->dq) - short_len;
- u = crb2->u + sizeof(crb2->u) - long_len;
- inp = crb2->message + sizeof(crb2->message) - mod_len;
- } else {
- /* long_len >= 256 */
- struct type50_crb3_msg *crb3 = ap_msg->message;
- memset(crb3, 0, sizeof(*crb3));
- ap_msg->length = sizeof(*crb3);
- crb3->header.msg_type_code = TYPE50_TYPE_CODE;
- crb3->header.msg_len = sizeof(*crb3);
- crb3->keyblock_type = TYPE50_CRB3_FMT;
- p = crb3->p + sizeof(crb3->p) - long_len;
- q = crb3->q + sizeof(crb3->q) - short_len;
- dp = crb3->dp + sizeof(crb3->dp) - long_len;
- dq = crb3->dq + sizeof(crb3->dq) - short_len;
- u = crb3->u + sizeof(crb3->u) - long_len;
- inp = crb3->message + sizeof(crb3->message) - mod_len;
- }
-
- if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
- copy_from_user(q, crt->nq_prime, short_len) ||
- copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
- copy_from_user(dq, crt->bq_key, short_len) ||
- copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
- copy_from_user(inp, crt->inputdata, mod_len))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * Copy results from a type 80 reply message back to user space.
- *
- * @zdev: crypto device pointer
- * @reply: reply AP message.
- * @data: pointer to user output data
- * @length: size of user output data
- *
- * Returns 0 on success or -EFAULT.
- */
-static int convert_type80(struct zcrypt_device *zdev,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
-{
- struct type80_hdr *t80h = reply->message;
- unsigned char *data;
-
- if (t80h->len < sizeof(*t80h) + outputdatalength) {
- /* The result is too short, the CEX2A card may not do that.. */
- zdev->online = 0;
- return -EAGAIN; /* repeat the request on a different device. */
- }
- if (zdev->user_space_type == ZCRYPT_CEX2A)
- BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
- else
- BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
- data = reply->message + t80h->len - outputdatalength;
- if (copy_to_user(outputdata, data, outputdatalength))
- return -EFAULT;
- return 0;
-}
-
-static int convert_response(struct zcrypt_device *zdev,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
-{
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
- case TYPE82_RSP_CODE:
- case TYPE88_RSP_CODE:
- return convert_error(zdev, reply);
- case TYPE80_RSP_CODE:
- return convert_type80(zdev, reply,
- outputdata, outputdatalength);
- default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- return -EAGAIN; /* repeat the request on a different device. */
- }
-}
-
-/**
- * This function is called from the AP bus code after a crypto request
- * "msg" has finished with the reply message "reply".
- * It is called from tasklet context.
- * @ap_dev: pointer to the AP device
- * @msg: pointer to the AP message
- * @reply: pointer to the AP reply message
- */
-static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
- struct ap_message *msg,
- struct ap_message *reply)
-{
- static struct error_hdr error_reply = {
- .type = TYPE82_RSP_CODE,
- .reply_code = REP82_ERROR_MACHINE_FAILURE,
- };
- struct type80_hdr *t80h;
- int length;
-
- /* Copy the reply message to the request message buffer. */
- if (IS_ERR(reply)) {
- memcpy(msg->message, &error_reply, sizeof(error_reply));
- goto out;
- }
- t80h = reply->message;
- if (t80h->type == TYPE80_RSP_CODE) {
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
- length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
- else
- length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
- memcpy(msg->message, reply->message, length);
- } else
- memcpy(msg->message, reply->message, sizeof error_reply);
-out:
- complete((struct completion *) msg->private);
-}
-
-static atomic_t zcrypt_step = ATOMIC_INIT(0);
-
-/**
- * The request distributor calls this function if it picked the CEX2A
- * device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
- * CEX2A device to the request distributor
- * @mex: pointer to the modexpo request buffer
- */
-static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
- struct ica_rsa_modexpo *mex)
-{
- struct ap_message ap_msg;
- struct completion work;
- int rc;
-
- ap_init_message(&ap_msg);
- if (zdev->user_space_type == ZCRYPT_CEX2A)
- ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
- else
- ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_cex2a_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &work;
- rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
- if (rc)
- goto out_free;
- init_completion(&work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&work);
- if (rc == 0)
- rc = convert_response(zdev, &ap_msg, mex->outputdata,
- mex->outputdatalength);
- else
- /* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
- kfree(ap_msg.message);
- return rc;
-}
-
-/**
- * The request distributor calls this function if it picked the CEX2A
- * device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
- * CEX2A device to the request distributor
- * @crt: pointer to the modexpoc_crt request buffer
- */
-static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
- struct ica_rsa_modexpo_crt *crt)
-{
- struct ap_message ap_msg;
- struct completion work;
- int rc;
-
- ap_init_message(&ap_msg);
- if (zdev->user_space_type == ZCRYPT_CEX2A)
- ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
- else
- ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_cex2a_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &work;
- rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
- if (rc)
- goto out_free;
- init_completion(&work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&work);
- if (rc == 0)
- rc = convert_response(zdev, &ap_msg, crt->outputdata,
- crt->outputdatalength);
- else
- /* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
- kfree(ap_msg.message);
- return rc;
-}
-
-/**
- * The crypto operations for a CEX2A card.
- */
-static struct zcrypt_ops zcrypt_cex2a_ops = {
- .rsa_modexpo = zcrypt_cex2a_modexpo,
- .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
-};
-
-/**
* Probe function for CEX2A cards. It always accepts the AP device
* since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP device.
@@ -449,7 +111,8 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
- if (ap_4096_commands_available(ap_dev->qid)) {
+ if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
}
@@ -457,16 +120,18 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
zdev->speed_rating = CEX3A_SPEED_RATING;
break;
}
- if (zdev != NULL) {
- zdev->ap_dev = ap_dev;
- zdev->ops = &zcrypt_cex2a_ops;
- zdev->online = 1;
- ap_dev->reply = &zdev->reply;
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
- }
+ if (!zdev)
+ return -ENODEV;
+ zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
+ MSGTYPE50_VARIANT_DEFAULT);
+ zdev->ap_dev = ap_dev;
+ zdev->online = 1;
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
if (rc) {
ap_dev->private = NULL;
+ zcrypt_msgtype_release(zdev->ops);
zcrypt_device_free(zdev);
}
return rc;
@@ -479,8 +144,10 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev = ap_dev->private;
+ struct zcrypt_ops *zops = zdev->ops;
zcrypt_device_unregister(zdev);
+ zcrypt_msgtype_release(zops);
}
int __init zcrypt_cex2a_init(void)
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
new file mode 100644
index 000000000000..ce1226398ac9
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright IBM Corp. 2012
+ * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+#include "zcrypt_error.h"
+#include "zcrypt_cex4.h"
+
+#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
+#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
+#define CEX4A_MAX_MOD_SIZE_4K 512 /* 4096 bits */
+
+#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
+#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
+
+#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
+#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
+
+#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
+#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
+
+#define CEX4_CLEANUP_TIME (15*HZ)
+
+static struct ap_device_id zcrypt_cex4_ids[] = {
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
+ "Copyright IBM Corp. 2012");
+MODULE_LICENSE("GPL");
+
+static int zcrypt_cex4_probe(struct ap_device *ap_dev);
+static void zcrypt_cex4_remove(struct ap_device *ap_dev);
+
+static struct ap_driver zcrypt_cex4_driver = {
+ .probe = zcrypt_cex4_probe,
+ .remove = zcrypt_cex4_remove,
+ .ids = zcrypt_cex4_ids,
+ .request_timeout = CEX4_CLEANUP_TIME,
+};
+
+/**
+ * Probe function for CEX4 cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex4_probe(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev = NULL;
+ int rc = 0;
+
+ switch (ap_dev->device_type) {
+ case AP_DEVICE_TYPE_CEX4:
+ if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
+ zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->type_string = "CEX4A";
+ zdev->user_space_type = ZCRYPT_CEX3A;
+ zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
+ if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
+ zdev->max_mod_size =
+ CEX4A_MAX_MOD_SIZE_4K;
+ zdev->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_4K;
+ } else {
+ zdev->max_mod_size =
+ CEX4A_MAX_MOD_SIZE_2K;
+ zdev->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_2K;
+ }
+ zdev->short_crt = 1;
+ zdev->speed_rating = CEX4A_SPEED_RATING;
+ zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
+ MSGTYPE50_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
+ zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->type_string = "CEX4C";
+ zdev->user_space_type = ZCRYPT_CEX3C;
+ zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ zdev->short_crt = 0;
+ zdev->speed_rating = CEX4C_SPEED_RATING;
+ zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
+ }
+ break;
+ }
+ if (!zdev)
+ return -ENODEV;
+ zdev->ap_dev = ap_dev;
+ zdev->online = 1;
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
+ if (rc) {
+ zcrypt_msgtype_release(zdev->ops);
+ ap_dev->private = NULL;
+ zcrypt_device_free(zdev);
+ }
+ return rc;
+}
+
+/**
+ * This is called to remove the extended CEX4 driver information
+ * if an AP device is removed.
+ */
+static void zcrypt_cex4_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev = ap_dev->private;
+ struct zcrypt_ops *zops;
+
+ if (zdev) {
+ zops = zdev->ops;
+ zcrypt_device_unregister(zdev);
+ zcrypt_msgtype_release(zops);
+ }
+}
+
+int __init zcrypt_cex4_init(void)
+{
+ return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4");
+}
+
+void __exit zcrypt_cex4_exit(void)
+{
+ ap_driver_unregister(&zcrypt_cex4_driver);
+}
+
+module_init(zcrypt_cex4_init);
+module_exit(zcrypt_cex4_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h
new file mode 100644
index 000000000000..719571375ccc
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright IBM Corp. 2012
+ * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CEX4_H_
+#define _ZCRYPT_CEX4_H_
+
+int zcrypt_cex4_init(void);
+void zcrypt_cex4_exit(void);
+
+#endif /* _ZCRYPT_CEX4_H_ */
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
new file mode 100644
index 000000000000..841ea72e4a4e
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright IBM Corp. 2012
+ * Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
+ */
+#ifndef ZCRYPT_DEBUG_H
+#define ZCRYPT_DEBUG_H
+
+#include <asm/debug.h>
+#include "zcrypt_api.h"
+
+/* that gives us 15 characters in the text event views */
+#define ZCRYPT_DBF_LEN 16
+
+/* sort out low debug levels early to avoid wasted sprintfs */
+static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+ return (level <= dbf_grp->level);
+}
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 6 /* informational */
+
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define ZCRYPT_DBF_COMMON(level, text...) \
+ do { \
+ if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
+ char debug_buffer[ZCRYPT_DBF_LEN]; \
+ snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
+ debug_text_event(zcrypt_dbf_common, level, \
+ debug_buffer); \
+ } \
+ } while (0)
+
+#define ZCRYPT_DBF_DEVICES(level, text...) \
+ do { \
+ if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
+ char debug_buffer[ZCRYPT_DBF_LEN]; \
+ snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
+ debug_text_event(zcrypt_dbf_devices, level, \
+ debug_buffer); \
+ } \
+ } while (0)
+
+#define ZCRYPT_DBF_DEV(level, device, text...) \
+ do { \
+ if (zcrypt_dbf_passes(device->dbf_area, level)) { \
+ char debug_buffer[ZCRYPT_DBF_LEN]; \
+ snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
+ debug_text_event(device->dbf_area, level, \
+ debug_buffer); \
+ } \
+ } while (0)
+
+int zcrypt_debug_init(void);
+void zcrypt_debug_exit(void);
+
+#endif /* ZCRYPT_DEBUG_H */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 0965e2626d18..0079b6617211 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -26,6 +26,8 @@
#ifndef _ZCRYPT_ERROR_H_
#define _ZCRYPT_ERROR_H_
+#include <linux/atomic.h>
+#include "zcrypt_debug.h"
#include "zcrypt_api.h"
/**
@@ -108,16 +110,27 @@ static inline int convert_error(struct zcrypt_device *zdev,
* and then repeat the request.
*/
WARN_ON(1);
+ atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid,
+ zdev->online, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
// REP88_ERROR_MODULE_FAILURE // '10' CEX2A
/* If a card fails disable it and repeat the request. */
+ atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid,
+ zdev->online, ehdr->reply_code);
return -EAGAIN;
default:
zdev->online = 0;
+ ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
+ zdev->ap_dev->qid,
+ zdev->online, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
new file mode 100644
index 000000000000..035b6dc31b71
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -0,0 +1,531 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype50.h"
+
+#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
+
+#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
+
+#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 byte (4096 bit) modulus
+ * (max outputdatalength) +
+ * type80_hdr */
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
+ "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
+ struct ap_message *);
+
+/**
+ * The type 50 message family is associated with a CEX2A card.
+ *
+ * The six members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type50_hdr {
+ unsigned char reserved1;
+ unsigned char msg_type_code; /* 0x50 */
+ unsigned short msg_len;
+ unsigned char reserved2;
+ unsigned char ignored;
+ unsigned short reserved3;
+} __packed;
+
+#define TYPE50_TYPE_CODE 0x50
+
+#define TYPE50_MEB1_FMT 0x0001
+#define TYPE50_MEB2_FMT 0x0002
+#define TYPE50_MEB3_FMT 0x0003
+#define TYPE50_CRB1_FMT 0x0011
+#define TYPE50_CRB2_FMT 0x0012
+#define TYPE50_CRB3_FMT 0x0013
+
+/* Mod-Exp, with a small modulus */
+struct type50_meb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0001 */
+ unsigned char reserved[6];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+ unsigned char message[128];
+} __packed;
+
+/* Mod-Exp, with a large modulus */
+struct type50_meb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0002 */
+ unsigned char reserved[6];
+ unsigned char exponent[256];
+ unsigned char modulus[256];
+ unsigned char message[256];
+} __packed;
+
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0003 */
+ unsigned char reserved[6];
+ unsigned char exponent[512];
+ unsigned char modulus[512];
+ unsigned char message[512];
+} __packed;
+
+/* CRT, with a small modulus */
+struct type50_crb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0011 */
+ unsigned char reserved[6];
+ unsigned char p[64];
+ unsigned char q[64];
+ unsigned char dp[64];
+ unsigned char dq[64];
+ unsigned char u[64];
+ unsigned char message[128];
+} __packed;
+
+/* CRT, with a large modulus */
+struct type50_crb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0012 */
+ unsigned char reserved[6];
+ unsigned char p[128];
+ unsigned char q[128];
+ unsigned char dp[128];
+ unsigned char dq[128];
+ unsigned char u[128];
+ unsigned char message[256];
+} __packed;
+
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0013 */
+ unsigned char reserved[6];
+ unsigned char p[256];
+ unsigned char q[256];
+ unsigned char dp[256];
+ unsigned char dq[256];
+ unsigned char u[256];
+ unsigned char message[512];
+} __packed;
+
+/**
+ * The type 80 response family is associated with a CEX2A card.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+#define TYPE80_RSP_CODE 0x80
+
+struct type80_hdr {
+ unsigned char reserved1;
+ unsigned char type; /* 0x80 */
+ unsigned short len;
+ unsigned char code; /* 0x00 */
+ unsigned char reserved2[3];
+ unsigned char reserved3[8];
+} __packed;
+
+/**
+ * Convert a ICAMEX message to a type50 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @zreq: crypto request pointer
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ unsigned char *mod, *exp, *inp;
+ int mod_len;
+
+ mod_len = mex->inputdatalength;
+
+ if (mod_len <= 128) {
+ struct type50_meb1_msg *meb1 = ap_msg->message;
+ memset(meb1, 0, sizeof(*meb1));
+ ap_msg->length = sizeof(*meb1);
+ meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb1->header.msg_len = sizeof(*meb1);
+ meb1->keyblock_type = TYPE50_MEB1_FMT;
+ mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+ exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+ inp = meb1->message + sizeof(meb1->message) - mod_len;
+ } else if (mod_len <= 256) {
+ struct type50_meb2_msg *meb2 = ap_msg->message;
+ memset(meb2, 0, sizeof(*meb2));
+ ap_msg->length = sizeof(*meb2);
+ meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb2->header.msg_len = sizeof(*meb2);
+ meb2->keyblock_type = TYPE50_MEB2_FMT;
+ mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+ exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+ inp = meb2->message + sizeof(meb2->message) - mod_len;
+ } else {
+ /* mod_len > 256 bytes, i.e. a 4096 bit RSA key */
+ struct type50_meb3_msg *meb3 = ap_msg->message;
+ memset(meb3, 0, sizeof(*meb3));
+ ap_msg->length = sizeof(*meb3);
+ meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb3->header.msg_len = sizeof(*meb3);
+ meb3->keyblock_type = TYPE50_MEB3_FMT;
+ mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+ exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+ inp = meb3->message + sizeof(meb3->message) - mod_len;
+ }
+
+ if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+ copy_from_user(exp, mex->b_key, mod_len) ||
+ copy_from_user(inp, mex->inputdata, mod_len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Convert a ICACRT message to a type50 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @zreq: crypto request pointer
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ int mod_len, short_len, long_len, long_offset, limit;
+ unsigned char *p, *q, *dp, *dq, *u, *inp;
+
+ mod_len = crt->inputdatalength;
+ short_len = mod_len / 2;
+ long_len = mod_len / 2 + 8;
+
+ /*
+ * CEX2A cannot handle p, dp, or U > 128 bytes.
+ * If we have one of these, we need to do extra checking.
+ * For CEX3A the limit is 256 bytes.
+ */
+ if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
+ limit = 256;
+ else
+ limit = 128;
+
+ if (long_len > limit) {
+ /*
+ * zcrypt_rsa_crt already checked for the leading
+ * zeroes of np_prime, bp_key and u_mult_inc.
+ */
+ long_offset = long_len - limit;
+ long_len = limit;
+ } else
+ long_offset = 0;
+
+ /*
+ * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
+ * the larger message structure.
+ */
+ if (long_len <= 64) {
+ struct type50_crb1_msg *crb1 = ap_msg->message;
+ memset(crb1, 0, sizeof(*crb1));
+ ap_msg->length = sizeof(*crb1);
+ crb1->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb1->header.msg_len = sizeof(*crb1);
+ crb1->keyblock_type = TYPE50_CRB1_FMT;
+ p = crb1->p + sizeof(crb1->p) - long_len;
+ q = crb1->q + sizeof(crb1->q) - short_len;
+ dp = crb1->dp + sizeof(crb1->dp) - long_len;
+ dq = crb1->dq + sizeof(crb1->dq) - short_len;
+ u = crb1->u + sizeof(crb1->u) - long_len;
+ inp = crb1->message + sizeof(crb1->message) - mod_len;
+ } else if (long_len <= 128) {
+ struct type50_crb2_msg *crb2 = ap_msg->message;
+ memset(crb2, 0, sizeof(*crb2));
+ ap_msg->length = sizeof(*crb2);
+ crb2->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb2->header.msg_len = sizeof(*crb2);
+ crb2->keyblock_type = TYPE50_CRB2_FMT;
+ p = crb2->p + sizeof(crb2->p) - long_len;
+ q = crb2->q + sizeof(crb2->q) - short_len;
+ dp = crb2->dp + sizeof(crb2->dp) - long_len;
+ dq = crb2->dq + sizeof(crb2->dq) - short_len;
+ u = crb2->u + sizeof(crb2->u) - long_len;
+ inp = crb2->message + sizeof(crb2->message) - mod_len;
+ } else {
+ /* long_len > 128, use the largest (CRB3) message format */
+ struct type50_crb3_msg *crb3 = ap_msg->message;
+ memset(crb3, 0, sizeof(*crb3));
+ ap_msg->length = sizeof(*crb3);
+ crb3->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb3->header.msg_len = sizeof(*crb3);
+ crb3->keyblock_type = TYPE50_CRB3_FMT;
+ p = crb3->p + sizeof(crb3->p) - long_len;
+ q = crb3->q + sizeof(crb3->q) - short_len;
+ dp = crb3->dp + sizeof(crb3->dp) - long_len;
+ dq = crb3->dq + sizeof(crb3->dq) - short_len;
+ u = crb3->u + sizeof(crb3->u) - long_len;
+ inp = crb3->message + sizeof(crb3->message) - mod_len;
+ }
+
+ if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
+ copy_from_user(q, crt->nq_prime, short_len) ||
+ copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
+ copy_from_user(dq, crt->bq_key, short_len) ||
+ copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
+ copy_from_user(inp, crt->inputdata, mod_len))
+ return -EFAULT;
+
+ return 0;
+}
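A worked example of the long/short split above, assuming a hypothetical 256-byte (2048-bit) modulus handled by a CEX2A, where the per-field limit is 128 bytes:

    int mod_len   = 256;                /* crt->inputdatalength */
    int short_len = mod_len / 2;        /* 128 bytes for q and dq */
    int long_len  = mod_len / 2 + 8;    /* 136 bytes for p, dp and u */
    int limit     = 128;                /* CEX2A */
    int long_offset = long_len - limit; /* skip 8 leading (zero) bytes */
    /* long_len is then capped to 128 and the copy_from_user() calls */
    /* start long_offset bytes into np_prime, bp_key and u_mult_inv. */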
+
+/**
+ * Copy results from a type 80 reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int convert_type80(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type80_hdr *t80h = reply->message;
+ unsigned char *data;
+
+ if (t80h->len < sizeof(*t80h) + outputdatalength) {
+ /* The result is too short, the CEX2A card should not do that. */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+ if (zdev->user_space_type == ZCRYPT_CEX2A)
+ BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+ else
+ BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
+ data = reply->message + t80h->len - outputdatalength;
+ if (copy_to_user(outputdata, data, outputdatalength))
+ return -EFAULT;
+ return 0;
+}
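The type 80 reply keeps the result right-justified at the end of the message, which is why the data pointer above is derived from the total reply length. A short worked example with hypothetical numbers:

    /*
     * Example: t80h->len = 144 and outputdatalength = 128.
     * struct type80_hdr is 16 bytes, so
     *   data = reply->message + 144 - 128
     * points right behind the header and the last 128 bytes of the
     * reply are what gets copied back to user space.
     */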
+
+static int convert_response(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *) reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE80_RSP_CODE:
+ return convert_type80(zdev, reply,
+ outputdata, outputdatalength);
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct type80_hdr *t80h;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply)) {
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ goto out;
+ }
+ t80h = reply->message;
+ if (t80h->type == TYPE80_RSP_CODE) {
+ if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
+ length = min_t(int,
+ CEX2A_MAX_RESPONSE_SIZE, t80h->len);
+ else
+ length = min_t(int,
+ CEX3A_MAX_RESPONSE_SIZE, t80h->len);
+ memcpy(msg->message, reply->message, length);
+ } else
+ memcpy(msg->message, reply->message, sizeof(error_reply));
+out:
+ complete((struct completion *) msg->private);
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * CEX2A device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo *mex)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+ ap_init_message(&ap_msg);
+ if (zdev->user_space_type == ZCRYPT_CEX2A)
+ ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
+ GFP_KERNEL);
+ else
+ ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
+ GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_cex2a_receive;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&work);
+ if (rc == 0)
+ rc = convert_response(zdev, &ap_msg, mex->outputdata,
+ mex->outputdatalength);
+ else
+ /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+ kfree(ap_msg.message);
+ return rc;
+}
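The psmid built above has to be unique among outstanding requests so the receive callback can match a reply to its request. A worked example with hypothetical values:

    /*
     * psmid layout: PID in bits 63..32, zcrypt_step counter in bits 31..0.
     * E.g. pid 4242 (0x1092) and counter value 7 give
     *   psmid = (4242ULL << 32) + 7 = 0x0000109200000007
     */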
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * CEX2A device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+ ap_init_message(&ap_msg);
+ if (zdev->user_space_type == ZCRYPT_CEX2A)
+ ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
+ GFP_KERNEL);
+ else
+ ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
+ GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_cex2a_receive;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&work);
+ if (rc == 0)
+ rc = convert_response(zdev, &ap_msg, crt->outputdata,
+ crt->outputdatalength);
+ else
+ /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+ kfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The crypto operations for message type 50.
+ */
+static struct zcrypt_ops zcrypt_msgtype50_ops = {
+ .rsa_modexpo = zcrypt_cex2a_modexpo,
+ .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+ .owner = THIS_MODULE,
+ .variant = MSGTYPE50_VARIANT_DEFAULT,
+};
+
+int __init zcrypt_msgtype50_init(void)
+{
+ zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
+ return 0;
+}
+
+void __exit zcrypt_msgtype50_exit(void)
+{
+ zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
+}
+
+module_init(zcrypt_msgtype50_init);
+module_exit(zcrypt_msgtype50_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
new file mode 100644
index 000000000000..e56dc72c7733
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -0,0 +1,39 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_MSGTYPE50_H_
+#define _ZCRYPT_MSGTYPE50_H_
+
+#define MSGTYPE50_NAME "zcrypt_msgtype50"
+#define MSGTYPE50_VARIANT_DEFAULT 0
+
+#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
+#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
+
+int zcrypt_msgtype50_init(void);
+void zcrypt_msgtype50_exit(void);
+
+#endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
new file mode 100644
index 000000000000..7d97fa5a26d0
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -0,0 +1,856 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_cca_key.h"
+
+#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
+#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
+
+#define CEIL4(x) ((((x)+3)/4)*4)
+
+struct response_type {
+ struct completion work;
+ int type;
+};
+#define PCIXCC_RESPONSE_TYPE_ICA 0
+#define PCIXCC_RESPONSE_TYPE_XCRB 1
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
+ "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
+ struct ap_message *);
+
+/**
+ * CPRB
+ * Note that all shorts, ints and longs are little-endian.
+ * All pointer fields are 32 bits long and carry no meaning; the buffers
+ * they would point to follow the CPRB inline.
+ *
+ * A request CPRB is followed by a request_parameter_block.
+ *
+ * The request (or reply) parameter block is organized thus:
+ * function code
+ * VUD block
+ * key block
+ */
+struct CPRB {
+ unsigned short cprb_len; /* CPRB length */
+ unsigned char cprb_ver_id; /* CPRB version id. */
+ unsigned char pad_000; /* Alignment pad byte. */
+ unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
+ unsigned char srpi_verb; /* SRPI verb type */
+ unsigned char flags; /* flags */
+ unsigned char func_id[2]; /* function id */
+ unsigned char checkpoint_flag; /* */
+ unsigned char resv2; /* reserved */
+ unsigned short req_parml; /* request parameter buffer */
+ /* length 16-bit little endian */
+ unsigned char req_parmp[4]; /* request parameter buffer *
+ * pointer (means nothing: the *
+ * parameter buffer follows *
+ * the CPRB). */
+ unsigned char req_datal[4]; /* request data buffer */
+ /* length ULELONG */
+ unsigned char req_datap[4]; /* request data buffer */
+ /* pointer */
+ unsigned short rpl_parml; /* reply parameter buffer */
+ /* length 16-bit little endian */
+ unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
+ unsigned char rpl_parmp[4]; /* reply parameter buffer *
+ * pointer (means nothing: the *
+ * parameter buffer follows *
+ * the CPRB). */
+ unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
+ unsigned char rpl_datap[4]; /* reply data buffer */
+ /* pointer */
+ unsigned short ccp_rscode; /* server reason code ULESHORT */
+ unsigned short ccp_rtcode; /* server return code ULESHORT */
+ unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
+ unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
+ unsigned char repd_datal[4]; /* replied data length ULELONG */
+ unsigned char req_pc[2]; /* PC identifier */
+ unsigned char res_origin[8]; /* resource origin */
+ unsigned char mac_value[8]; /* Mac Value */
+ unsigned char logon_id[8]; /* Logon Identifier */
+ unsigned char usage_domain[2]; /* cdx */
+ unsigned char resv3[18]; /* reserved for requestor */
+ unsigned short svr_namel; /* server name length ULESHORT */
+ unsigned char svr_name[8]; /* server name */
+} __packed;
+
+struct function_and_rules_block {
+ unsigned char function_code[2];
+ unsigned short ulen;
+ unsigned char only_rule[8];
+} __packed;
+
+/**
+ * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
+ * card in a type6 message. The 3 fields that must be filled in at execution
+ * time are req_parml, rpl_parml and usage_domain.
+ * Everything about this interface is ascii/big-endian, since the
+ * device does *not* have 'Intel inside'.
+ *
+ * The CPRBX is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (one of:)
+ * + 0x000A 'PKCS-1.2' (MCL2 'PD')
+ * + 0x000A 'ZERO-PAD' (MCL2 'PK')
+ * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
+ * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
+ * - VUD block
+ */
+static struct CPRBX static_cprbx = {
+ .cprb_len = 0x00DC,
+ .cprb_ver_id = 0x02,
+ .func_id = {0x54, 0x32},
+};
+
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C', 'A',},
+ .function_code = {'P', 'K'},
+ };
+ static struct function_and_rules_block static_pke_fnr = {
+ .function_code = {'P', 'K'},
+ .ulen = 10,
+ .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
+ };
+ static struct function_and_rules_block static_pke_fnr_MCL2 = {
+ .function_code = {'P', 'K'},
+ .ulen = 10,
+ .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __packed * msg = ap_msg->message;
+ int size;
+
+ /* VUD.ciphertext */
+ msg->length = mex->inputdatalength + 2;
+ if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+ return -EFAULT;
+
+ /* Set up key which is located after the variable length text. */
+ size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + mex->inputdatalength;
+
+ /* message header, cprbx and f&r */
+ msg->hdr = static_type6_hdrX;
+ msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprbx = static_cprbx;
+ msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+
+ msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
+ static_pke_fnr_MCL2 : static_pke_fnr;
+
+ msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+ ap_msg->length = size;
+ return 0;
+}
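For orientation, the message assembled above ends up with the following on-the-wire layout (a sketch; the key block size depends on the token built by zcrypt_type6_mex_key_en()):

    /*
     * type6_hdr                 fixed header, offset1 = 0x58 (= sizeof(hdr))
     * CPRBX                     cprb_len = 0xDC
     * function_and_rules_block  12 bytes: 'PK' plus 'MRP' or 'ZERO-PAD' rule
     * length                    2 bytes, inputdatalength + 2
     * text[]                    inputdatalength bytes of ciphertext
     * key token                 appended by zcrypt_type6_mex_key_en()
     */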
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C', 'A',},
+ .function_code = {'P', 'D'},
+ };
+ static struct function_and_rules_block static_pkd_fnr = {
+ .function_code = {'P', 'D'},
+ .ulen = 10,
+ .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
+ };
+
+ static struct function_and_rules_block static_pkd_fnr_MCL2 = {
+ .function_code = {'P', 'D'},
+ .ulen = 10,
+ .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __packed * msg = ap_msg->message;
+ int size;
+
+ /* VUD.ciphertext */
+ msg->length = crt->inputdatalength + 2;
+ if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+ return -EFAULT;
+
+ /* Set up key which is located after the variable length text. */
+ size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
+
+ /* message header, cprbx and f&r */
+ msg->hdr = static_type6_hdrX;
+ msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprbx = static_cprbx;
+ msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
+ size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+ msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
+ static_pkd_fnr_MCL2 : static_pkd_fnr;
+
+ ap_msg->length = size;
+ return 0;
+}
+
+/**
+ * Convert an XCRB message to a type6 CPRB message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @xcRB: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT, -EINVAL.
+ */
+struct type86_fmt2_msg {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+} __packed;
+
+static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_xcRB *xcRB)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ } __packed * msg = ap_msg->message;
+
+ int rcblen = CEIL4(xcRB->request_control_blk_length);
+ int replylen;
+ char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+ char *function_code;
+
+ /* length checks */
+ ap_msg->length = sizeof(struct type6_hdr) +
+ CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
+ return -EINVAL;
+ replylen = sizeof(struct type86_fmt2_msg) +
+ CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if (replylen > MSGTYPE06_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ /* prepare type6 header */
+ msg->hdr = static_type6_hdrX;
+ memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
+ msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
+ if (xcRB->request_data_length) {
+ msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
+ msg->hdr.ToCardLen2 = xcRB->request_data_length;
+ }
+ msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
+ msg->hdr.FromCardLen2 = xcRB->reply_data_length;
+
+ /* prepare CPRB */
+ if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
+ xcRB->request_control_blk_length))
+ return -EFAULT;
+ if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
+ xcRB->request_control_blk_length)
+ return -EINVAL;
+ function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
+ memcpy(msg->hdr.function_code, function_code,
+ sizeof(msg->hdr.function_code));
+
+ if (memcmp(function_code, "US", 2) == 0)
+ ap_msg->special = 1;
+ else
+ ap_msg->special = 0;
+
+ /* copy data block */
+ if (xcRB->request_data_length &&
+ copy_from_user(req_data, xcRB->request_data_address,
+ xcRB->request_data_length))
+ return -EFAULT;
+ return 0;
+}
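The length checks above rely on CEIL4() rounding the control block up to a 4-byte boundary. A short worked example with hypothetical xcRB lengths:

    /*
     * CEIL4(x) = (((x)+3)/4)*4, e.g. CEIL4(61) == 64 and CEIL4(64) == 64.
     * For request_control_blk_length = 61 and request_data_length = 100:
     *   ap_msg->length = sizeof(struct type6_hdr) + 64 + 100
     * which must not exceed MSGTYPE06_MAX_MSG_SIZE (12 KiB).
     */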
+
+/**
+ * Copy results from a type 86 ICA reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+struct type86x_reply {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct CPRBX cprbx;
+ unsigned char pad[4]; /* 4 byte function code/rules block ? */
+ unsigned short length;
+ char text[0];
+} __packed;
+
+static int convert_type86_ica(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ static unsigned char static_pad[] = {
+ 0x00, 0x02,
+ 0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD,
+ 0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57,
+ 0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B,
+ 0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39,
+ 0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5,
+ 0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D,
+ 0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB,
+ 0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F,
+ 0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9,
+ 0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45,
+ 0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9,
+ 0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F,
+ 0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD,
+ 0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D,
+ 0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD,
+ 0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9,
+ 0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B,
+ 0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B,
+ 0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B,
+ 0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD,
+ 0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7,
+ 0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1,
+ 0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3,
+ 0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23,
+ 0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55,
+ 0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43,
+ 0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F,
+ 0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F,
+ 0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5,
+ 0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD,
+ 0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
+ 0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
+ };
+ struct type86x_reply *msg = reply->message;
+ unsigned short service_rc, service_rs;
+ unsigned int reply_len, pad_len;
+ char *data;
+
+ service_rc = msg->cprbx.ccp_rtcode;
+ if (unlikely(service_rc != 0)) {
+ service_rs = msg->cprbx.ccp_rscode;
+ if (service_rc == 8 && service_rs == 66)
+ return -EINVAL;
+ if (service_rc == 8 && service_rs == 65)
+ return -EINVAL;
+ if (service_rc == 8 && service_rs == 770)
+ return -EINVAL;
+ if (service_rc == 8 && service_rs == 783) {
+ zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+ return -EAGAIN;
+ }
+ if (service_rc == 12 && service_rs == 769)
+ return -EINVAL;
+ if (service_rc == 8 && service_rs == 72)
+ return -EINVAL;
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+ data = msg->text;
+ reply_len = msg->length - 2;
+ if (reply_len > outputdatalength)
+ return -EINVAL;
+ /*
+ * For all encipher requests, the length of the ciphertext (reply_len)
+ * will always equal the modulus length. For MEX decipher requests
+ * the output needs to get padded. Minimum pad size is 10.
+ *
+ * Currently, the cases where padding will be added are:
+ * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+ * ZERO-PAD and CRT is only supported for PKD requests)
+ * - PCICC, always
+ */
+ pad_len = outputdatalength - reply_len;
+ if (pad_len > 0) {
+ if (pad_len < 10)
+ return -EINVAL;
+ /* 'restore' padding left in the PCICC/PCIXCC card. */
+ if (copy_to_user(outputdata, static_pad, pad_len - 1))
+ return -EFAULT;
+ if (put_user(0, outputdata + pad_len - 1))
+ return -EFAULT;
+ }
+ /* Copy the crypto response to user space. */
+ if (copy_to_user(outputdata + pad_len, data, reply_len))
+ return -EFAULT;
+ return 0;
+}
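The restored prefix above is effectively PKCS#1 block type 2 padding: 0x00 0x02, non-zero filler taken from static_pad, a 0x00 separator, then the cleartext. Sketch of the user buffer for a hypothetical pad_len of 11:

    /*
     * outputdata[0]     = 0x00            (static_pad[0])
     * outputdata[1]     = 0x02            (static_pad[1])
     * outputdata[2..9]  = non-zero filler (static_pad[2..9])
     * outputdata[10]    = 0x00            (the put_user() above)
     * outputdata[11..]  = reply_len bytes copied from msg->text
     */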
+
+/**
+ * Copy results from a type 86 XCRB reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to XCRB
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ struct ica_xcRB *xcRB)
+{
+ struct type86_fmt2_msg *msg = reply->message;
+ char *data = reply->message;
+
+ /* Copy CPRB to user */
+ if (copy_to_user(xcRB->reply_control_blk_addr,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
+ return -EFAULT;
+ xcRB->reply_control_blk_length = msg->fmt2.count1;
+
+ /* Copy data buffer to user */
+ if (msg->fmt2.count2)
+ if (copy_to_user(xcRB->reply_data_addr,
+ data + msg->fmt2.offset2, msg->fmt2.count2))
+ return -EFAULT;
+ xcRB->reply_data_length = msg->fmt2.count2;
+ return 0;
+}
+
+static int convert_type86_rng(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char *buffer)
+{
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct CPRBX cprbx;
+ } __packed * msg = reply->message;
+ char *data = reply->message;
+
+ if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
+ return -EINVAL;
+ memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
+ return msg->fmt2.count2;
+}
+
+static int convert_response_ica(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type86x_reply *msg = reply->message;
+
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *) reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->cprbx.ccp_rtcode &&
+ (msg->cprbx.ccp_rscode == 0x14f) &&
+ (outputdatalength > 256)) {
+ if (zdev->max_exp_bit_length <= 17) {
+ zdev->max_exp_bit_length = 17;
+ return -EAGAIN;
+ } else
+ return -EINVAL;
+ }
+ if (msg->hdr.reply_code)
+ return convert_error(zdev, reply);
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_ica(zdev, reply,
+ outputdata, outputdatalength);
+ /* Fall through, no break, incorrect cprb version is an unknown
+ * response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+static int convert_response_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ struct ica_xcRB *xcRB)
+{
+ struct type86x_reply *msg = reply->message;
+
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *) reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ return convert_error(zdev, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code) {
+ memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
+ return convert_error(zdev, reply);
+ }
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_xcrb(zdev, reply, xcRB);
+ /* Fall through, no break, incorrect cprb version is an unknown
+ * response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+static int convert_response_rng(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char *data)
+{
+ struct type86x_reply *msg = reply->message;
+
+ switch (msg->hdr.type) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return -EINVAL;
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code)
+ return -EINVAL;
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_rng(zdev, reply, data);
+ /* Fall through, no break, incorrect cprb version is an unknown
+ * response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct response_type *resp_type =
+ (struct response_type *) msg->private;
+ struct type86x_reply *t86r;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply)) {
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ goto out;
+ }
+ t86r = reply->message;
+ if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ t86r->cprbx.cprb_ver_id == 0x02) {
+ switch (resp_type->type) {
+ case PCIXCC_RESPONSE_TYPE_ICA:
+ length = sizeof(struct type86x_reply)
+ + t86r->length - 2;
+ length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
+ memcpy(msg->message, reply->message, length);
+ break;
+ case PCIXCC_RESPONSE_TYPE_XCRB:
+ length = t86r->fmt2.offset2 + t86r->fmt2.count2;
+ length = min(MSGTYPE06_MAX_MSG_SIZE, length);
+ memcpy(msg->message, reply->message, length);
+ break;
+ default:
+ memcpy(msg->message, &error_reply,
+ sizeof(error_reply));
+ }
+ } else
+ memcpy(msg->message, reply->message, sizeof(error_reply));
+out:
+ complete(&(resp_type->work));
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCIXCC/CEX2C device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo *mex)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_ICA,
+ };
+ int rc;
+
+ ap_init_message(&ap_msg);
+ ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_msgtype6_receive;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0)
+ rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
+ mex->outputdatalength);
+ else
+ /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+ free_page((unsigned long) ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCIXCC/CEX2C device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_ICA,
+ };
+ int rc;
+
+ ap_init_message(&ap_msg);
+ ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_msgtype6_receive;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0)
+ rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
+ crt->outputdatalength);
+ else
+ /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+ free_page((unsigned long) ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a send_cprb request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCIXCC/CEX2C device to the request distributor
+ * @xcRB: pointer to the send_cprb request buffer
+ */
+static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev,
+ struct ica_xcRB *xcRB)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ };
+ int rc;
+
+ ap_init_message(&ap_msg);
+ ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_msgtype6_receive;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0)
+ rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
+ else
+ /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+ kzfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to generate random data.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCIXCC/CEX2C device to the request distributor
+ * @buffer: pointer to a memory page to return random data
+ */
+
+static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
+ char *buffer)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ };
+ int rc;
+
+ ap_init_message(&ap_msg);
+ ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.receive = zcrypt_msgtype6_receive;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0)
+ rc = convert_response_rng(zdev, &ap_msg, buffer);
+ else
+ /* Signal pending. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ kfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The crypto operations for a PCIXCC/CEX2C card.
+ */
+static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
+ .owner = THIS_MODULE,
+ .variant = MSGTYPE06_VARIANT_NORNG,
+ .rsa_modexpo = zcrypt_msgtype6_modexpo,
+ .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+ .send_cprb = zcrypt_msgtype6_send_cprb,
+};
+
+static struct zcrypt_ops zcrypt_msgtype6_ops = {
+ .owner = THIS_MODULE,
+ .variant = MSGTYPE06_VARIANT_DEFAULT,
+ .rsa_modexpo = zcrypt_msgtype6_modexpo,
+ .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+ .send_cprb = zcrypt_msgtype6_send_cprb,
+ .rng = zcrypt_msgtype6_rng,
+};
+
+int __init zcrypt_msgtype6_init(void)
+{
+ zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
+ zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
+ return 0;
+}
+
+void __exit zcrypt_msgtype6_exit(void)
+{
+ zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
+ zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
+}
+
+module_init(zcrypt_msgtype6_init);
+module_exit(zcrypt_msgtype6_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
new file mode 100644
index 000000000000..1e500d3c0735
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -0,0 +1,169 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_MSGTYPE6_H_
+#define _ZCRYPT_MSGTYPE6_H_
+
+#include <asm/zcrypt.h>
+
+#define MSGTYPE06_NAME "zcrypt_msgtype6"
+#define MSGTYPE06_VARIANT_DEFAULT 0
+#define MSGTYPE06_VARIANT_NORNG 1
+
+#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
+
+/**
+ * The type 6 message family is associated with PCICC or PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB, both of which
+ * are described below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type6_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x06 */
+ unsigned char reserved2[2]; /* 0x0000 */
+ unsigned char right[4]; /* 0x00000000 */
+ unsigned char reserved3[2]; /* 0x0000 */
+ unsigned char reserved4[2]; /* 0x0000 */
+ unsigned char apfs[4]; /* 0x00000000 */
+ unsigned int offset1; /* 0x00000058 (offset to CPRB) */
+ unsigned int offset2; /* 0x00000000 */
+ unsigned int offset3; /* 0x00000000 */
+ unsigned int offset4; /* 0x00000000 */
+ unsigned char agent_id[16]; /* PCICC: */
+ /* 0x0100 */
+ /* 0x4343412d4150504c202020 */
+ /* 0x010101 */
+ /* PCIXCC: */
+ /* 0x4341000000000000 */
+ /* 0x0000000000000000 */
+ unsigned char rqid[2]; /* rqid. internal to 603 */
+ unsigned char reserved5[2]; /* 0x0000 */
+ unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
+ unsigned char reserved6[2]; /* 0x0000 */
+ unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
+ unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
+ unsigned int ToCardLen3; /* 0x00000000 */
+ unsigned int ToCardLen4; /* 0x00000000 */
+ unsigned int FromCardLen1; /* response buffer length */
+ unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
+ unsigned int FromCardLen3; /* 0x00000000 */
+ unsigned int FromCardLen4; /* 0x00000000 */
+} __packed;
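The "(request CPRB len + 3) & -4" note for ToCardLen1 above is the same round-up-to-4 rule as the CEIL4() macro in zcrypt_msgtype6.c. A quick check with a hypothetical length:

    /*
     * (61 + 3) & -4 == 64 & 0xfffffffc == 64, identical to CEIL4(61).
     */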
+
+/**
+ * The type 86 message family is associated with PCICC and PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB. The CPRB is
+ * the same as the request CPRB, which is described above.
+ *
+ * If format is 1, an error condition exists and no data beyond
+ * the 8-byte message header is of interest.
+ *
+ * The non-error message is shown below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type86_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x86 */
+ unsigned char format; /* 0x01 (error) or 0x02 (ok) */
+ unsigned char reserved2; /* 0x00 */
+ unsigned char reply_code; /* reply code (see above) */
+ unsigned char reserved3[3]; /* 0x000000 */
+} __packed;
+
+#define TYPE86_RSP_CODE 0x86
+#define TYPE86_FMT2 0x02
+
+struct type86_fmt2_ext {
+ unsigned char reserved[4]; /* 0x00000000 */
+ unsigned char apfs[4]; /* final status */
+ unsigned int count1; /* length of CPRB + parameters */
+ unsigned int offset1; /* offset to CPRB */
+ unsigned int count2; /* 0x00000000 */
+ unsigned int offset2; /* db offset 0x00000000 for PKD */
+ unsigned int count3; /* 0x00000000 */
+ unsigned int offset3; /* 0x00000000 */
+ unsigned int count4; /* 0x00000000 */
+ unsigned int offset4; /* 0x00000000 */
+} __packed;
+
+/**
+ * Prepare a type6 CPRB message for random number generation
+ *
+ * @ap_dev: AP device pointer
+ * @ap_msg: pointer to AP message
+ * @random_number_length: number of random bytes to request
+ */
+static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
+ struct ap_message *ap_msg,
+ unsigned random_number_length)
+{
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed * msg = ap_msg->message;
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C', 'A'},
+ .function_code = {'R', 'L'},
+ .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
+ .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
+ };
+ static struct CPRBX local_cprbx = {
+ .cprb_len = 0x00dc,
+ .cprb_ver_id = 0x02,
+ .func_id = {0x54, 0x32},
+ .req_parml = sizeof(*msg) - sizeof(msg->hdr) -
+ sizeof(msg->cprbx),
+ .rpl_msgbl = sizeof(*msg) - sizeof(msg->hdr),
+ };
+
+ msg->hdr = static_type6_hdrX;
+ msg->hdr.FromCardLen2 = random_number_length;
+ msg->cprbx = local_cprbx;
+ msg->cprbx.rpl_datal = random_number_length;
+ msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
+ memcpy(msg->function_code, msg->hdr.function_code, 0x02);
+ msg->rule_length = 0x0a;
+ memcpy(msg->rule, "RANDOM ", 8);
+ msg->verb_length = 0x02;
+ msg->key_length = 0x02;
+ ap_msg->length = sizeof(*msg);
+}
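A condensed usage sketch of the helper above, mirroring zcrypt_msgtype6_rng() in zcrypt_msgtype6.c (error handling, psmid setup and the completion are omitted here; zdev is assumed to be the device picked by the request distributor):

    struct ap_message ap_msg;

    ap_init_message(&ap_msg);
    ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
    ap_msg.receive = zcrypt_msgtype6_receive;
    rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
    ap_queue_message(zdev->ap_dev, &ap_msg);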
+
+int zcrypt_msgtype6_init(void);
+void zcrypt_msgtype6_exit(void);
+
+#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index ccb4f8b60c75..c7275e303a0d 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -1,13 +1,14 @@
/*
* zcrypt 2.1.0
*
- * Copyright IBM Corp. 2001, 2006
+ * Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -35,9 +36,10 @@
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
-#include "zcrypt_pcicc.h"
+#include "zcrypt_msgtype6.h"
#include "zcrypt_pcixcc.h"
#include "zcrypt_cca_key.h"
+#include "zcrypt_msgtype6.h"
#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
@@ -75,14 +77,12 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
- "Copyright IBM Corp. 2001, 2006");
+MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
+ "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
-static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
- struct ap_message *);
static struct ap_driver zcrypt_pcixcc_driver = {
.probe = zcrypt_pcixcc_probe,
@@ -92,766 +92,6 @@ static struct ap_driver zcrypt_pcixcc_driver = {
};
/**
- * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
- * card in a type6 message. The 3 fields that must be filled in at execution
- * time are req_parml, rpl_parml and usage_domain.
- * Everything about this interface is ascii/big-endian, since the
- * device does *not* have 'Intel inside'.
- *
- * The CPRBX is followed immediately by the parm block.
- * The parm block contains:
- * - function code ('PD' 0x5044 or 'PK' 0x504B)
- * - rule block (one of:)
- * + 0x000A 'PKCS-1.2' (MCL2 'PD')
- * + 0x000A 'ZERO-PAD' (MCL2 'PK')
- * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
- * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
- * - VUD block
- */
-static struct CPRBX static_cprbx = {
- .cprb_len = 0x00DC,
- .cprb_ver_id = 0x02,
- .func_id = {0x54,0x32},
-};
-
-/**
- * Convert a ICAMEX message to a type6 MEX message.
- *
- * @zdev: crypto device pointer
- * @ap_msg: pointer to AP message
- * @mex: pointer to user input data
- *
- * Returns 0 on success or -EFAULT.
- */
-static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ica_rsa_modexpo *mex)
-{
- static struct type6_hdr static_type6_hdrX = {
- .type = 0x06,
- .offset1 = 0x00000058,
- .agent_id = {'C','A',},
- .function_code = {'P','K'},
- };
- static struct function_and_rules_block static_pke_fnr = {
- .function_code = {'P','K'},
- .ulen = 10,
- .only_rule = {'M','R','P',' ',' ',' ',' ',' '}
- };
- static struct function_and_rules_block static_pke_fnr_MCL2 = {
- .function_code = {'P','K'},
- .ulen = 10,
- .only_rule = {'Z','E','R','O','-','P','A','D'}
- };
- struct {
- struct type6_hdr hdr;
- struct CPRBX cprbx;
- struct function_and_rules_block fr;
- unsigned short length;
- char text[0];
- } __attribute__((packed)) *msg = ap_msg->message;
- int size;
-
- /* VUD.ciphertext */
- msg->length = mex->inputdatalength + 2;
- if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
- return -EFAULT;
-
- /* Set up key which is located after the variable length text. */
- size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
- if (size < 0)
- return size;
- size += sizeof(*msg) + mex->inputdatalength;
-
- /* message header, cprbx and f&r */
- msg->hdr = static_type6_hdrX;
- msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
-
- msg->cprbx = static_cprbx;
- msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
- msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
-
- msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
- static_pke_fnr_MCL2 : static_pke_fnr;
-
- msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
-
- ap_msg->length = size;
- return 0;
-}
-
-/**
- * Convert a ICACRT message to a type6 CRT message.
- *
- * @zdev: crypto device pointer
- * @ap_msg: pointer to AP message
- * @crt: pointer to user input data
- *
- * Returns 0 on success or -EFAULT.
- */
-static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ica_rsa_modexpo_crt *crt)
-{
- static struct type6_hdr static_type6_hdrX = {
- .type = 0x06,
- .offset1 = 0x00000058,
- .agent_id = {'C','A',},
- .function_code = {'P','D'},
- };
- static struct function_and_rules_block static_pkd_fnr = {
- .function_code = {'P','D'},
- .ulen = 10,
- .only_rule = {'Z','E','R','O','-','P','A','D'}
- };
-
- static struct function_and_rules_block static_pkd_fnr_MCL2 = {
- .function_code = {'P','D'},
- .ulen = 10,
- .only_rule = {'P','K','C','S','-','1','.','2'}
- };
- struct {
- struct type6_hdr hdr;
- struct CPRBX cprbx;
- struct function_and_rules_block fr;
- unsigned short length;
- char text[0];
- } __attribute__((packed)) *msg = ap_msg->message;
- int size;
-
- /* VUD.ciphertext */
- msg->length = crt->inputdatalength + 2;
- if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
- return -EFAULT;
-
- /* Set up key which is located after the variable length text. */
- size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
- if (size < 0)
- return size;
- size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
-
- /* message header, cprbx and f&r */
- msg->hdr = static_type6_hdrX;
- msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
-
- msg->cprbx = static_cprbx;
- msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
- msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
- size - sizeof(msg->hdr) - sizeof(msg->cprbx);
-
- msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
- static_pkd_fnr_MCL2 : static_pkd_fnr;
-
- ap_msg->length = size;
- return 0;
-}
-
-/**
- * Convert a XCRB message to a type6 CPRB message.
- *
- * @zdev: crypto device pointer
- * @ap_msg: pointer to AP message
- * @xcRB: pointer to user input data
- *
- * Returns 0 on success or -EFAULT, -EINVAL.
- */
-struct type86_fmt2_msg {
- struct type86_hdr hdr;
- struct type86_fmt2_ext fmt2;
-} __attribute__((packed));
-
-static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ica_xcRB *xcRB)
-{
- static struct type6_hdr static_type6_hdrX = {
- .type = 0x06,
- .offset1 = 0x00000058,
- };
- struct {
- struct type6_hdr hdr;
- struct CPRBX cprbx;
- } __attribute__((packed)) *msg = ap_msg->message;
-
- int rcblen = CEIL4(xcRB->request_control_blk_length);
- int replylen;
- char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
- char *function_code;
-
- /* length checks */
- ap_msg->length = sizeof(struct type6_hdr) +
- CEIL4(xcRB->request_control_blk_length) +
- xcRB->request_data_length;
- if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
- return -EINVAL;
- replylen = sizeof(struct type86_fmt2_msg) +
- CEIL4(xcRB->reply_control_blk_length) +
- xcRB->reply_data_length;
- if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
- return -EINVAL;
-
- /* prepare type6 header */
- msg->hdr = static_type6_hdrX;
- memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
- msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
- if (xcRB->request_data_length) {
- msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
- msg->hdr.ToCardLen2 = xcRB->request_data_length;
- }
- msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
- msg->hdr.FromCardLen2 = xcRB->reply_data_length;
-
- /* prepare CPRB */
- if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
- xcRB->request_control_blk_length))
- return -EFAULT;
- if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
- xcRB->request_control_blk_length)
- return -EINVAL;
- function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
- memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
-
- if (memcmp(function_code, "US", 2) == 0)
- ap_msg->special = 1;
- else
- ap_msg->special = 0;
-
- /* copy data block */
- if (xcRB->request_data_length &&
- copy_from_user(req_data, xcRB->request_data_address,
- xcRB->request_data_length))
- return -EFAULT;
- return 0;
-}
-
-/**
- * Prepare a type6 CPRB message for random number generation
- *
- * @ap_dev: AP device pointer
- * @ap_msg: pointer to AP message
- */
-static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
- struct ap_message *ap_msg,
- unsigned random_number_length)
-{
- struct {
- struct type6_hdr hdr;
- struct CPRBX cprbx;
- char function_code[2];
- short int rule_length;
- char rule[8];
- short int verb_length;
- short int key_length;
- } __attribute__((packed)) *msg = ap_msg->message;
- static struct type6_hdr static_type6_hdrX = {
- .type = 0x06,
- .offset1 = 0x00000058,
- .agent_id = {'C', 'A'},
- .function_code = {'R', 'L'},
- .ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
- .FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
- };
- static struct CPRBX local_cprbx = {
- .cprb_len = 0x00dc,
- .cprb_ver_id = 0x02,
- .func_id = {0x54, 0x32},
- .req_parml = sizeof *msg - sizeof(msg->hdr) -
- sizeof(msg->cprbx),
- .rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
- };
-
- msg->hdr = static_type6_hdrX;
- msg->hdr.FromCardLen2 = random_number_length,
- msg->cprbx = local_cprbx;
- msg->cprbx.rpl_datal = random_number_length,
- msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
- memcpy(msg->function_code, msg->hdr.function_code, 0x02);
- msg->rule_length = 0x0a;
- memcpy(msg->rule, "RANDOM ", 8);
- msg->verb_length = 0x02;
- msg->key_length = 0x02;
- ap_msg->length = sizeof *msg;
-}
-
-/**
- * Copy results from a type 86 ICA reply message back to user space.
- *
- * @zdev: crypto device pointer
- * @reply: reply AP message.
- * @data: pointer to user output data
- * @length: size of user output data
- *
- * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
- */
-struct type86x_reply {
- struct type86_hdr hdr;
- struct type86_fmt2_ext fmt2;
- struct CPRBX cprbx;
- unsigned char pad[4]; /* 4 byte function code/rules block ? */
- unsigned short length;
- char text[0];
-} __attribute__((packed));
-
-static int convert_type86_ica(struct zcrypt_device *zdev,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
-{
- static unsigned char static_pad[] = {
- 0x00,0x02,
- 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
- 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
- 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
- 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
- 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
- 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
- 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
- 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
- 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
- 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
- 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
- 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
- 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
- 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
- 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
- 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
- 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
- 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
- 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
- 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
- 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
- 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
- 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
- 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
- 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
- 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
- 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
- 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
- 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
- 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
- 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
- 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
- };
- struct type86x_reply *msg = reply->message;
- unsigned short service_rc, service_rs;
- unsigned int reply_len, pad_len;
- char *data;
-
- service_rc = msg->cprbx.ccp_rtcode;
- if (unlikely(service_rc != 0)) {
- service_rs = msg->cprbx.ccp_rscode;
- if (service_rc == 8 && service_rs == 66)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 65)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 770)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 783) {
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
- return -EAGAIN;
- }
- if (service_rc == 12 && service_rs == 769)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 72)
- return -EINVAL;
- zdev->online = 0;
- return -EAGAIN; /* repeat the request on a different device. */
- }
- data = msg->text;
- reply_len = msg->length - 2;
- if (reply_len > outputdatalength)
- return -EINVAL;
- /*
- * For all encipher requests, the length of the ciphertext (reply_len)
- * will always equal the modulus length. For MEX decipher requests
- * the output needs to get padded. Minimum pad size is 10.
- *
- * Currently, the cases where padding will be added is for:
- * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
- * ZERO-PAD and CRT is only supported for PKD requests)
- * - PCICC, always
- */
- pad_len = outputdatalength - reply_len;
- if (pad_len > 0) {
- if (pad_len < 10)
- return -EINVAL;
- /* 'restore' padding left in the PCICC/PCIXCC card. */
- if (copy_to_user(outputdata, static_pad, pad_len - 1))
- return -EFAULT;
- if (put_user(0, outputdata + pad_len - 1))
- return -EFAULT;
- }
- /* Copy the crypto response to user space. */
- if (copy_to_user(outputdata + pad_len, data, reply_len))
- return -EFAULT;
- return 0;
-}
-
-/**
- * Copy results from a type 86 XCRB reply message back to user space.
- *
- * @zdev: crypto device pointer
- * @reply: reply AP message.
- * @xcRB: pointer to XCRB
- *
- * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
- */
-static int convert_type86_xcrb(struct zcrypt_device *zdev,
- struct ap_message *reply,
- struct ica_xcRB *xcRB)
-{
- struct type86_fmt2_msg *msg = reply->message;
- char *data = reply->message;
-
- /* Copy CPRB to user */
- if (copy_to_user(xcRB->reply_control_blk_addr,
- data + msg->fmt2.offset1, msg->fmt2.count1))
- return -EFAULT;
- xcRB->reply_control_blk_length = msg->fmt2.count1;
-
- /* Copy data buffer to user */
- if (msg->fmt2.count2)
- if (copy_to_user(xcRB->reply_data_addr,
- data + msg->fmt2.offset2, msg->fmt2.count2))
- return -EFAULT;
- xcRB->reply_data_length = msg->fmt2.count2;
- return 0;
-}
-
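convert_type86_xcrb() treats the reply as a flat byte buffer and uses the (offset, count) pairs of the fmt2 descriptor to locate the reply control block and the optional data area. A standalone sketch of that addressing scheme follows; the descriptor struct is a simplified stand-in invented for the example, not the real type86_fmt2_msg layout:

#include <stdio.h>
#include <string.h>

struct fmt2_desc {                      /* simplified stand-in, not the driver's layout */
        unsigned int offset1, count1;   /* reply control block */
        unsigned int offset2, count2;   /* reply data area */
};

static void extract_reply(const unsigned char *reply, const struct fmt2_desc *f,
                          unsigned char *cprb_out, unsigned char *data_out)
{
        memcpy(cprb_out, reply + f->offset1, f->count1);        /* always copied */
        if (f->count2)                                          /* data area is optional */
                memcpy(data_out, reply + f->offset2, f->count2);
}

int main(void)
{
        unsigned char reply[64] = "....CPRB....DATA";
        struct fmt2_desc f = { .offset1 = 4, .count1 = 4, .offset2 = 12, .count2 = 4 };
        unsigned char cprb[8] = { 0 }, data[8] = { 0 };

        extract_reply(reply, &f, cprb, data);
        printf("%.4s %.4s\n", (char *) cprb, (char *) data);
        return 0;
}
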
-static int convert_type86_rng(struct zcrypt_device *zdev,
- struct ap_message *reply,
- char *buffer)
-{
- struct {
- struct type86_hdr hdr;
- struct type86_fmt2_ext fmt2;
- struct CPRBX cprbx;
- } __attribute__((packed)) *msg = reply->message;
- char *data = reply->message;
-
- if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
- return -EINVAL;
- memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
- return msg->fmt2.count2;
-}
-
-static int convert_response_ica(struct zcrypt_device *zdev,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
-{
- struct type86x_reply *msg = reply->message;
-
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
- case TYPE82_RSP_CODE:
- case TYPE88_RSP_CODE:
- return convert_error(zdev, reply);
- case TYPE86_RSP_CODE:
- if (msg->cprbx.ccp_rtcode &&
- (msg->cprbx.ccp_rscode == 0x14f) &&
- (outputdatalength > 256)) {
- if (zdev->max_exp_bit_length <= 17) {
- zdev->max_exp_bit_length = 17;
- return -EAGAIN;
- } else
- return -EINVAL;
- }
- if (msg->hdr.reply_code)
- return convert_error(zdev, reply);
- if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_ica(zdev, reply,
- outputdata, outputdatalength);
- /* Fall through, no break, incorrect cprb version is an unknown
- * response */
- default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- return -EAGAIN; /* repeat the request on a different device. */
- }
-}
-
-static int convert_response_xcrb(struct zcrypt_device *zdev,
- struct ap_message *reply,
- struct ica_xcRB *xcRB)
-{
- struct type86x_reply *msg = reply->message;
-
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
- case TYPE82_RSP_CODE:
- case TYPE88_RSP_CODE:
- xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
- return convert_error(zdev, reply);
- case TYPE86_RSP_CODE:
- if (msg->hdr.reply_code) {
- memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
- return convert_error(zdev, reply);
- }
- if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_xcrb(zdev, reply, xcRB);
- /* Fall through, no break, incorrect cprb version is an unknown
- * response */
- default: /* Unknown response type, this should NEVER EVER happen */
- xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
- zdev->online = 0;
- return -EAGAIN; /* repeat the request on a different device. */
- }
-}
-
-static int convert_response_rng(struct zcrypt_device *zdev,
- struct ap_message *reply,
- char *data)
-{
- struct type86x_reply *msg = reply->message;
-
- switch (msg->hdr.type) {
- case TYPE82_RSP_CODE:
- case TYPE88_RSP_CODE:
- return -EINVAL;
- case TYPE86_RSP_CODE:
- if (msg->hdr.reply_code)
- return -EINVAL;
- if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_rng(zdev, reply, data);
- /* Fall through, no break, incorrect cprb version is an unknown
- * response */
- default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- return -EAGAIN; /* repeat the request on a different device. */
- }
-}
-
-/**
- * This function is called from the AP bus code after a crypto request
- * "msg" has finished with the reply message "reply".
- * It is called from tasklet context.
- * @ap_dev: pointer to the AP device
- * @msg: pointer to the AP message
- * @reply: pointer to the AP reply message
- */
-static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
- struct ap_message *msg,
- struct ap_message *reply)
-{
- static struct error_hdr error_reply = {
- .type = TYPE82_RSP_CODE,
- .reply_code = REP82_ERROR_MACHINE_FAILURE,
- };
- struct response_type *resp_type =
- (struct response_type *) msg->private;
- struct type86x_reply *t86r;
- int length;
-
- /* Copy the reply message to the request message buffer. */
- if (IS_ERR(reply)) {
- memcpy(msg->message, &error_reply, sizeof(error_reply));
- goto out;
- }
- t86r = reply->message;
- if (t86r->hdr.type == TYPE86_RSP_CODE &&
- t86r->cprbx.cprb_ver_id == 0x02) {
- switch (resp_type->type) {
- case PCIXCC_RESPONSE_TYPE_ICA:
- length = sizeof(struct type86x_reply)
- + t86r->length - 2;
- length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
- memcpy(msg->message, reply->message, length);
- break;
- case PCIXCC_RESPONSE_TYPE_XCRB:
- length = t86r->fmt2.offset2 + t86r->fmt2.count2;
- length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
- memcpy(msg->message, reply->message, length);
- break;
- default:
- memcpy(msg->message, &error_reply, sizeof error_reply);
- }
- } else
- memcpy(msg->message, reply->message, sizeof error_reply);
-out:
- complete(&(resp_type->work));
-}
-
-static atomic_t zcrypt_step = ATOMIC_INIT(0);
-
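zcrypt_step, together with the caller's pid, forms the program-supplied message id (psmid) used by the request functions below: pid in the upper 32 bits, the incremented step counter in the lower 32 bits, so replies can be matched to outstanding requests. A user-space sketch of that packing, with C11 atomics and getpid() standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uint zcrypt_step_counter;         /* stands in for the kernel atomic_t */

static unsigned long long make_psmid(void)
{
        /* atomic_fetch_add() returns the old value; +1 mimics atomic_inc_return() */
        unsigned int step = atomic_fetch_add(&zcrypt_step_counter, 1) + 1;

        return ((unsigned long long) getpid() << 32) + step;
}

int main(void)
{
        unsigned long long psmid = make_psmid();

        printf("psmid=%016llx pid=%u step=%u\n", psmid,
               (unsigned int) (psmid >> 32), (unsigned int) (psmid & 0xffffffffU));
        return 0;
}
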
-/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
- * device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
- * @mex: pointer to the modexpo request buffer
- */
-static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
- struct ica_rsa_modexpo *mex)
-{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_ICA,
- };
- int rc;
-
- ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_pcixcc_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
- if (rc)
- goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
- if (rc == 0)
- rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
- mex->outputdatalength);
- else
- /* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
- free_page((unsigned long) ap_msg.message);
- return rc;
-}
-
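All four request paths share the same shape: build the type 6 message, queue it on the AP device, sleep on a completion until zcrypt_pcixcc_receive() signals it, then convert the reply (or cancel the message if the wait was interrupted by a signal). A rough user-space analogue of that queue/wait/convert flow, with a pthread condition variable playing the role of the kernel completion and a worker thread playing the role of the AP bus; everything here is illustrative:

#include <pthread.h>
#include <stdio.h>

struct request {
        pthread_mutex_t lock;
        pthread_cond_t done;
        int completed;
        int reply;
};

static void *card_worker(void *arg)             /* stands in for the AP reply tasklet */
{
        struct request *req = arg;

        pthread_mutex_lock(&req->lock);
        req->reply = 42;                        /* "crypto result" */
        req->completed = 1;
        pthread_cond_signal(&req->done);        /* complete(&resp_type.work) */
        pthread_mutex_unlock(&req->lock);
        return NULL;
}

int main(void)
{
        struct request req = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .done = PTHREAD_COND_INITIALIZER,
        };
        pthread_t tid;

        pthread_create(&tid, NULL, card_worker, &req);  /* "queue" the message */
        pthread_mutex_lock(&req.lock);
        while (!req.completed)                          /* wait for completion */
                pthread_cond_wait(&req.done, &req.lock);
        pthread_mutex_unlock(&req.lock);
        pthread_join(tid, NULL);
        printf("converted reply: %d\n", req.reply);     /* convert_response_*() step */
        return 0;
}
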
-/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
- * device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
- * @crt: pointer to the modexpoc_crt request buffer
- */
-static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
- struct ica_rsa_modexpo_crt *crt)
-{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_ICA,
- };
- int rc;
-
- ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_pcixcc_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
- if (rc)
- goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
- if (rc == 0)
- rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
- crt->outputdatalength);
- else
- /* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
- free_page((unsigned long) ap_msg.message);
- return rc;
-}
-
-/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
- * device to handle a send_cprb request.
- * @zdev: pointer to zcrypt_device structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
- * @xcRB: pointer to the send_cprb request buffer
- */
-static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
- struct ica_xcRB *xcRB)
-{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
- };
- int rc;
-
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_pcixcc_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
- if (rc)
- goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
- if (rc == 0)
- rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
- else
- /* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
- kzfree(ap_msg.message);
- return rc;
-}
-
-/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
- * device to generate random data.
- * @zdev: pointer to zcrypt_device structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
- * @buffer: pointer to a memory page to return random data
- */
-
-static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
- char *buffer)
-{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
- };
- int rc;
-
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_pcixcc_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
- if (rc == 0)
- rc = convert_response_rng(zdev, &ap_msg, buffer);
- else
- /* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
- kfree(ap_msg.message);
- return rc;
-}
-
-/**
- * The crypto operations for a PCIXCC/CEX2C card.
- */
-static struct zcrypt_ops zcrypt_pcixcc_ops = {
- .rsa_modexpo = zcrypt_pcixcc_modexpo,
- .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
- .send_cprb = zcrypt_pcixcc_send_cprb,
-};
-
-static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
- .rsa_modexpo = zcrypt_pcixcc_modexpo,
- .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
- .send_cprb = zcrypt_pcixcc_send_cprb,
- .rng = zcrypt_pcixcc_rng,
-};
-
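The two ops tables above are plain function-pointer vtables: the request distributor calls only through zdev->ops, and cards without the random-number facility simply get the table with no .rng entry. A minimal sketch of that dispatch pattern with toy types (nothing here comes from the driver):

#include <stdio.h>

struct toy_device;

struct toy_ops {
        long (*rsa_modexpo)(struct toy_device *dev);
        long (*rng)(struct toy_device *dev);    /* NULL if unsupported */
};

struct toy_device {
        const struct toy_ops *ops;
};

static long toy_modexpo(struct toy_device *dev) { (void) dev; return 0; }

static const struct toy_ops ops_norng = {
        .rsa_modexpo = toy_modexpo,             /* .rng intentionally left out */
};

int main(void)
{
        struct toy_device dev = { .ops = &ops_norng };

        printf("modexpo rc=%ld\n", dev.ops->rsa_modexpo(&dev));
        if (dev.ops->rng)                       /* capability check before dispatch */
                dev.ops->rng(&dev);
        else
                printf("no rng op on this device\n");
        return 0;
}
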
-/**
 * Micro-code detection function. It sends a message to a pcixcc card
* to find out the microcode level.
* @ap_dev: pointer to the AP device.
@@ -1083,9 +323,11 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
return rc;
}
if (rc)
- zdev->ops = &zcrypt_pcixcc_with_rng_ops;
+ zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
else
- zdev->ops = &zcrypt_pcixcc_ops;
+ zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_NORNG);
ap_dev->reply = &zdev->reply;
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
@@ -1095,6 +337,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
out_free:
ap_dev->private = NULL;
+ zcrypt_msgtype_release(zdev->ops);
zcrypt_device_free(zdev);
return rc;
}
@@ -1106,8 +349,10 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev = ap_dev->private;
+ struct zcrypt_ops *zops = zdev->ops;
zcrypt_device_unregister(zdev);
+ zcrypt_msgtype_release(zops);
}
int __init zcrypt_pcixcc_init(void)
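
With the message handling moved out of this file, probe now obtains the ops table from the msgtype registry by name and variant, and remove releases it again, keeping the provider's use count balanced. The following toy registry only illustrates that request/release pairing; it is an assumption for illustration, not the actual zcrypt_msgtype_request()/zcrypt_msgtype_release() implementation:

#include <stdio.h>
#include <string.h>

struct msg_ops {
        const char *name;
        int refcount;
};

static struct msg_ops registry[] = {
        { .name = "MSGTYPE06", .refcount = 0 },
};

static struct msg_ops *msgtype_request(const char *name)
{
        for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
                if (strcmp(registry[i].name, name) == 0) {
                        registry[i].refcount++;         /* pin the provider */
                        return &registry[i];
                }
        return NULL;                                    /* unknown msgtype */
}

static void msgtype_release(struct msg_ops *ops)
{
        if (ops)
                ops->refcount--;                        /* balance the request */
}

int main(void)
{
        struct msg_ops *ops = msgtype_request("MSGTYPE06");

        if (!ops)
                return 1;
        printf("%s refcount=%d\n", ops->name, ops->refcount);
        msgtype_release(ops);
        printf("%s refcount=%d\n", ops->name, ops->refcount);
        return 0;
}
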
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index c7cdf599e46b..eacafc8962f2 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -1,12 +1,13 @@
/*
* zcrypt 2.1.0
*
- * Copyright IBM Corp. 2001, 2006
+ * Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7a8b09612c41..cf6da7fafe54 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2993,7 +2993,7 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
struct ccw_dev_id ccwid;
- int level, rc;
+ int level;
tid->chpid = card->info.chpid;
ccw_device_get_id(CARD_RDEV(card), &ccwid);
@@ -3001,17 +3001,10 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
tid->devno = ccwid.devno;
if (!info)
return;
-
- rc = stsi(NULL, 0, 0, 0);
- if (rc == -ENOSYS)
- level = rc;
- else
- level = (((unsigned int) rc) >> 28);
-
- if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
+ level = stsi(NULL, 0, 0, 0);
+ if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
tid->lparnr = info222->lpar_number;
-
- if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
+ if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
}