author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-06 19:11:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-06 19:11:24 -0700
commit     6e6c5b960644125b6f2fc2cd04e62bff0771923e (patch)
tree       6d839565616684904fd17f1207c8294b30cb162c /drivers
parent     c136b84393d4e340e1b53fc7f737dd5827b19ee5 (diff)
parent     c54590cac51db8ab5fd30156bdaba34af915e629 (diff)
download   linux-6e6c5b960644125b6f2fc2cd04e62bff0771923e.tar.bz2
Merge tag 'for-linus-4.13-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:
"Other than fixes and cleanups it contains:
- support > 32 VCPUs at domain restore
- support for new sysfs nodes related to Xen
- some performance tuning for Linux running as Xen guest"
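The new sysfs nodes mentioned above are added in drivers/xen/sys-hypervisor.c (see the diff further down). As a rough illustration only, not part of this pull, the userspace sketch below reads those two nodes; the paths are inferred from that patch (guest_type is created directly under hypervisor_kobj, buildid is added to the existing "properties" attribute group), so treat them as assumptions rather than a statement of the documented ABI.

/*
 * Illustrative only -- not part of this pull.  Reads the sysfs nodes added
 * in drivers/xen/sys-hypervisor.c below.  Paths are inferred from the patch:
 * guest_type sits directly under /sys/hypervisor/, buildid is added to the
 * existing "properties" attribute group.
 */
#include <stdio.h>
#include <string.h>

static void print_node(const char *path)
{
        char buf[128];
        FILE *f = fopen(path, "r");

        if (!f) {
                printf("%s: <not available>\n", path);
                return;
        }
        if (fgets(buf, sizeof(buf), f)) {
                buf[strcspn(buf, "\n")] = '\0';
                printf("%s: %s\n", path, buf);
        }
        fclose(f);
}

int main(void)
{
        print_node("/sys/hypervisor/guest_type");         /* "PV", "HVM", "PVH" or "Xen" */
        print_node("/sys/hypervisor/properties/buildid"); /* may read "<denied>" */
        return 0;
}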
* tag 'for-linus-4.13-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
x86/xen: allow userspace access during hypercalls
x86: xen: remove unnecessary variable in xen_foreach_remap_area()
xen: allocate page for shared info page from low memory
xen: avoid deadlock in xenbus driver
xen: add sysfs node for hypervisor build id
xen: sync include/xen/interface/version.h
xen: add sysfs node for guest type
doc,xen: document hypervisor sysfs nodes for xen
xen/vcpu: Handle xen_vcpu_setup() failure at boot
xen/vcpu: Handle xen_vcpu_setup() failure in hotplug
xen/pv: Fix OOPS on restore for a PV, !SMP domain
xen/pvh*: Support > 32 VCPUs at domain restore
xen/vcpu: Simplify xen_vcpu related code
xen-evtchn: Bind dyn evtchn:qemu-dm interrupt to next online VCPU
xen: avoid type warning in xchg_xen_ulong
xen: fix HYPERVISOR_dm_op() prototype
xen: don't print error message in case of missing Xenstore entry
arm/xen: Adjust one function call together with a variable assignment
arm/xen: Delete an error message for a failed memory allocation in __set_phys_to_machine_multi()
arm/xen: Improve a size determination in __set_phys_to_machine_multi()
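The "Bind dyn evtchn:qemu-dm interrupt to next online VCPU" change above spreads newly bound interdomain event channels across the online CPUs instead of leaving them all on CPU 0; the real implementation (drivers/xen/evtchn.c in the diff below) keeps a per-CPU last-selected value and walks the affinity and online masks with cpumask_next_and(), wrapping via cpumask_first_and(). A minimal standalone sketch of just that selection logic, using a plain array in place of the kernel cpumask API (all names here are illustrative, not the kernel's):

/* Illustration of the round-robin "next online CPU" pick; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static int last_selected = -1;  /* plays the role of bind_last_selected_cpu */

static int next_online_cpu(const bool online[NR_CPUS])
{
        int cpu;

        /* Look strictly after the previously selected CPU... */
        for (cpu = last_selected + 1; cpu < NR_CPUS; cpu++)
                if (online[cpu])
                        goto found;

        /* ...and wrap around to the first online CPU otherwise. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (online[cpu])
                        goto found;

        return -1;      /* no CPU online; cannot happen for a running guest */
found:
        last_selected = cpu;
        return cpu;
}

int main(void)
{
        bool online[NR_CPUS] = { true, false, true, true, false, true, false, false };
        int i;

        /* Successive bindings rotate over the online CPUs: 0 2 3 5 0 2 */
        for (i = 0; i < 6; i++)
                printf("binding %d -> CPU %d\n", i, next_online_cpu(online));
        return 0;
}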
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/xen/events/events_base.c     6
-rw-r--r--  drivers/xen/evtchn.c                34
-rw-r--r--  drivers/xen/manage.c                12
-rw-r--r--  drivers/xen/sys-hypervisor.c        62
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c   21
5 files changed, 118 insertions, 17 deletions
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2e567d8433b3..b241bfa529ce 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1303,10 +1303,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
 }
 
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
-static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
 {
         struct evtchn_bind_vcpu bind_vcpu;
-        int evtchn = evtchn_from_irq(irq);
         int masked;
 
         if (!VALID_EVTCHN(evtchn))
@@ -1338,12 +1337,13 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
 
 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                             bool force)
 {
         unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
-        int ret = rebind_irq_to_cpu(data->irq, tcpu);
+        int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
 
         if (!ret)
                 irq_data_update_effective_affinity(data, cpumask_of(tcpu));
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 10f1ef582659..9729a64ea1a9 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -421,6 +421,36 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
         del_evtchn(u, evtchn);
 }
 
+static DEFINE_PER_CPU(int, bind_last_selected_cpu);
+
+static void evtchn_bind_interdom_next_vcpu(int evtchn)
+{
+        unsigned int selected_cpu, irq;
+        struct irq_desc *desc;
+        unsigned long flags;
+
+        irq = irq_from_evtchn(evtchn);
+        desc = irq_to_desc(irq);
+
+        if (!desc)
+                return;
+
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        selected_cpu = this_cpu_read(bind_last_selected_cpu);
+        selected_cpu = cpumask_next_and(selected_cpu,
+                        desc->irq_common_data.affinity, cpu_online_mask);
+
+        if (unlikely(selected_cpu >= nr_cpu_ids))
+                selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
+                                cpu_online_mask);
+
+        this_cpu_write(bind_last_selected_cpu, selected_cpu);
+
+        /* unmask expects irqs to be disabled */
+        xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
 static long evtchn_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
 {
@@ -478,8 +508,10 @@ static long evtchn_ioctl(struct file *file,
                         break;
 
                 rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
-                if (rc == 0)
+                if (rc == 0) {
                         rc = bind_interdomain.local_port;
+                        evtchn_bind_interdom_next_vcpu(rc);
+                }
                 break;
         }
 
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 9e35032351a0..c425d03d37d2 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -278,8 +278,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
         err = xenbus_transaction_start(&xbt);
         if (err)
                 return;
-        if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) < 0) {
-                pr_err("Unable to read sysrq code in control/sysrq\n");
+        err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
+        if (err < 0) {
+                /*
+                 * The Xenstore watch fires directly after registering it and
+                 * after a suspend/resume cycle. So ENOENT is no error but
+                 * might happen in those cases.
+                 */
+                if (err != -ENOENT)
+                        pr_err("Error %d reading sysrq code in control/sysrq\n",
+                               err);
                 xenbus_transaction_end(xbt, 1);
                 return;
         }
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 84106f9c456c..9d314bba7c4e 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -50,6 +50,35 @@ static int __init xen_sysfs_type_init(void)
         return sysfs_create_file(hypervisor_kobj, &type_attr.attr);
 }
 
+static ssize_t guest_type_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+        const char *type;
+
+        switch (xen_domain_type) {
+        case XEN_NATIVE:
+                /* ARM only. */
+                type = "Xen";
+                break;
+        case XEN_PV_DOMAIN:
+                type = "PV";
+                break;
+        case XEN_HVM_DOMAIN:
+                type = xen_pvh_domain() ? "PVH" : "HVM";
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        return sprintf(buffer, "%s\n", type);
+}
+
+HYPERVISOR_ATTR_RO(guest_type);
+
+static int __init xen_sysfs_guest_type_init(void)
+{
+        return sysfs_create_file(hypervisor_kobj, &guest_type_attr.attr);
+}
+
 /* xen version attributes */
 static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
 {
@@ -327,12 +356,40 @@ static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer)
 
 HYPERVISOR_ATTR_RO(features);
 
+static ssize_t buildid_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+        ssize_t ret;
+        struct xen_build_id *buildid;
+
+        ret = HYPERVISOR_xen_version(XENVER_build_id, NULL);
+        if (ret < 0) {
+                if (ret == -EPERM)
+                        ret = sprintf(buffer, "<denied>");
+                return ret;
+        }
+
+        buildid = kmalloc(sizeof(*buildid) + ret, GFP_KERNEL);
+        if (!buildid)
+                return -ENOMEM;
+
+        buildid->len = ret;
+        ret = HYPERVISOR_xen_version(XENVER_build_id, buildid);
+        if (ret > 0)
+                ret = sprintf(buffer, "%s", buildid->buf);
+        kfree(buildid);
+
+        return ret;
+}
+
+HYPERVISOR_ATTR_RO(buildid);
+
 static struct attribute *xen_properties_attrs[] = {
         &capabilities_attr.attr,
         &changeset_attr.attr,
         &virtual_start_attr.attr,
         &pagesize_attr.attr,
         &features_attr.attr,
+        &buildid_attr.attr,
         NULL
 };
 
@@ -471,6 +528,9 @@ static int __init hyper_sysfs_init(void)
         ret = xen_sysfs_type_init();
         if (ret)
                 goto out;
+        ret = xen_sysfs_guest_type_init();
+        if (ret)
+                goto guest_type_out;
         ret = xen_sysfs_version_init();
         if (ret)
                 goto version_out;
@@ -502,6 +562,8 @@ uuid_out:
 comp_out:
         sysfs_remove_group(hypervisor_kobj, &version_group);
 version_out:
+        sysfs_remove_file(hypervisor_kobj, &guest_type_attr.attr);
+guest_type_out:
         sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
 out:
         return ret;
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 856ada5d39c9..5b081a01779d 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -299,17 +299,7 @@ static int process_msg(void)
                 mutex_lock(&xb_write_mutex);
                 list_for_each_entry(req, &xs_reply_list, list) {
                         if (req->msg.req_id == state.msg.req_id) {
-                                if (req->state == xb_req_state_wait_reply) {
-                                        req->msg.type = state.msg.type;
-                                        req->msg.len = state.msg.len;
-                                        req->body = state.body;
-                                        req->state = xb_req_state_got_reply;
-                                        list_del(&req->list);
-                                        req->cb(req);
-                                } else {
-                                        list_del(&req->list);
-                                        kfree(req);
-                                }
+                                list_del(&req->list);
                                 err = 0;
                                 break;
                         }
@@ -317,6 +307,15 @@ static int process_msg(void)
                 mutex_unlock(&xb_write_mutex);
                 if (err)
                         goto out;
+
+                if (req->state == xb_req_state_wait_reply) {
+                        req->msg.type = state.msg.type;
+                        req->msg.len = state.msg.len;
+                        req->body = state.body;
+                        req->state = xb_req_state_got_reply;
+                        req->cb(req);
+                } else
+                        kfree(req);
         }
 
         mutex_unlock(&xs_response_mutex);