author	Vitaly Kuznetsov <vkuznets@redhat.com>	2016-04-30 19:21:34 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-05-01 09:23:14 -0700
commit	cd95aad5579371ac332507fc946008217fc37e6c (patch)
tree	3a8558709e95ae6250da08bc67d20c210af57dc0 /drivers/hv/channel_mgmt.c
parent	4dbfc2e68004c60edab7e8fd26784383dd3ee9bc (diff)
Drivers: hv: vmbus: handle various crash scenarios
Kdump keeps biting. Turns out CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was used for initial contact or to CPU0, depending on host version. vmbus_wait_for_unload() doesn't account for the fact that in case we're crashing on some other CPU we won't get the CHANNELMSG_UNLOAD_RESPONSE message and our wait on the current CPU will never end.

Do the following:

1) Check for completion_done() in the loop. In case the interrupt handler is still alive we'll get the confirmation we need.

2) Read message pages for all CPUs, as we're unsure where CHANNELMSG_UNLOAD_RESPONSE is going to be delivered. We can race with a still-alive interrupt handler doing the same, so add cmpxchg() to vmbus_signal_eom() to not lose the CHANNELMSG_UNLOAD_RESPONSE message.

3) Clean up message pages on all CPUs. This is required (at least for the current CPU, as we're clearing CPU0 messages now but we may want to bring up additional CPUs on crash) because new messages won't be delivered until we consume what's pending. On boot we'll place message pages somewhere else and we won't be able to read stale messages.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
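Item 2) refers to the cmpxchg() change in vmbus_signal_eom(); that helper lives outside this file, so it is not part of the diff below (which is limited to channel_mgmt.c). A minimal sketch of the idea, assuming the helper takes the previously read message type as its second argument (as the new call site in the diff suggests); the field and MSR details (message_flags.msg_pending, HV_X64_MSR_EOM) are taken from the existing Hyper-V message handling code and may differ from the actual patch:

/*
 * Sketch only, not the definition from this patch: retire a message slot
 * exactly once even when the crash path and a still-running interrupt
 * handler race on the same slot.
 */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * Only the caller whose cmpxchg() succeeds (the slot still holds the
	 * type it read earlier) clears the slot and signals end-of-message.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type, HVMSG_NONE) !=
	    old_msg_type)
		return;

	mb(); /* clear the slot before checking for pending messages */

	if (msg->header.message_flags.msg_pending)
		wrmsrl(HV_X64_MSR_EOM, 0); /* ask the hypervisor to rescan */
}

Whichever path loses the cmpxchg() leaves the slot alone, so CHANNELMSG_UNLOAD_RESPONSE is consumed exactly once.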
Diffstat (limited to 'drivers/hv/channel_mgmt.c')
-rw-r--r--	drivers/hv/channel_mgmt.c	58
1 file changed, 43 insertions(+), 15 deletions(-)
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 38b682bab85a..b6c1211b4df7 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -597,27 +597,55 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 
 static void vmbus_wait_for_unload(void)
 {
-	int cpu = smp_processor_id();
-	void *page_addr = hv_context.synic_message_page[cpu];
-	struct hv_message *msg = (struct hv_message *)page_addr +
-				  VMBUS_MESSAGE_SINT;
+	int cpu;
+	void *page_addr;
+	struct hv_message *msg;
 	struct vmbus_channel_message_header *hdr;
-	bool unloaded = false;
+	u32 message_type;
 
+	/*
+	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
+	 * used for initial contact or to CPU0 depending on host version. When
+	 * we're crashing on a different CPU let's hope that IRQ handler on
+	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
+	 * functional and vmbus_unload_response() will complete
+	 * vmbus_connection.unload_event. If not, the last thing we can do is
+	 * read message pages for all CPUs directly.
+	 */
 	while (1) {
-		if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
-			mdelay(10);
-			continue;
-		}
+		if (completion_done(&vmbus_connection.unload_event))
+			break;
 
-		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
-		if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
-			unloaded = true;
+		for_each_online_cpu(cpu) {
+			page_addr = hv_context.synic_message_page[cpu];
+			msg = (struct hv_message *)page_addr +
+				VMBUS_MESSAGE_SINT;
 
-		vmbus_signal_eom(msg);
+			message_type = READ_ONCE(msg->header.message_type);
+			if (message_type == HVMSG_NONE)
+				continue;
 
-		if (unloaded)
-			break;
+			hdr = (struct vmbus_channel_message_header *)
+				msg->u.payload;
+
+			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+				complete(&vmbus_connection.unload_event);
+
+			vmbus_signal_eom(msg, message_type);
+		}
+
+		mdelay(10);
+	}
+
+	/*
+	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
+	 * maybe-pending messages on all CPUs to be able to receive new
+	 * messages after we reconnect.
+	 */
+	for_each_online_cpu(cpu) {
+		page_addr = hv_context.synic_message_page[cpu];
+		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+		msg->header.message_type = HVMSG_NONE;
 	}
 }
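For context, vmbus_wait_for_unload() is only needed on the crash path, where sleeping is not possible; a normal unload can simply block on the completion. A simplified sketch of how the caller in this file might choose between the two, with the in_interrupt() check being an assumption about how the crash path is detected rather than something shown in this diff:

	/* After posting CHANNELMSG_UNLOAD to the host in vmbus_initiate_unload(): */
	if (!in_interrupt())
		/* Normal unload: sleep until vmbus_unload_response() completes us. */
		wait_for_completion(&vmbus_connection.unload_event);
	else
		/*
		 * Crash path: possibly in interrupt context, so busy-poll the
		 * per-CPU message pages instead of sleeping.
		 */
		vmbus_wait_for_unload();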