Message-Id: <1453252038-31915-45-git-send-email-kamal@canonical.com>
Date: Tue, 19 Jan 2016 17:05:22 -0800
From: Kamal Mostafa <kamal@...onical.com>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org,
kernel-team@...ts.ubuntu.com
Cc: Ross Lagerwall <ross.lagerwall@...rix.com>,
David Vrabel <david.vrabel@...rix.com>,
Kamal Mostafa <kamal@...onical.com>
Subject: [PATCH 3.19.y-ckt 044/160] xen/events/fifo: Consume unprocessed events when a CPU dies

3.19.8-ckt13 -stable review patch. If anyone has any objections, please let me know.

---8<------------------------------------------------------------

From: Ross Lagerwall <ross.lagerwall@...rix.com>

commit 3de88d622fd68bd4dbee0f80168218b23f798fd0 upstream.

When a CPU is offlined, there may be unprocessed events on a port for
that CPU. If the port is subsequently reused on a different CPU, it
could be in an unexpected state with the link bit set, resulting in
interrupts being missed. Fix this by consuming any unprocessed events
for a particular CPU when that CPU dies.

Signed-off-by: Ross Lagerwall <ross.lagerwall@...rix.com>
Signed-off-by: David Vrabel <david.vrabel@...rix.com>
Signed-off-by: Kamal Mostafa <kamal@...onical.com>
---
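[ Reviewer note, not part of the commit itself: a minimal standalone C
  sketch of the pattern this patch introduces. All names below are
  illustrative rather than the kernel's; the point is only that the
  normal interrupt path and the CPU-death path share one consume
  routine, with a "drop" flag deciding whether a pending event is
  handled or merely discarded with a warning, mirroring what
  __evtchn_fifo_handle_events() does in the diff that follows. ]

/*
 * Standalone sketch; builds with any C compiler. Running it handles
 * ports 1-3 on the normal path, then reports port 4 as dropped on the
 * CPU-death path.
 */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 8

struct fake_queue {
	unsigned int events[QUEUE_DEPTH];
	unsigned int head, tail;
};

static void handle_event(unsigned int port)
{
	printf("handling event on port %u\n", port);
}

static void consume_one(struct fake_queue *q, bool drop)
{
	unsigned int port = q->events[q->head % QUEUE_DEPTH];

	q->head++;
	if (drop)
		printf("dropping pending event for port %u\n", port);
	else
		handle_event(port);
}

static void __handle_events(struct fake_queue *q, bool drop)
{
	while (q->head != q->tail)
		consume_one(q, drop);
}

/* Normal interrupt path: handle everything that is pending. */
static void handle_events(struct fake_queue *q)
{
	__handle_events(q, false);
}

/* CPU-death path: consume the queue so a reused port starts clean,
 * but do not run handlers on behalf of a dead CPU. */
static void cpu_dead(struct fake_queue *q)
{
	__handle_events(q, true);
}

int main(void)
{
	struct fake_queue q = { .events = { 1, 2, 3 }, .head = 0, .tail = 3 };

	handle_events(&q);	/* normal operation: ports 1-3 handled */
	q.events[q.tail % QUEUE_DEPTH] = 4;
	q.tail++;		/* one event still queued when the CPU dies */
	cpu_dead(&q);		/* consumed and reported, not handled */
	return 0;
}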
 drivers/xen/events/events_fifo.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 417415d..1dd0ba12 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
 			      struct evtchn_fifo_control_block *control_block,
-			      unsigned priority, unsigned long *ready)
+			      unsigned priority, unsigned long *ready,
+			      bool drop)
 {
 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
 	uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
 	if (head == 0)
 		clear_bit(priority, ready);
 
-	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
-		handle_irq_for_port(port);
+	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+		if (unlikely(drop))
+			pr_warn("Dropping pending event for port %u\n", port);
+		else
+			handle_irq_for_port(port);
+	}
 
 	q->head[priority] = head;
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 {
 	struct evtchn_fifo_control_block *control_block;
 	unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
 
 	while (ready) {
 		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-		consume_one_event(cpu, control_block, q, &ready);
+		consume_one_event(cpu, control_block, q, &ready, drop);
 		ready |= xchg(&control_block->ready, 0);
 	}
 }
 
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+	__evtchn_fifo_handle_events(cpu, false);
+}
+
 static void evtchn_fifo_resume(void)
 {
 	unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
 		if (!per_cpu(cpu_control_block, cpu))
 			ret = evtchn_fifo_alloc_control_block(cpu);
 		break;
+	case CPU_DEAD:
+		__evtchn_fifo_handle_events(cpu, true);
+		break;
 	default:
 		break;
 	}
--
1.9.1
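
[ Background note, also not part of the patch: the final hunk hooks into
  the notifier-based CPU hotplug API that 3.19 uses. A stripped-down
  sketch of how a driver receives CPU_DEAD is below; everything named
  "example_*" is hypothetical, while notifier_block, CPU_DEAD, NOTIFY_OK,
  register_cpu_notifier() and unregister_cpu_notifier() are the real
  3.19-era interfaces. ]

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_cpu_notification(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_DEAD:
		/* The CPU is now offline: drain its per-CPU state here,
		 * as evtchn_fifo does above with drop == true. */
		pr_info("example: CPU %u is dead\n", cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_notification,
};

static int __init example_init(void)
{
	register_cpu_notifier(&example_cpu_notifier);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_cpu_notifier(&example_cpu_notifier);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");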