From 2ce5786fd6f29ec09ad653e30e089042ea62b309 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Tue, 15 Dec 2020 10:37:11 +0100
Subject: [PATCH] xen/events: don't unmask an event channel when an eoi is
 pending

An event channel should be kept masked when an eoi is pending for it.
When being migrated to another cpu it might be unmasked, though.

In order to avoid this keep two different flags for each event channel
to be able to distinguish "normal" masking/unmasking from eoi related
masking/unmasking. The event channel should only be able to generate an
interrupt if both flags are cleared.

Cc: stable@vger.kernel.org
Fixes: 54c9de89895e0a36047 ("xen/events: add a new late EOI evtchn framework")
Reported-by: Julien Grall
Signed-off-by: Juergen Gross
---
 drivers/xen/events/events_base.c | 64 +++++++++++++++++++++++++++-----
 1 file changed, 54 insertions(+), 10 deletions(-)

diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 6038c4c35db5..b024200f1677 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -96,7 +96,9 @@ struct irq_info {
 	struct list_head eoi_list;
 	short refcnt;
 	short spurious_cnt;
-	enum xen_irq_type type; /* type */
+	short type;		/* type: IRQT_* */
+	bool masked;		/* Is event explicitly masked? */
+	bool eoi_pending;	/* Is EOI pending? */
 	unsigned irq;
 	evtchn_port_t evtchn;	/* event channel */
 	unsigned short cpu;	/* cpu bound */
@@ -272,6 +274,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
 	info->irq = irq;
 	info->evtchn = evtchn;
 	info->cpu = cpu;
+	info->masked = true;
+	info->eoi_pending = false;
 
 	ret = set_evtchn_to_irq(evtchn, irq);
 	if (ret < 0)
@@ -545,7 +549,10 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 	}
 
 	info->eoi_time = 0;
-	unmask_evtchn(evtchn);
+	info->eoi_pending = false;
+
+	if (!info->masked)
+		unmask_evtchn(evtchn);
 }
 
 static void xen_irq_lateeoi_worker(struct work_struct *work)
@@ -801,7 +808,11 @@ static unsigned int __startup_pirq(unsigned int irq)
 	goto err;
 
 out:
-	unmask_evtchn(evtchn);
+	info->masked = false;
+
+	if (!info->eoi_pending)
+		unmask_evtchn(evtchn);
+
 	eoi_pirq(irq_get_irq_data(irq));
 
 	return 0;
@@ -828,6 +839,7 @@ static void shutdown_pirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
+	info->masked = true;
 	mask_evtchn(evtchn);
 	xen_evtchn_close(evtchn);
 	xen_irq_info_cleanup(info);
@@ -1713,18 +1725,26 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
 
 static void enable_dynirq(struct irq_data *data)
 {
-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
 
-	if (VALID_EVTCHN(evtchn))
-		unmask_evtchn(evtchn);
+	if (VALID_EVTCHN(evtchn)) {
+		info->masked = false;
+
+		if (!info->eoi_pending)
+			unmask_evtchn(evtchn);
+	}
 }
 
 static void disable_dynirq(struct irq_data *data)
 {
-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
 
-	if (VALID_EVTCHN(evtchn))
+	if (VALID_EVTCHN(evtchn)) {
+		info->masked = true;
 		mask_evtchn(evtchn);
+	}
 }
 
 static void ack_dynirq(struct irq_data *data)
@@ -1754,6 +1774,30 @@ static void mask_ack_dynirq(struct irq_data *data)
 	ack_dynirq(data);
 }
 
+static void lateeoi_ack_dynirq(struct irq_data *data)
+{
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
+
+	if (VALID_EVTCHN(evtchn)) {
+		info->masked = false;
+		info->eoi_pending = true;
+		mask_evtchn(evtchn);
+	}
+}
+
+static void lateeoi_mask_ack_dynirq(struct irq_data *data)
+{
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
+
+	if (VALID_EVTCHN(evtchn)) {
+		info->masked = true;
+		info->eoi_pending = true;
+		mask_evtchn(evtchn);
+	}
+}
+
 static int retrigger_dynirq(struct irq_data *data)
 {
 	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
@@ -1973,8 +2017,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {
 	.irq_mask		= disable_dynirq,
 	.irq_unmask		= enable_dynirq,
 
-	.irq_ack		= mask_ack_dynirq,
-	.irq_mask_ack		= mask_ack_dynirq,
+	.irq_ack		= lateeoi_ack_dynirq,
+	.irq_mask_ack		= lateeoi_mask_ack_dynirq,
 
 	.irq_set_affinity	= set_affinity_irq,
 	.irq_retrigger		= retrigger_dynirq,
-- 
2.26.2