Message-Id: <20201103203239.026010296@linuxfoundation.org>
Date: Tue, 3 Nov 2020 21:35:41 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Juergen Gross <jgross@...e.com>,
Jan Beulich <jbeulich@...e.com>,
Stefano Stabellini <sstabellini@...nel.org>,
Wei Liu <wl@....org>
Subject: [PATCH 4.19 049/191] xen/events: block rogue events for some time
From: Juergen Gross <jgross@...e.com>

commit 5f7f77400ab5b357b5fdb7122c3442239672186c upstream.

In order to avoid high dom0 load due to rogue guests sending events at
high frequency, block those events in case no action was needed in dom0
to handle the events.
This is done by adding a per-event counter, which is set to zero in case
an EOI without the XEN_EOI_FLAG_SPURIOUS flag is received from a backend
driver, and incremented when this flag has been set. In case the counter
is 2 or higher, delay the EOI by 1 << (cnt - 2) jiffies, but by no more
than 1 second.
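
(For illustration only, not part of the patch: a minimal sketch of the
backoff described above. The helper name spurious_eoi_delay() and the
bare short pointer are made up for this sketch and stand in for the
per-event spurious_cnt; HZ is the kernel tick frequency.)

	/*
	 * Illustration only: compute the EOI delay in jiffies after a
	 * spurious event, mirroring the counter handling described above.
	 */
	static unsigned int spurious_eoi_delay(short *spurious_cnt)
	{
		unsigned int delay = 0;

		/* Stop incrementing once 1 << cnt would reach 4 * HZ. */
		if ((1 << *spurious_cnt) < (HZ << 2))
			(*spurious_cnt)++;

		/*
		 * From the second consecutive spurious event on, delay by
		 * 1 << (cnt - 2) jiffies, but never by more than one second.
		 */
		if (*spurious_cnt > 1) {
			delay = 1 << (*spurious_cnt - 2);
			if (delay > HZ)
				delay = HZ;
		}

		return delay;
	}

With HZ=250, for example, a stream of back-to-back spurious events would
be delayed by 0, 1, 2, 4, ... 128, 250, 250, ... jiffies.
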
In order not to waste memory, shorten the per-event refcnt to two bytes
(it should normally never exceed a value of 2). Add an overflow check to
evtchn_get() to make sure the 2 bytes really won't overflow.
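
(Likewise an illustration, not part of the patch: the shape of the
overflow check added to evtchn_get(), reduced to just the refcount
handling. The helper name get_ref() and the bare short pointer are
stand-ins for the real info->refcnt handling; SHRT_MAX and -EINVAL are
the usual kernel definitions.)

	/*
	 * Illustration only: take a reference on the two-byte counter
	 * without letting it wrap past SHRT_MAX into negative values.
	 */
	static int get_ref(short *refcnt)
	{
		if (*refcnt <= 0 || *refcnt == SHRT_MAX)
			return -EINVAL;

		(*refcnt)++;
		return 0;
	}
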
This is part of XSA-332.

Cc: stable@...r.kernel.org
Signed-off-by: Juergen Gross <jgross@...e.com>
Reviewed-by: Jan Beulich <jbeulich@...e.com>
Reviewed-by: Stefano Stabellini <sstabellini@...nel.org>
Reviewed-by: Wei Liu <wl@....org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/xen/events/events_base.c | 27 ++++++++++++++++++++++-----
drivers/xen/events/events_internal.h | 3 ++-
2 files changed, 24 insertions(+), 6 deletions(-)
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -459,17 +459,34 @@ static void lateeoi_list_add(struct irq_
 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 }
 
-static void xen_irq_lateeoi_locked(struct irq_info *info)
+static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 {
 	evtchn_port_t evtchn;
 	unsigned int cpu;
+	unsigned int delay = 0;
 
 	evtchn = info->evtchn;
 	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
 		return;
 
+	if (spurious) {
+		if ((1 << info->spurious_cnt) < (HZ << 2))
+			info->spurious_cnt++;
+		if (info->spurious_cnt > 1) {
+			delay = 1 << (info->spurious_cnt - 2);
+			if (delay > HZ)
+				delay = HZ;
+			if (!info->eoi_time)
+				info->eoi_cpu = smp_processor_id();
+			info->eoi_time = get_jiffies_64() + delay;
+		}
+	} else {
+		info->spurious_cnt = 0;
+	}
+
 	cpu = info->eoi_cpu;
-	if (info->eoi_time && info->irq_epoch == per_cpu(irq_epoch, cpu)) {
+	if (info->eoi_time &&
+	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
 		lateeoi_list_add(info);
 		return;
 	}
@@ -506,7 +523,7 @@ static void xen_irq_lateeoi_worker(struc
 
 		info->eoi_time = 0;
 
-		xen_irq_lateeoi_locked(info);
+		xen_irq_lateeoi_locked(info, false);
 	}
 
 	if (info)
@@ -535,7 +552,7 @@ void xen_irq_lateeoi(unsigned int irq, u
 	info = info_for_irq(irq);
 
 	if (info)
-		xen_irq_lateeoi_locked(info);
+		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
 	read_unlock_irqrestore(&evtchn_rwlock, flags);
 }
@@ -1438,7 +1455,7 @@ int evtchn_get(unsigned int evtchn)
 		goto done;
 
 	err = -EINVAL;
-	if (info->refcnt <= 0)
+	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
 		goto done;
 
 	info->refcnt++;
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -33,7 +33,8 @@ enum xen_irq_type {
 struct irq_info {
 	struct list_head list;
 	struct list_head eoi_list;
-	int refcnt;
+	short refcnt;
+	short spurious_cnt;
 	enum xen_irq_type type;	/* type */
 	unsigned irq;
 	unsigned int evtchn;	/* event channel */