Message-ID: <87r3d1drt4.fsf@vitty.brq.redhat.com>
Date: Tue, 17 May 2016 10:14:47 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: Dexuan Cui <decui@...rosoft.com>
Cc: gregkh@...uxfoundation.org, linux-kernel@...r.kernel.org,
driverdev-devel@...uxdriverproject.org, olaf@...fle.de,
apw@...onical.com, jasowang@...hat.com, kys@...rosoft.com,
haiyangz@...rosoft.com
Subject: Re: [PATCH] Drivers: hv: vmbus: fix the race when querying & updating the percpu list
Dexuan Cui <decui@...rosoft.com> writes:
> There is a rare race when we remove an entry from the global list
> hv_context.percpu_list[cpu] in hv_process_channel_removal() ->
> percpu_channel_deq() -> list_del(): at this time, if vmbus_on_event() ->
> process_chn_event() -> pcpu_relid2channel() is trying to query the list,
> we can get the general protection fault:
>
> general protection fault: 0000 [#1] SMP
> ...
> RIP: 0010:[<ffffffff81461b6b>] [<ffffffff81461b6b>] vmbus_on_event+0xc4/0x149
>
> Similarly, we also have the issue in the code path: vmbus_process_offer() ->
> percpu_channel_enq().
>
> We can resolve the issue by disabling the tasklet when updating the list.
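Just to spell out the approach for anyone skimming the thread: list_del()/
list_add_tail() on the per-cpu list race with the list walk done from the
channel event tasklet, so the patch brackets the updates in
vmbus_process_offer() with tasklet_disable()/tasklet_enable(). A rough,
untested sketch of that pattern (percpu_channel_deq_safe is a made-up name,
not something from the patch):

	/*
	 * Keep the per-cpu event tasklet from walking the list while we
	 * update it: tasklet_disable() waits until the tasklet is not
	 * running and keeps it from running again until tasklet_enable().
	 */
	static void percpu_channel_deq_safe(struct vmbus_channel *channel)
	{
		struct tasklet_struct *t =
			hv_context.event_dpc[channel->target_cpu];

		tasklet_disable(t);
		list_del(&channel->percpu_list);
		tasklet_enable(t);
	}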
>
> Reported-by: Rolf Neugebauer <rolf.neugebauer@...ker.com>
> Signed-off-by: Dexuan Cui <decui@...rosoft.com>
> ---
> drivers/hv/channel.c | 3 +++
> drivers/hv/channel_mgmt.c | 20 +++++++++-----------
> include/linux/hyperv.h | 3 +++
> 3 files changed, 15 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
> index 56dd261..7811cf9 100644
> --- a/drivers/hv/channel.c
> +++ b/drivers/hv/channel.c
> @@ -546,8 +546,11 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
> put_cpu();
> smp_call_function_single(channel->target_cpu, reset_channel_cb,
> channel, true);
> + smp_call_function_single(channel->target_cpu,
> + percpu_channel_deq, channel, true);
> } else {
> reset_channel_cb(channel);
> + percpu_channel_deq(channel);
> put_cpu();
> }
>
> diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
> index 38b682ba..c4b5c07 100644
> --- a/drivers/hv/channel_mgmt.c
> +++ b/drivers/hv/channel_mgmt.c
> @@ -21,6 +21,7 @@
> #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
>
> #include <linux/kernel.h>
> +#include <linux/interrupt.h>
> #include <linux/sched.h>
> #include <linux/wait.h>
> #include <linux/mm.h>
> @@ -277,7 +278,7 @@ static void free_channel(struct vmbus_channel *channel)
> kfree(channel);
> }
>
> -static void percpu_channel_enq(void *arg)
> +void percpu_channel_enq(void *arg)
> {
> struct vmbus_channel *channel = arg;
> int cpu = smp_processor_id();
> @@ -285,7 +286,7 @@ static void percpu_channel_enq(void *arg)
> list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
> }
>
> -static void percpu_channel_deq(void *arg)
> +void percpu_channel_deq(void *arg)
> {
> struct vmbus_channel *channel = arg;
>
> @@ -313,15 +314,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
> BUG_ON(!channel->rescind);
> BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
>
> - if (channel->target_cpu != get_cpu()) {
> - put_cpu();
> - smp_call_function_single(channel->target_cpu,
> - percpu_channel_deq, channel, true);
> - } else {
> - percpu_channel_deq(channel);
> - put_cpu();
> - }
> -
> if (channel->primary_channel == NULL) {
> list_del(&channel->listentry);
>
> @@ -363,6 +355,7 @@ void vmbus_free_channels(void)
> */
> static void vmbus_process_offer(struct vmbus_channel *newchannel)
> {
> + struct tasklet_struct *tasklet;
> struct vmbus_channel *channel;
> bool fnew = true;
> unsigned long flags;
> @@ -409,6 +402,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
>
> init_vp_index(newchannel, dev_type);
>
> + tasklet = hv_context.event_dpc[newchannel->target_cpu];
> + tasklet_disable(tasklet);
> if (newchannel->target_cpu != get_cpu()) {
> put_cpu();
> smp_call_function_single(newchannel->target_cpu,
> @@ -418,6 +413,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
> percpu_channel_enq(newchannel);
> put_cpu();
> }
> + tasklet_enable(tasklet);
Do we need to do tasklet_schedule() afterwards to make sure no events are
left pending? This is probably not a big issue, as some other event will
usually trigger scheduling, but in some corner cases it may bite. The same
question applies to the code below and to vmbus_close_internal().
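I.e. something along these lines (untested sketch, reusing the 'tasklet'
variable from the hunk above):

	tasklet_disable(tasklet);
	/* ... percpu_channel_enq()/percpu_channel_deq() as in the patch ... */
	tasklet_enable(tasklet);
	/*
	 * Kick the tasklet once in case events arrived while it was
	 * disabled, so processing doesn't wait for the next interrupt.
	 */
	tasklet_schedule(tasklet);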
>
> /*
> * This state is used to indicate a successful open
> @@ -469,6 +465,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
> list_del(&newchannel->listentry);
> mutex_unlock(&vmbus_connection.channel_mutex);
>
> + tasklet_disable(tasklet);
> if (newchannel->target_cpu != get_cpu()) {
> put_cpu();
> smp_call_function_single(newchannel->target_cpu,
> @@ -477,6 +474,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
> percpu_channel_deq(newchannel);
> put_cpu();
> }
> + tasklet_enable(tasklet);
>
> err_free_chan:
> free_channel(newchannel);
> diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
> index 7be7237..95aea09 100644
> --- a/include/linux/hyperv.h
> +++ b/include/linux/hyperv.h
> @@ -1328,6 +1328,9 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
> struct icmsg_negotiate *, u8 *, int,
> int);
>
> +void percpu_channel_enq(void *arg);
> +void percpu_channel_deq(void *arg);
> +
> void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
>
> /*
--
Vitaly