Message-ID: <DM5PR03MB2490D6F29FECAE2D8813DBC9A0F30@DM5PR03MB2490.namprd03.prod.outlook.com>
Date: Fri, 16 Sep 2016 14:13:26 +0000
From: KY Srinivasan <kys@...rosoft.com>
To: KY Srinivasan <kys@...rosoft.com>,
"gregkh@...uxfoundation.org" <gregkh@...uxfoundation.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"devel@...uxdriverproject.org" <devel@...uxdriverproject.org>,
"olaf@...fle.de" <olaf@...fle.de>,
"apw@...onical.com" <apw@...onical.com>,
"vkuznets@...hat.com" <vkuznets@...hat.com>,
"jasowang@...hat.com" <jasowang@...hat.com>,
"leann.ogasawara@...onical.com" <leann.ogasawara@...onical.com>
CC: Vivek Yadav <vyadav@...rosoft.com>
Subject: RE: [PATCH 1/1] Drivers: hv: hv_util: Avoid dynamic allocation in
time synch
> -----Original Message-----
> From: kys@...hange.microsoft.com [mailto:kys@...hange.microsoft.com]
> Sent: Friday, September 16, 2016 9:31 PM
> To: gregkh@...uxfoundation.org; linux-kernel@...r.kernel.org;
> devel@...uxdriverproject.org; olaf@...fle.de; apw@...onical.com;
> vkuznets@...hat.com; jasowang@...hat.com;
> leann.ogasawara@...onical.com
> Cc: Vivek Yadav <vyadav@...rosoft.com>; KY Srinivasan
> <kys@...rosoft.com>
> Subject: [PATCH 1/1] Drivers: hv: hv_util: Avoid dynamic allocation in time
> synch
>
> From: Vivek Yadav <vyadav@...rosoft.com>
>
> Under stress, we have seen allocation failures in the time synch code. Avoid
> this dynamic allocation by using a statically allocated work item.
>
> Signed-off-by: Vivek Yadav <vyadav@...rosoft.com>
> Signed-off-by: K. Y. Srinivasan <kys@...rosoft.com>
Greg,
Please ignore this patch; it has already been committed and was sent again because of a glitch.
Regards,
K. Y
> ---
> drivers/hv/hv_util.c | 39 ++++++++++++++++++++++++++++-----------
> 1 files changed, 28 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
> index 6286bdc..4aa3cb6 100644
> --- a/drivers/hv/hv_util.c
> +++ b/drivers/hv/hv_util.c
> @@ -64,9 +64,14 @@ static struct hv_util_service util_shutdown = {
> .util_cb = shutdown_onchannelcallback,
> };
>
> +static int hv_timesync_init(struct hv_util_service *srv);
> +static void hv_timesync_deinit(void);
> +
> static void timesync_onchannelcallback(void *context);
> static struct hv_util_service util_timesynch = {
> .util_cb = timesync_onchannelcallback,
> + .util_init = hv_timesync_init,
> + .util_deinit = hv_timesync_deinit,
> };
>
> static void heartbeat_onchannelcallback(void *context);
> @@ -201,7 +206,6 @@ static void hv_set_host_time(struct work_struct *work)
> host_ts = ns_to_timespec(host_tns);
>
> do_settimeofday(&host_ts);
> - kfree(wrk);
> }
>
> /*
> @@ -217,22 +221,24 @@ static void hv_set_host_time(struct work_struct *work)
> * typically used as a hint to the guest. The guest is under no obligation
> * to discipline the clock.
> */
> +static struct adj_time_work wrk;
> static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 flags)
> {
> - struct adj_time_work *wrk;
>
> - wrk = kmalloc(sizeof(struct adj_time_work), GFP_ATOMIC);
> - if (wrk == NULL)
> + /*
> + * This check is safe since we are executing in the
> + * interrupt context and time synch messages are always
> + * delivered on the same CPU.
> + */
> + if (work_pending(&wrk.work))
> return;
>
> - wrk->host_time = hosttime;
> - wrk->ref_time = reftime;
> - wrk->flags = flags;
> + wrk.host_time = hosttime;
> + wrk.ref_time = reftime;
> + wrk.flags = flags;
> 	if ((flags & (ICTIMESYNCFLAG_SYNC | ICTIMESYNCFLAG_SAMPLE)) != 0) {
> - INIT_WORK(&wrk->work, hv_set_host_time);
> - schedule_work(&wrk->work);
> - } else
> - kfree(wrk);
> + schedule_work(&wrk.work);
> + }
> }
>
> /*
> @@ -457,6 +463,17 @@ static struct hv_driver util_drv = {
> .remove = util_remove,
> };
>
> +static int hv_timesync_init(struct hv_util_service *srv)
> +{
> + INIT_WORK(&wrk.work, hv_set_host_time);
> + return 0;
> +}
> +
> +static void hv_timesync_deinit(void)
> +{
> + cancel_work_sync(&wrk.work);
> +}
> +
> static int __init init_hyperv_utils(void)
> {
> pr_info("Registering HyperV Utility Driver\n");
> --
> 1.7.4.1
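
For anyone reading the archived patch without the hv_util context: the change
replaces a per-message kmalloc(GFP_ATOMIC) with a single statically allocated
work item, guarded by work_pending() so a new sample is dropped if the previous
one has not been handled yet. Below is a minimal, self-contained sketch of that
pattern; the demo_* names and the payload fields are illustrative only and are
not taken from hv_util.c.

/*
 * Sketch of the pattern used in the patch above: no per-message kmalloc(),
 * just one statically allocated work item guarded by work_pending().
 */
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_time_work {
	struct work_struct work;
	u64 host_time;
	u64 ref_time;
	u8 flags;
};

static struct demo_time_work demo_wrk;

static void demo_set_host_time(struct work_struct *work)
{
	struct demo_time_work *w =
		container_of(work, struct demo_time_work, work);

	/* Consume the sample here; nothing was allocated, nothing to free. */
	pr_info("demo: host_time=%llu ref_time=%llu flags=%u\n",
		w->host_time, w->ref_time, (unsigned int)w->flags);
}

/*
 * Producer side; in the real driver this runs in interrupt context and all
 * samples arrive on the same CPU, which is what makes the check race-free.
 */
static void demo_adj_guesttime(u64 hosttime, u64 reftime, u8 flags)
{
	if (work_pending(&demo_wrk.work))
		return;	/* previous sample not processed yet; drop this one */

	demo_wrk.host_time = hosttime;
	demo_wrk.ref_time = reftime;
	demo_wrk.flags = flags;
	schedule_work(&demo_wrk.work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_wrk.work, demo_set_host_time);
	demo_adj_guesttime(1, 2, 0);	/* example invocation */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_wrk.work);	/* ensure nothing is in flight */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The trade-off is the one the patch comment spells out: dropping a sample while
work is still pending is acceptable because the host's time samples are only a
hint to the guest, and updating the fields without locking is safe only because
the producer always runs in interrupt context on the same CPU.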