Message-ID: <20161220155602.6298-6-rkagan@virtuozzo.com>
Date: Tue, 20 Dec 2016 18:55:52 +0300
From: Roman Kagan <rkagan@...tuozzo.com>
To: Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>
CC: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, <x86@...nel.org>,
Haiyang Zhang <haiyangz@...rosoft.com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <devel@...uxdriverproject.org>,
"Denis V . Lunev" <den@...nvz.org>,
Roman Kagan <rkagan@...tuozzo.com>
Subject: [PATCH 05/15] hyperv: GFP_ATOMIC -> GFP_KERNEL

There's no need for GFP_ATOMIC when initializing the driver state.

While at it, also rely on free_page() accepting a NULL argument.

Signed-off-by: Roman Kagan <rkagan@...tuozzo.com>
---
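Note (illustration only, not part of the patch): GFP_KERNEL allocations
may sleep to reclaim memory and are therefore only valid in process
context, while GFP_ATOMIC must be used where sleeping is forbidden
(interrupt handlers, spinlocks held). Since hv_synic_alloc() runs in
process context while initializing the driver state, GFP_KERNEL is
sufficient. A minimal sketch of the two contexts, with hypothetical
function names:

	#include <linux/slab.h>
	#include <linux/interrupt.h>

	static int example_init(void)	/* process context: may sleep */
	{
		void *buf = kmalloc(4096, GFP_KERNEL);	/* may block on reclaim */

		if (!buf)
			return -ENOMEM;
		kfree(buf);
		return 0;
	}

	static irqreturn_t example_irq(int irq, void *dev_id)	/* atomic context */
	{
		void *buf = kmalloc(64, GFP_ATOMIC);	/* must not sleep */

		kfree(buf);
		return IRQ_HANDLED;
	}
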
drivers/hv/hv.c | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
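
Note (illustration only, not part of the patch): free_page() is a no-op
when handed a zero/NULL address (free_pages() explicitly checks for
addr == 0, just as kfree() accepts NULL), which is what makes the
explicit NULL checks removed from hv_synic_free_cpu() below redundant.
A hypothetical sketch:

	#include <linux/gfp.h>

	static void example_teardown(void *page)
	{
		/*
		 * Safe even when an earlier get_zeroed_page() failed and
		 * page is still NULL: freeing address 0 does nothing.
		 */
		free_page((unsigned long)page);
	}
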
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index a7256ec..6bbc0b09 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -393,28 +393,28 @@ int hv_synic_alloc(void)
 	int cpu;
 
 	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
-					 GFP_ATOMIC);
+					 GFP_KERNEL);
 	if (hv_context.hv_numa_map == NULL) {
 		pr_err("Unable to allocate NUMA map\n");
 		goto err;
 	}
 
 	for_each_online_cpu(cpu) {
-		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+		hv_context.event_dpc[cpu] = kmalloc(size, GFP_KERNEL);
 		if (hv_context.event_dpc[cpu] == NULL) {
 			pr_err("Unable to allocate event dpc\n");
 			goto err;
 		}
 		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
 
-		hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+		hv_context.msg_dpc[cpu] = kmalloc(size, GFP_KERNEL);
 		if (hv_context.msg_dpc[cpu] == NULL) {
 			pr_err("Unable to allocate event dpc\n");
 			goto err;
 		}
 		tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
 
-		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
+		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_KERNEL);
 		if (hv_context.clk_evt[cpu] == NULL) {
 			pr_err("Unable to allocate clock event device\n");
 			goto err;
@@ -423,7 +423,7 @@ int hv_synic_alloc(void)
 		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
 
 		hv_context.synic_message_page[cpu] =
-			(void *)get_zeroed_page(GFP_ATOMIC);
+			(void *)get_zeroed_page(GFP_KERNEL);
 
 		if (hv_context.synic_message_page[cpu] == NULL) {
 			pr_err("Unable to allocate SYNIC message page\n");
@@ -431,7 +431,7 @@ int hv_synic_alloc(void)
 		}
 
 		hv_context.synic_event_page[cpu] =
-			(void *)get_zeroed_page(GFP_ATOMIC);
+			(void *)get_zeroed_page(GFP_KERNEL);
 
 		if (hv_context.synic_event_page[cpu] == NULL) {
 			pr_err("Unable to allocate SYNIC event page\n");
@@ -439,7 +439,7 @@ int hv_synic_alloc(void)
 		}
 
 		hv_context.post_msg_page[cpu] =
-			(void *)get_zeroed_page(GFP_ATOMIC);
+			(void *)get_zeroed_page(GFP_KERNEL);
 
 		if (hv_context.post_msg_page[cpu] == NULL) {
 			pr_err("Unable to allocate post msg page\n");
@@ -457,12 +457,9 @@ static void hv_synic_free_cpu(int cpu)
 	kfree(hv_context.event_dpc[cpu]);
 	kfree(hv_context.msg_dpc[cpu]);
 	kfree(hv_context.clk_evt[cpu]);
-	if (hv_context.synic_event_page[cpu])
-		free_page((unsigned long)hv_context.synic_event_page[cpu]);
-	if (hv_context.synic_message_page[cpu])
-		free_page((unsigned long)hv_context.synic_message_page[cpu]);
-	if (hv_context.post_msg_page[cpu])
-		free_page((unsigned long)hv_context.post_msg_page[cpu]);
+	free_page((unsigned long)hv_context.synic_event_page[cpu]);
+	free_page((unsigned long)hv_context.synic_message_page[cpu]);
+	free_page((unsigned long)hv_context.post_msg_page[cpu]);
 }
 
 void hv_synic_free(void)
--
2.9.3