Message-Id: <20141028033345.583682494@linuxfoundation.org>
Date: Tue, 28 Oct 2014 11:33:12 +0800
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, "K. Y. Srinivasan" <kys@...rosoft.com>,
Sitsofe Wheeler <sitsofe@...oo.com>
Subject: [PATCH 3.17 050/146] Drivers: hv: vmbus: Cleanup hv_post_message()
3.17-stable review patch. If anyone has any objections, please let me know.
------------------
From: "K. Y. Srinivasan" <kys@...rosoft.com>
commit b29ef3546aecb253a5552b198cef23750d56e1e4 upstream.
Minimize failures in this function by pre-allocating the buffer
for posting messages. The hypercall for posting the message can fail
for a number of reasons:
1. Transient resource-related issues
2. The buffer is not properly aligned
3. The buffer spans a page boundary
We address issues 2 and 3 by preallocating a per-cpu page for the buffer.
Transient resource-related failures (issue 1) are handled by the callers
of this function, which simply retry the hypercall, as sketched below.
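
For illustration, a minimal sketch of that caller-side retry pattern, in
the style of the driver's callers (e.g. vmbus_post_msg() in
drivers/hv/connection.c). The helper name post_msg_with_retry(), the
retry bound, and the 100 ms back-off below are assumptions made for
this sketch, not identifiers taken from the driver:

/*
 * Illustrative sketch only -- not part of this patch.  Needs
 * <linux/delay.h> for msleep(); must be called from sleepable context.
 */
static int post_msg_with_retry(union hv_connection_id connection_id,
			       enum hv_message_type message_type,
			       void *payload, size_t payload_size)
{
	int ret;
	int retries = 0;

	while (retries < 3) {			/* illustrative bound */
		ret = hv_post_message(connection_id, message_type,
				      payload, payload_size);
		if (ret == HV_STATUS_SUCCESS)
			return 0;
		if (ret == -EMSGSIZE)		/* not transient: give up */
			return ret;
		/* Treat anything else as a transient resource failure. */
		retries++;
		msleep(100);
	}
	return -EAGAIN;
}

Since hv_post_message() no longer allocates memory, the only remaining
failure modes are an oversized payload and a transient hypervisor
status, which is exactly what a bounded retry loop can absorb.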
This patch is based on the investigation
done by Dexuan Cui <decui@...rosoft.com>.
I would like to thank Sitsofe Wheeler <sitsofe@...oo.com>
for reporting the issue and helping in debugging.
Signed-off-by: K. Y. Srinivasan <kys@...rosoft.com>
Reported-by: Sitsofe Wheeler <sitsofe@...oo.com>
Tested-by: Sitsofe Wheeler <sitsofe@...oo.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
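Reviewer note, not for application: why one zeroed page per CPU
addresses constraints 2 and 3 from the changelog. The helper
alloc_post_msg_page() below is hypothetical, written only for this
note; the patch itself calls get_zeroed_page() directly in
hv_synic_alloc():

/*
 * Hypothetical helper, for illustration only.  get_zeroed_page()
 * returns a PAGE_SIZE-aligned buffer, which trivially satisfies the
 * hypercall's HV_HYPERCALL_PARAM_ALIGN requirement, and because the
 * post-message input block (header plus at most
 * HV_MESSAGE_PAYLOAD_BYTE_COUNT bytes of payload) is far smaller than
 * a page, the buffer can never straddle a page boundary.
 */
static inline void *alloc_post_msg_page(void)
{
	BUILD_BUG_ON(sizeof(struct hv_input_post_message) > PAGE_SIZE);
	return (void *)get_zeroed_page(GFP_ATOMIC);
}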
drivers/hv/hv.c | 27 +++++++++++++++------------
drivers/hv/hyperv_vmbus.h | 4 ++++
2 files changed, 19 insertions(+), 12 deletions(-)
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -138,6 +138,8 @@ int hv_init(void)
 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
 	memset(hv_context.synic_message_page, 0,
 	       sizeof(void *) * NR_CPUS);
+	memset(hv_context.post_msg_page, 0,
+	       sizeof(void *) * NR_CPUS);
 	memset(hv_context.vp_index, 0,
 	       sizeof(int) * NR_CPUS);
 	memset(hv_context.event_dpc, 0,
@@ -217,26 +219,18 @@ int hv_post_message(union hv_connection_
 			enum hv_message_type message_type,
 			void *payload, size_t payload_size)
 {
-	struct aligned_input {
-		u64 alignment8;
-		struct hv_input_post_message msg;
-	};
 	struct hv_input_post_message *aligned_msg;
 	u16 status;
-	unsigned long addr;
 
 	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
 		return -EMSGSIZE;
 
-	addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
-	if (!addr)
-		return -ENOMEM;
-
 	aligned_msg = (struct hv_input_post_message *)
-			(ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));
+			hv_context.post_msg_page[get_cpu()];
 
 	aligned_msg->connectionid = connection_id;
+	aligned_msg->reserved = 0;
 	aligned_msg->message_type = message_type;
 	aligned_msg->payload_size = payload_size;
 	memcpy((void *)aligned_msg->payload, payload, payload_size);
 
@@ -244,8 +238,7 @@ int hv_post_message(union hv_connection_
 	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
 		& 0xFFFF;
 
-	kfree((void *)addr);
-
+	put_cpu();
 	return status;
 }
 
@@ -294,6 +287,14 @@ int hv_synic_alloc(void)
 			pr_err("Unable to allocate SYNIC event page\n");
 			goto err;
 		}
+
+		hv_context.post_msg_page[cpu] =
+			(void *)get_zeroed_page(GFP_ATOMIC);
+
+		if (hv_context.post_msg_page[cpu] == NULL) {
+			pr_err("Unable to allocate post msg page\n");
+			goto err;
+		}
 	}
 
 	return 0;
@@ -308,6 +309,8 @@ static void hv_synic_free_cpu(int cpu)
 		free_page((unsigned long)hv_context.synic_event_page[cpu]);
 	if (hv_context.synic_message_page[cpu])
 		free_page((unsigned long)hv_context.synic_message_page[cpu]);
+	if (hv_context.post_msg_page[cpu])
+		free_page((unsigned long)hv_context.post_msg_page[cpu]);
 }
 
 void hv_synic_free(void)
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -515,6 +515,10 @@ struct hv_context {
 	 * per-cpu list of the channels based on their CPU affinity.
 	 */
 	struct list_head percpu_list[NR_CPUS];
+	/*
+	 * buffer to post messages to the host.
+	 */
+	void *post_msg_page[NR_CPUS];
 };
 
 extern struct hv_context hv_context;
--