Message-ID: <AANLkTinqG_G2pevoYJyiHiBda1ZZS-53Uqz9WEspbMOy@mail.gmail.com>
Date: Thu, 28 Oct 2010 09:57:38 +0100
From: Stefan Hajnoczi <stefanha@...il.com>
To: Shirley Ma <mashirle@...ibm.com>
Cc: "mst@...hat.com" <mst@...hat.com>,
David Miller <davem@...emloft.net>, netdev@...r.kernel.org,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH 0/1] vhost: Reduce TX used buffer signal for performance
On Wed, Oct 27, 2010 at 10:05 PM, Shirley Ma <mashirle@...ibm.com> wrote:
> This patch changes the vhost TX used buffer signal to the guest from
> one-by-one to batches of up to 3/4 of the vring size. The change improves
> both bandwidth and CPU utilization for TX message sizes from 256 bytes to
> 8K, without introducing any regression.
Are there any concerns about introducing latency, or does the guest not
care when TX completions arrive?
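For illustration only (untested sketch, using the pend/num_pend fields this
patch adds): one way to bound that latency would be to flush whatever is
still batched at the end of each handle_tx() run, so completions are only
deferred while the TX path is actively busy:

	/* at the end of handle_tx(): flush batched completions so the
	 * guest never waits for the 3/4 threshold once the queue idles */
	if (vq->pend && vq->num_pend) {
		vhost_add_used_and_signal_n(&net->dev, vq, vq->pend,
					    vq->num_pend);
		vq->num_pend = 0;
	}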
> Signed-off-by: Shirley Ma <xma@...ibm.com>
> ---
>
> drivers/vhost/net.c | 19 ++++++++++++++++++-
> drivers/vhost/vhost.c | 31 +++++++++++++++++++++++++++++++
> drivers/vhost/vhost.h | 3 +++
> 3 files changed, 52 insertions(+), 1 deletions(-)
>
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index 4b4da5b..bd1ba71 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -198,7 +198,24 @@ static void handle_tx(struct vhost_net *net)
> if (err != len)
> pr_debug("Truncated TX packet: "
> " len %d != %zd\n", err, len);
> - vhost_add_used_and_signal(&net->dev, vq, head, 0);
> + /*
> + * if no pending buffer size allocate, signal used buffer
> + * one by one, otherwise, signal used buffer when reaching
> + * 3/4 ring size to reduce CPU utilization.
> + */
> + if (unlikely(vq->pend))
> + vhost_add_used_and_signal(&net->dev, vq, head, 0);
> + else {
> + vq->pend[vq->num_pend].id = head;
I don't understand the logic here: when vq->pend is NULL we take the else
branch and assign to vq->pend[vq->num_pend], i.e. dereference a NULL
pointer. The condition looks inverted relative to the comment above it.
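Presumably the test was meant to be inverted, something like (untested
sketch, same body as in the patch):

	if (unlikely(!vq->pend))
		/* no batch area allocated: signal each used buffer */
		vhost_add_used_and_signal(&net->dev, vq, head, 0);
	else {
		/* batch used buffers, signal at 3/4 of the ring size */
		vq->pend[vq->num_pend].id = head;
		vq->pend[vq->num_pend].len = 0;
		++vq->num_pend;
		if (vq->num_pend == (vq->num - (vq->num >> 2))) {
			vhost_add_used_and_signal_n(&net->dev, vq,
						    vq->pend,
						    vq->num_pend);
			vq->num_pend = 0;
		}
	}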
> + vq->pend[vq->num_pend].len = 0;
> + ++vq->num_pend;
> + if (vq->num_pend == (vq->num - (vq->num >> 2))) {
> + vhost_add_used_and_signal_n(&net->dev, vq,
> + vq->pend,
> + vq->num_pend);
> + vq->num_pend = 0;
> + }
> + }
> total_len += len;
> if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
> vhost_poll_queue(&vq->poll);
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 94701ff..47696d2 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -170,6 +170,16 @@ static void vhost_vq_reset(struct vhost_dev *dev,
> vq->call_ctx = NULL;
> vq->call = NULL;
> vq->log_ctx = NULL;
> + /* signal pending used buffers */
> + if (vq->pend) {
> + if (vq->num_pend != 0) {
> + vhost_add_used_and_signal_n(dev, vq, vq->pend,
> + vq->num_pend);
> + vq->num_pend = 0;
> + }
> + kfree(vq->pend);
> + }
> + vq->pend = NULL;
> }
>
> static int vhost_worker(void *data)
> @@ -273,7 +283,13 @@ long vhost_dev_init(struct vhost_dev *dev,
> dev->vqs[i].heads = NULL;
> dev->vqs[i].dev = dev;
> mutex_init(&dev->vqs[i].mutex);
> + dev->vqs[i].num_pend = 0;
> + dev->vqs[i].pend = NULL;
> vhost_vq_reset(dev, dev->vqs + i);
> + /* signal 3/4 of ring size used buffers */
> + dev->vqs[i].pend = kmalloc((dev->vqs[i].num -
> + (dev->vqs[i].num >> 2)) *
> + sizeof *vq->peed, GFP_KERNEL);
Has this patch been compile tested? vq->peed?
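Presumably this was meant to be something along the lines of the following
(untested sketch, matching the dev->vqs[i] usage in the rest of the loop,
and with the kmalloc() result still needing a NULL check):

	dev->vqs[i].pend = kmalloc((dev->vqs[i].num -
				    (dev->vqs[i].num >> 2)) *
				   sizeof *dev->vqs[i].pend, GFP_KERNEL);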
Stefan