[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <1317930823.3457.24.camel@edumazet-laptop>
Date: Thu, 06 Oct 2011 21:53:43 +0200
From: Eric Dumazet <eric.dumazet@...il.com>
To: "H.K. Jerry Chu" <hkchu@...gle.com>
Cc: davem@...emloft.net, netdev@...r.kernel.org
Subject: Re: [PATCH] Break up the single NBD lock into one per NBD device
Le lundi 26 septembre 2011 à 16:34 -0700, H.K. Jerry Chu a écrit :
> From: Jerry Chu <hkchu@...gle.com>
>
> This patch breaks up the single NBD lock into one per
> disk. The single NBD lock has become a serious performance
> bottleneck when multiple NBD disks are being used.
>
> The original comment on why a single lock may be ok no
> longer holds for today's much faster NICs.
>
> Signed-off-by: H.K. Jerry Chu <hkchu@...gle.com>
> ---
> drivers/block/nbd.c | 22 +++++++++-------------
> 1 files changed, 9 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
> index f533f33..355e15c 100644
> --- a/drivers/block/nbd.c
> +++ b/drivers/block/nbd.c
> @@ -58,20 +58,9 @@ static unsigned int debugflags;
>
> static unsigned int nbds_max = 16;
> static struct nbd_device *nbd_dev;
> +static spinlock_t *nbd_locks;
static spinlock_t *nbd_locks __read_mostly;
> static int max_part;
>
> -/*
> - * Use just one lock (or at most 1 per NIC). Two arguments for this:
> - * 1. Each NIC is essentially a synchronization point for all servers
> - * accessed through that NIC so there's no need to have more locks
> - * than NICs anyway.
> - * 2. More locks lead to more "Dirty cache line bouncing" which will slow
> - * down each lock to the point where they're actually slower than just
> - * a single lock.
> - * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
> - */
> -static DEFINE_SPINLOCK(nbd_lock);
> -
> #ifndef NDEBUG
> static const char *ioctl_cmd_to_ascii(int cmd)
> {
> @@ -753,6 +742,12 @@ static int __init nbd_init(void)
> if (!nbd_dev)
> return -ENOMEM;
>
> + nbd_locks = kcalloc(nbds_max, sizeof(*nbd_locks), GFP_KERNEL);
> + if (!nbd_locks) {
> + kfree(nbd_dev);
> + return -ENOMEM;
> + }
> +
Please add a loop to initialize the spinlocks, so LOCKDEP sees each lock properly registered:
for (i = 0; i < nbds_max; i++)
spin_lock_init(&nbd_locks[i]);
> part_shift = 0;
> if (max_part > 0) {
> part_shift = fls(max_part);
> @@ -784,7 +779,7 @@ static int __init nbd_init(void)
> * every gendisk to have its very own request_queue struct.
> * These structs are big so we dynamically allocate them.
> */
> - disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
> + disk->queue = blk_init_queue(do_nbd_request, &nbd_locks[i]);
> if (!disk->queue) {
> put_disk(disk);
> goto out;
> @@ -832,6 +827,7 @@ out:
> put_disk(nbd_dev[i].disk);
> }
> kfree(nbd_dev);
> + kfree(nbd_locks);
> return err;
> }
>
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists