Message-ID: <20090623143856.GB4262@redhat.com>
Date: Tue, 23 Jun 2009 10:38:56 -0400
From: Vivek Goyal <vgoyal@...hat.com>
To: Gui Jianfeng <guijianfeng@...fujitsu.com>
Cc: linux-kernel@...r.kernel.org,
containers@...ts.linux-foundation.org, dm-devel@...hat.com,
jens.axboe@...cle.com, nauman@...gle.com, dpshah@...gle.com,
lizf@...fujitsu.com, mikew@...gle.com, fchecconi@...il.com,
paolo.valente@...more.it, ryov@...inux.co.jp,
fernando@....ntt.co.jp, s-uchida@...jp.nec.com, taka@...inux.co.jp,
jmoyer@...hat.com, dhaval@...ux.vnet.ibm.com,
balbir@...ux.vnet.ibm.com, righi.andrea@...il.com,
m-ikeda@...jp.nec.com, jbaron@...hat.com, agk@...hat.com,
snitzer@...hat.com, akpm@...ux-foundation.org, peterz@...radead.org
Subject: Re: [PATCH 07/20] io-controller: Export disk time used and nr
sectors dispatched through cgroups
On Tue, Jun 23, 2009 at 08:10:54PM +0800, Gui Jianfeng wrote:
> Vivek Goyal wrote:
> ...
> > +
> > +static int io_cgroup_disk_sectors_read(struct cgroup *cgroup,
> > + struct cftype *cftype, struct seq_file *m)
> > +{
> > + struct io_cgroup *iocg;
> > + struct io_group *iog;
> > + struct hlist_node *n;
> > +
> > + if (!cgroup_lock_live_group(cgroup))
> > + return -ENODEV;
> > +
> > + iocg = cgroup_to_io_cgroup(cgroup);
> > +
> > + spin_lock_irq(&iocg->lock);
>
> It's better to use rcu_read_lock() here instead, since this is
> a read-only path.
>
Thanks Gui. Queued for next posting.
Vivek
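
For reference, this is the standard RCU read-side pattern: readers may
walk the hlist locklessly as long as every updater uses the _rcu list
mutators and defers freeing past a grace period. A minimal sketch of
the read side (the function name io_group_print_stats is made up for
illustration; the structures and fields follow the patch):

#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kdev_t.h>
#include <linux/seq_file.h>

/*
 * Lockless read-side walk of an RCU-protected hlist.  rcu_read_lock()
 * only marks a read-side critical section; unlike spin_lock_irq() it
 * neither spins nor disables interrupts, so concurrent readers scale.
 */
static void io_group_print_stats(struct io_cgroup *iocg, struct seq_file *m)
{
	struct io_group *iog;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(iog, n, &iocg->group_data, group_node) {
		/* skip groups waiting to be reclaimed on cgroup deletion */
		if (iog->key)
			seq_printf(m, "%u %u %lu\n",
				   MAJOR(iog->dev), MINOR(iog->dev),
				   iog->entity.total_service);
	}
	rcu_read_unlock();
}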
> Signed-off-by: Gui Jianfeng <guijianfeng@...fujitsu.com>
> ---
> block/elevator-fq.c | 12 ++++++------
> 1 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/block/elevator-fq.c b/block/elevator-fq.c
> index 2ad40eb..d779282 100644
> --- a/block/elevator-fq.c
> +++ b/block/elevator-fq.c
> @@ -1418,7 +1418,7 @@ static int io_cgroup_disk_time_read(struct cgroup *cgroup,
>
> iocg = cgroup_to_io_cgroup(cgroup);
>
> - spin_lock_irq(&iocg->lock);
> + rcu_read_lock();
> hlist_for_each_entry_rcu(iog, n, &iocg->group_data, group_node) {
> /*
> * There might be groups which are not functional and
> @@ -1430,7 +1430,7 @@ static int io_cgroup_disk_time_read(struct cgroup *cgroup,
> iog->entity.total_service);
> }
> }
> - spin_unlock_irq(&iocg->lock);
> + rcu_read_unlock();
> cgroup_unlock();
>
> return 0;
> @@ -1448,7 +1448,7 @@ static int io_cgroup_disk_sectors_read(struct cgroup *cgroup,
>
> iocg = cgroup_to_io_cgroup(cgroup);
>
> - spin_lock_irq(&iocg->lock);
> + rcu_read_lock();
> hlist_for_each_entry_rcu(iog, n, &iocg->group_data, group_node) {
> /*
> * There might be groups which are not functional and
> @@ -1460,7 +1460,7 @@ static int io_cgroup_disk_sectors_read(struct cgroup *cgroup,
> iog->entity.total_sector_service);
> }
> }
> - spin_unlock_irq(&iocg->lock);
> + rcu_read_unlock();
> cgroup_unlock();
>
> return 0;
> @@ -1478,7 +1478,7 @@ static int io_cgroup_disk_queue_read(struct cgroup *cgroup,
> return -ENODEV;
>
> iocg = cgroup_to_io_cgroup(cgroup);
> - spin_lock_irq(&iocg->lock);
> + rcu_read_lock();
> /* Loop through all the io groups and print statistics */
> hlist_for_each_entry_rcu(iog, n, &iocg->group_data, group_node) {
> /*
> @@ -1491,7 +1491,7 @@ static int io_cgroup_disk_queue_read(struct cgroup *cgroup,
> iog->queue_duration);
> }
> }
> - spin_unlock_irq(&iocg->lock);
> + rcu_read_unlock();
> cgroup_unlock();
>
> return 0;
> --
> 1.5.4.rc3
>
> > + hlist_for_each_entry_rcu(iog, n, &iocg->group_data, group_node) {
> > + /*
> > + * There might be groups which are not functional and
> > + * waiting to be reclaimed upon cgroup deletion.
> > + */
> > + if (iog->key) {
> > + seq_printf(m, "%u %u %lu\n", MAJOR(iog->dev),
> > + MINOR(iog->dev),
> > + iog->entity.total_sector_service);
> > + }
> > + }
> > + spin_unlock_irq(&iocg->lock);
> > + cgroup_unlock();
> > +
> > + return 0;
> > +}
> > +
> >
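
The conversion is only safe because the update side keeps serializing
writers on iocg->lock and uses the RCU list primitives, with freeing
deferred past a grace period. A sketch of what the matching update
side has to look like (io_group_attach/io_group_detach are
illustrative names, not functions from the patch):

#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void io_group_attach(struct io_cgroup *iocg, struct io_group *iog)
{
	/* writers still serialize against each other via the spinlock */
	spin_lock_irq(&iocg->lock);
	hlist_add_head_rcu(&iog->group_node, &iocg->group_data);
	spin_unlock_irq(&iocg->lock);
}

static void io_group_detach(struct io_cgroup *iocg, struct io_group *iog)
{
	spin_lock_irq(&iocg->lock);
	hlist_del_rcu(&iog->group_node);
	spin_unlock_irq(&iocg->lock);

	/* wait until all rcu_read_lock() readers are done with iog */
	synchronize_rcu();
	kfree(iog);
}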