Message-ID: <20080716171547.GA13478@linux.intel.com>
Date: Wed, 16 Jul 2008 10:15:47 -0700
From: mark gross <mgross@...ux.intel.com>
To: "Jakub W. Jozwicki" <jozwicki@...er.pl>
Cc: linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2.6.25.10] pm_qos_params: change spinlock to rwlock
nack.
On Sun, Jul 13, 2008 at 01:19:19AM +0200, Jakub W. Jozwicki wrote:
> Concurrent calls to pm_qos_requirement shouldn't block each other. This patch
> changes the spinlock to an rwlock and fixes issues with PREEMPT_RT.
>
> Signed-off-by: Jakub Jozwicki <jozwicki@...er.pl>
>
> --- linux-2.6.25.10/kernel/pm_qos_params.c 2008-07-03 05:46:47.000000000 +0200
> +++ linux-2.6.25.10-rt7/kernel/pm_qos_params.c 2008-07-12 23:18:20.696615771 +0200
> @@ -110,7 +110,7 @@
> &network_throughput_pm_qos
> };
>
> -static DEFINE_SPINLOCK(pm_qos_lock);
> +static DEFINE_RWLOCK(pm_qos_lock);
I don't see a problem with using spinlocks, and since this issue only
shows up when running PREEMPT-RT, I feel that this change would perhaps
be better carried in the RT tree.
Sorry,
--mgross
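
For anyone skimming the thread, the pattern being proposed is the usual
reader/writer split: pm_qos_requirement() only reads the cached target
value, so readers would take the lock shared and only the update paths
would take it exclusively.  Below is a minimal userspace sketch of that
pattern using POSIX rwlocks purely as an analogue for the kernel
primitive; all of the names in it are illustrative, not kernel code.

/*
 * Userspace analogue of the proposed locking: many readers may hold
 * qos_lock at once, while a writer recomputing the target holds it alone.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t qos_lock = PTHREAD_RWLOCK_INITIALIZER;
static int target_value = 2000;		/* stand-in for a default value */

static int qos_requirement(void)
{
	int val;

	pthread_rwlock_rdlock(&qos_lock);	/* shared: readers don't block readers */
	val = target_value;
	pthread_rwlock_unlock(&qos_lock);
	return val;
}

static void qos_update_target(int new_value)
{
	pthread_rwlock_wrlock(&qos_lock);	/* exclusive: a writer serializes everyone */
	target_value = new_value;
	pthread_rwlock_unlock(&qos_lock);
}

int main(void)
{
	qos_update_target(50);
	printf("target is %d\n", qos_requirement());
	return 0;
}

Worth noting when weighing the nack: in the quoted kernel code the
read-side critical section is a single word load, so the window in which
readers could contend on the existing spinlock is already very small.
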
>
> static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
> size_t count, loff_t *f_pos);
> @@ -142,7 +142,7 @@
> unsigned long flags;
> int call_notifier = 0;
>
> - spin_lock_irqsave(&pm_qos_lock, flags);
> + write_lock_irqsave(&pm_qos_lock, flags);
> extreme_value = pm_qos_array[target]->default_value;
> list_for_each_entry(node,
> &pm_qos_array[target]->requirements.list, list) {
> @@ -155,7 +155,7 @@
> pr_debug(KERN_ERR "new target for qos %d is %d\n", target,
> pm_qos_array[target]->target_value);
> }
> - spin_unlock_irqrestore(&pm_qos_lock, flags);
> + write_unlock_irqrestore(&pm_qos_lock, flags);
>
> if (call_notifier)
> blocking_notifier_call_chain(pm_qos_array[target]->notifiers,
> @@ -195,9 +195,9 @@
> int ret_val;
> unsigned long flags;
>
> - spin_lock_irqsave(&pm_qos_lock, flags);
> + read_lock_irqsave(&pm_qos_lock, flags);
> ret_val = pm_qos_array[pm_qos_class]->target_value;
> - spin_unlock_irqrestore(&pm_qos_lock, flags);
> + read_unlock_irqrestore(&pm_qos_lock, flags);
>
> return ret_val;
> }
> @@ -228,10 +228,10 @@
> if (!dep->name)
> goto cleanup;
>
> - spin_lock_irqsave(&pm_qos_lock, flags);
> + write_lock_irqsave(&pm_qos_lock, flags);
> list_add(&dep->list,
> &pm_qos_array[pm_qos_class]->requirements.list);
> - spin_unlock_irqrestore(&pm_qos_lock, flags);
> + write_unlock_irqrestore(&pm_qos_lock, flags);
> update_target(pm_qos_class);
>
> return 0;
> @@ -260,7 +260,7 @@
> struct requirement_list *node;
> int pending_update = 0;
>
> - spin_lock_irqsave(&pm_qos_lock, flags);
> + write_lock_irqsave(&pm_qos_lock, flags);
> list_for_each_entry(node,
> &pm_qos_array[pm_qos_class]->requirements.list, list) {
> if (strcmp(node->name, name) == 0) {
> @@ -273,7 +273,7 @@
> break;
> }
> }
> - spin_unlock_irqrestore(&pm_qos_lock, flags);
> + write_unlock_irqrestore(&pm_qos_lock, flags);
> if (pending_update)
> update_target(pm_qos_class);
>
> @@ -295,7 +295,7 @@
> struct requirement_list *node;
> int pending_update = 0;
>
> - spin_lock_irqsave(&pm_qos_lock, flags);
> + write_lock_irqsave(&pm_qos_lock, flags);
> list_for_each_entry(node,
> &pm_qos_array[pm_qos_class]->requirements.list, list) {
> if (strcmp(node->name, name) == 0) {
> @@ -306,7 +306,7 @@
> break;
> }
> }
> - spin_unlock_irqrestore(&pm_qos_lock, flags);
> + write_unlock_irqrestore(&pm_qos_lock, flags);
> if (pending_update)
> update_target(pm_qos_class);
> }
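
For context on the interface being patched: in the 2.6.25-era API a
driver registers a named requirement per QoS class and the kernel keeps
an aggregated target value, which pm_qos_requirement() returns on the
hot read path the patch is trying to de-serialize.  A hedged sketch of
typical driver usage follows; the constants and function names are taken
from include/linux/pm_qos_params.h of that era, the "example_driver"
requirement name is made up, and the exact signatures should be checked
against your tree.

#include <linux/module.h>
#include <linux/pm_qos_params.h>

/* Illustrative requirement name; pick something unique per driver. */
#define EXAMPLE_QOS_NAME "example_driver"

static int __init example_init(void)
{
	/*
	 * Register a named requirement: ask that CPU DMA latency stay at
	 * or below 50 usec for as long as this module is loaded.
	 */
	return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
				      EXAMPLE_QOS_NAME, 50);
}

static void __exit example_exit(void)
{
	/* Drop the requirement so the aggregated target can relax again. */
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, EXAMPLE_QOS_NAME);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");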