Message-ID: <2b86e5d5-0861-9074-ab40-df111f54c7f0@gmail.com>
Date: Tue, 26 Sep 2023 17:18:24 +0800
From: Like Xu <like.xu.linux@...il.com>
To: Alex Williamson <alex.williamson@...hat.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, Sean Christopherson <seanjc@...gle.com>
Subject: Re: [PATCH v2 2/2] KVM: irqbypass: Convert producers/consumers single
linked list to XArray
On 25/9/2023 11:24 pm, Like Xu wrote:
>>> @@ -97,24 +98,23 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer)
>>> mutex_lock(&lock);
>>> - list_for_each_entry(tmp, &producers, node) {
>>> - if (tmp->token == producer->token || tmp == producer) {
>>> - ret = -EBUSY;
>>> + tmp = xa_load(&producers, token);
>>> + if (tmp || tmp == producer) {
>>> + ret = -EBUSY;
>>> + goto out_err;
>>> + }
>>> +
>>> + ret = xa_err(xa_store(&producers, token, producer, GFP_KERNEL));
>>> + if (ret)
>>> + goto out_err;
>>> +
>>> + consumer = xa_load(&consumers, token);
>>> + if (consumer) {
>>> + ret = __connect(producer, consumer);
>>> + if (ret)
>>> goto out_err;
>>
>> This doesn't match the previous behavior: the producer is registered in the
>> xarray regardless of the result of the connect operation, and the caller
>> cannot distinguish between failures. The module reference is released
>> regardless of the xarray entry. Nak.
>
> Hi Alex,
>
> Thanks for your comments. Indeed, the additional error-handling logic
> breaks the caller's expectations, as you said.
>
> What if we use the LIST as a fallback option for the XARRAY? Specifically,
> when xa_err(xa_store()) reports an error, fall back to the LIST to track
> producers/consumers; in most cases lookups still take the XARRAY path:
>
> static DEFINE_XARRAY(xproducers);
> ...
> if (xa_err(xa_store(&xproducers, (unsigned long)producer->token,
> producer, GFP_KERNEL)))
> list_add(&producer->node, &producers);
> ...
>
> There would also be a LIST fallback on the lookup path.
>
> The rough code already works; could we move in this direction (combining
> XARRAY with LIST to hide the memory allocation error from xa_store)?
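To make the intended semantics a bit more concrete before the full draft diff
below, here is a minimal sketch of the fallback idea (publish_producer() and
lookup_producer() are hypothetical helper names, locking via @lock is omitted,
and xproducers/producers are the xarray/list pair used in the draft):

static void publish_producer(struct irq_bypass_producer *producer)
{
	/*
	 * xa_store() may fail with -ENOMEM; that failure is deliberately
	 * ignored because the list below always records the producer and
	 * remains the source of truth.
	 */
	xa_store(&xproducers, (unsigned long)producer->token,
		 producer, GFP_KERNEL);
	list_add(&producer->node, &producers);
}

static struct irq_bypass_producer *lookup_producer(void *token)
{
	struct irq_bypass_producer *tmp;

	/* Fast path: the xarray holds the entry unless xa_store() failed. */
	tmp = xa_load(&xproducers, (unsigned long)token);
	if (tmp)
		return tmp;

	/* Slow path: the list always holds every registered producer. */
	list_for_each_entry(tmp, &producers, node) {
		if (tmp->token == token)
			return tmp;
	}

	return NULL;
}

With this split, a failed xa_store() only costs a slower lookup; it never
surfaces as a new error to the callers.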
For better discussion and further improvement, here is the full draft code
combining the xarray and the list: both structures store every producer and
consumer, with the xarray preferred for lookups:
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index e0aabbbf27ec..7cc30d699ece 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -18,12 +18,15 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/xarray.h>
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IRQ bypass manager utility module");
static LIST_HEAD(producers);
static LIST_HEAD(consumers);
+static DEFINE_XARRAY(xproducers);
+static DEFINE_XARRAY(xconsumers);
static DEFINE_MUTEX(lock);
/* @lock must be held when calling connect */
@@ -74,6 +77,117 @@ static void __disconnect(struct irq_bypass_producer *prod,
prod->start(prod);
}
+#define CHECK_TOKEN BIT_ULL(0)
+#define CHECK_POINTER BIT_ULL(1)
+
+static inline bool
+producer_already_exist(struct irq_bypass_producer *producer, u64 flags)
+{
+ struct irq_bypass_producer *tmp;
+
+ if (((flags & CHECK_POINTER) && xa_load(&xproducers,
+ (unsigned long)producer)) ||
+ ((flags & CHECK_TOKEN) && xa_load(&xproducers,
+ (unsigned long)producer->token)))
+ return true;
+
+ list_for_each_entry(tmp, &producers, node) {
+ if (((flags & CHECK_POINTER) && tmp == producer) ||
+ ((flags & CHECK_TOKEN) && tmp->token == producer->token))
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool
+consumer_already_exist(struct irq_bypass_consumer *consumer, u64 flags)
+{
+ struct irq_bypass_consumer *tmp;
+
+ if (((flags & CHECK_POINTER) && xa_load(&xconsumers,
+ (unsigned long)consumer)) ||
+ ((flags & CHECK_TOKEN) && xa_load(&xconsumers,
+ (unsigned long)consumer->token)))
+ return true;
+
+ list_for_each_entry(tmp, &consumers, node) {
+ if (((flags & CHECK_POINTER) && tmp == consumer) ||
+ ((flags & CHECK_TOKEN) && tmp->token == consumer->token))
+ return true;
+ }
+
+ return false;
+}
+
+static inline struct irq_bypass_producer *get_producer_by_token(void *token)
+{
+ struct irq_bypass_producer *tmp;
+
+ tmp = xa_load(&xproducers, (unsigned long)token);
+ if (tmp)
+ return tmp;
+
+ list_for_each_entry(tmp, &producers, node) {
+ if (tmp->token == token)
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static inline struct irq_bypass_consumer *get_consumer_by_token(void *token)
+{
+ struct irq_bypass_consumer *tmp;
+
+ tmp = xa_load(&xconsumers, (unsigned long)token);
+ if (tmp)
+ return tmp;
+
+ list_for_each_entry(tmp, &consumers, node) {
+ if (tmp->token == token)
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static inline void add_irq_bypass_producer(struct irq_bypass_producer *producer)
+{
+ xa_store(&xproducers, (unsigned long)producer->token,
+ producer, GFP_KERNEL);
+ xa_store(&xproducers, (unsigned long)producer,
+ producer, GFP_KERNEL);
+
+ list_add(&producer->node, &producers);
+}
+
+static inline void del_irq_bypass_producer(struct irq_bypass_producer *producer)
+{
+ xa_erase(&xproducers, (unsigned long)producer->token);
+ xa_erase(&xproducers, (unsigned long)producer);
+
+ list_del(&producer->node);
+}
+
+static inline void add_irq_bypass_consumer(struct irq_bypass_consumer *consumer)
+{
+ xa_store(&xconsumers, (unsigned long)consumer->token,
+ consumer, GFP_KERNEL);
+ xa_store(&xconsumers, (unsigned long)consumer,
+ consumer, GFP_KERNEL);
+
+ list_add(&consumer->node, &consumers);
+}
+
+static inline void del_irq_bypass_consumer(struct irq_bypass_consumer *consumer)
+{
+ xa_erase(&xconsumers, (unsigned long)consumer->token);
+ xa_erase(&xconsumers, (unsigned long)consumer);
+
+ list_del(&consumer->node);
+}
+
/**
* irq_bypass_register_producer - register IRQ bypass producer
* @producer: pointer to producer structure
@@ -83,7 +197,6 @@ static void __disconnect(struct irq_bypass_producer *prod,
*/
int irq_bypass_register_producer(struct irq_bypass_producer *producer)
{
- struct irq_bypass_producer *tmp;
struct irq_bypass_consumer *consumer;
int ret;
@@ -97,23 +210,19 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer)
mutex_lock(&lock);
- list_for_each_entry(tmp, &producers, node) {
- if (tmp->token == producer->token || tmp == producer) {
- ret = -EBUSY;
+ if (producer_already_exist(producer, CHECK_TOKEN | CHECK_POINTER)) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+
+ consumer = get_consumer_by_token(producer->token);
+ if (consumer) {
+ ret = __connect(producer, consumer);
+ if (ret)
goto out_err;
- }
}
- list_for_each_entry(consumer, &consumers, node) {
- if (consumer->token == producer->token) {
- ret = __connect(producer, consumer);
- if (ret)
- goto out_err;
- break;
- }
- }
-
- list_add(&producer->node, &producers);
+ add_irq_bypass_producer(producer);
mutex_unlock(&lock);
@@ -134,7 +243,6 @@ EXPORT_SYMBOL_GPL(irq_bypass_register_producer);
*/
void irq_bypass_unregister_producer(struct irq_bypass_producer *producer)
{
- struct irq_bypass_producer *tmp;
struct irq_bypass_consumer *consumer;
if (!producer->token)
@@ -147,20 +255,13 @@ void irq_bypass_unregister_producer(struct irq_bypass_producer *producer)
mutex_lock(&lock);
- list_for_each_entry(tmp, &producers, node) {
- if (tmp != producer)
- continue;
+ if (producer_already_exist(producer, CHECK_POINTER)) {
+ consumer = get_consumer_by_token(producer->token);
+ if (consumer)
+ __disconnect(producer, consumer);
- list_for_each_entry(consumer, &consumers, node) {
- if (consumer->token == producer->token) {
- __disconnect(producer, consumer);
- break;
- }
- }
-
- list_del(&producer->node);
+ del_irq_bypass_producer(producer);
module_put(THIS_MODULE);
- break;
}
mutex_unlock(&lock);
@@ -178,7 +279,6 @@ EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer);
*/
int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
{
- struct irq_bypass_consumer *tmp;
struct irq_bypass_producer *producer;
int ret;
@@ -193,23 +293,19 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
- list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token == consumer->token || tmp == consumer) {
- ret = -EBUSY;
+ if (consumer_already_exist(consumer, CHECK_TOKEN | CHECK_POINTER)) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+
+ producer = get_producer_by_token(consumer->token);
+ if (producer) {
+ ret = __connect(producer, consumer);
+ if (ret)
goto out_err;
- }
}
- list_for_each_entry(producer, &producers, node) {
- if (producer->token == consumer->token) {
- ret = __connect(producer, consumer);
- if (ret)
- goto out_err;
- break;
- }
- }
-
- list_add(&consumer->node, &consumers);
+ add_irq_bypass_consumer(consumer);
mutex_unlock(&lock);
@@ -230,7 +326,6 @@ EXPORT_SYMBOL_GPL(irq_bypass_register_consumer);
*/
void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
{
- struct irq_bypass_consumer *tmp;
struct irq_bypass_producer *producer;
if (!consumer->token)
@@ -243,20 +338,13 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
- list_for_each_entry(tmp, &consumers, node) {
- if (tmp != consumer)
- continue;
+ if (consumer_already_exist(consumer, CHECK_POINTER)) {
+ producer = get_producer_by_token(consumer->token);
+ if (producer)
+ __disconnect(producer, consumer);
- list_for_each_entry(producer, &producers, node) {
- if (producer->token == consumer->token) {
- __disconnect(producer, consumer);
- break;
- }
- }
-
- list_del(&consumer->node);
+ del_irq_bypass_consumer(consumer);
module_put(THIS_MODULE);
- break;
}
mutex_unlock(&lock);
@@ -264,3 +352,10 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer);
+
+static void __exit irqbypass_exit(void)
+{
+ xa_destroy(&xproducers);
+ xa_destroy(&xconsumers);
+}
+module_exit(irqbypass_exit);
--
2.42.0
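
For reference on the caller contract that the Nak above is about, here is a
rough sketch of a typical producer-side caller (loosely modelled on the VFIO
PCI MSI path; example_enable_bypass() and its parameters are illustrative
only, not actual VFIO code):

#include <linux/irqbypass.h>
#include <linux/printk.h>

static int example_enable_bypass(struct irq_bypass_producer *prod,
				 void *token, int irq)
{
	int ret;

	prod->token = token;	/* typically the eventfd_ctx shared with the consumer */
	prod->irq = irq;

	ret = irq_bypass_register_producer(prod);
	if (ret)
		/*
		 * The caller assumes nothing was left registered on failure
		 * and simply falls back to the non-bypass interrupt path.
		 */
		pr_info("IRQ bypass registration failed: %d\n", ret);

	return ret;
}

Keeping that assumption intact is why the draft above publishes the producer
to the xarray and list only after the token checks and __connect() have
succeeded.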