Message-Id: <20210111231225.105347-5-tyreld@linux.ibm.com>
Date: Mon, 11 Jan 2021 17:12:08 -0600
From: Tyrel Datwyler <tyreld@...ux.ibm.com>
To: james.bottomley@...senpartnership.com
Cc: martin.petersen@...cle.com, linux-scsi@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org,
brking@...ux.ibm.com, Tyrel Datwyler <tyreld@...ux.ibm.com>
Subject: [PATCH v4 04/21] ibmvfc: add size parameter to ibmvfc_init_event_pool
With the upcoming addition of Sub-CRQs, the event pool size may vary
per queue.

Add a size parameter to ibmvfc_init_event_pool() so that
ibmvfc_alloc_queue() can request event pools of different sizes.
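
For illustration only (not part of the diff below), the calling
convention this introduces in ibmvfc_alloc_queue() amounts to roughly
the following, where only the CRQ format keeps the historical pool size
and other queue formats pass zero:

        unsigned int pool_size = 0;

        if (fmt == IBMVFC_CRQ_FMT)
                pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;

        /* a size of 0 makes ibmvfc_init_event_pool() a no-op for this queue */
        if (ibmvfc_init_event_pool(vhost, queue, pool_size))
                return -ENOMEM;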
Signed-off-by: Tyrel Datwyler <tyreld@...ux.ibm.com>
---
drivers/scsi/ibmvscsi/ibmvfc.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 9330f5a65a7e..524e81164d70 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -723,19 +723,23 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
  * Returns zero on success.
  **/
 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
-                                  struct ibmvfc_queue *queue)
+                                  struct ibmvfc_queue *queue,
+                                  unsigned int size)
 {
         int i;
         struct ibmvfc_event_pool *pool = &queue->evt_pool;
 
         ENTER;
-        pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
-        pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+        if (!size)
+                return 0;
+
+        pool->size = size;
+        pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
         if (!pool->events)
                 return -ENOMEM;
 
         pool->iu_storage = dma_alloc_coherent(vhost->dev,
-                                              pool->size * sizeof(*pool->iu_storage),
+                                              size * sizeof(*pool->iu_storage),
                                               &pool->iu_token, 0);
 
         if (!pool->iu_storage) {
@@ -747,7 +751,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
         INIT_LIST_HEAD(&queue->free);
         spin_lock_init(&queue->l_lock);
 
-        for (i = 0; i < pool->size; ++i) {
+        for (i = 0; i < size; ++i) {
                 struct ibmvfc_event *evt = &pool->events[i];
 
                 atomic_set(&evt->free, 1);
@@ -5013,6 +5017,7 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 {
         struct device *dev = vhost->dev;
         size_t fmt_size;
+        unsigned int pool_size = 0;
 
         ENTER;
         spin_lock_init(&queue->_lock);
@@ -5021,10 +5026,7 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
         switch (fmt) {
         case IBMVFC_CRQ_FMT:
                 fmt_size = sizeof(*queue->msgs.crq);
-                if (ibmvfc_init_event_pool(vhost, queue)) {
-                        dev_err(dev, "Couldn't initialize event pool.\n");
-                        return -ENOMEM;
-                }
+                pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
                 break;
         case IBMVFC_ASYNC_FMT:
                 fmt_size = sizeof(*queue->msgs.async);
@@ -5034,6 +5036,11 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
                 return -EINVAL;
         }
 
+        if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
+                dev_err(dev, "Couldn't initialize event pool.\n");
+                return -ENOMEM;
+        }
+
         queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
         if (!queue->msgs.handle)
                 return -ENOMEM;
--
2.27.0