Message-Id: <1605748345-32062-2-git-send-email-tlfalcon@linux.ibm.com>
Date: Wed, 18 Nov 2020 19:12:17 -0600
From: Thomas Falcon <tlfalcon@...ux.ibm.com>
To: kuba@...nel.org
Cc: netdev@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
cforno12@...ux.ibm.com, ljp@...ux.vnet.ibm.com,
ricklind@...ux.ibm.com, dnbanerg@...ibm.com,
drt@...ux.vnet.ibm.com, brking@...ux.vnet.ibm.com,
sukadev@...ux.vnet.ibm.com, tlfalcon@...ux.ibm.com
Subject: [PATCH net-next v2 1/9] ibmvnic: Introduce indirect subordinate Command Response Queue buffer
Introduce the infrastructure needed to send batched subordinate
Command Response Queue (sub-CRQ) descriptors, which the ibmvnic driver
uses to send TX frame and RX buffer descriptors to the hypervisor.
Signed-off-by: Thomas Falcon <tlfalcon@...ux.ibm.com>
---
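For context (illustrative only, not part of the patch): the sketch
below shows roughly how the new ind_buf fields are meant to be
consumed once the batching logic lands later in this series.
Descriptors are staged in the DMA-coherent indir_arr and then handed
to the hypervisor in a single H_SEND_SUB_CRQ_INDIRECT call instead of
one hcall per descriptor. The helper name and flush policy here are
hypothetical; plpar_hcall_norets(), H_SEND_SUB_CRQ_INDIRECT, dma_wmb()
and cpu_to_be64() are existing kernel/PAPR interfaces, and the snippet
assumes the driver's usual headers (ibmvnic.h, asm/hvcall.h).

/* Minimal sketch, assuming only the fields added by this patch. */
static int example_flush_ind_buf(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *scrq)
{
	struct ibmvnic_ind_xmit_queue *ind = &scrq->ind_buf;
	long rc;

	if (!ind->index)
		return 0;

	/* Make the staged descriptors visible before issuing the hcall. */
	dma_wmb();

	/* One hypervisor call submits all ind->index descriptors staged
	 * in indir_arr (at most IBMVNIC_MAX_IND_DESCS of them).
	 */
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT,
				adapter->vdev->unit_address,
				cpu_to_be64(scrq->handle),
				ind->indir_dma, ind->index);

	ind->index = 0;
	return rc ? -EIO : 0;
}

Batching this way trades a per-descriptor copy into indir_arr for far
fewer hypervisor calls per frame, which is where the TX and RX patches
later in this series get their gain.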
drivers/net/ethernet/ibm/ibmvnic.c | 23 +++++++++++++++++++++++
drivers/net/ethernet/ibm/ibmvnic.h | 9 +++++++++
2 files changed, 32 insertions(+)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index da15913879f8..3884f8a683a7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2858,6 +2858,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
 	atomic_set(&scrq->used, 0);
 	scrq->cur = 0;
+	scrq->ind_buf.index = 0;
 
 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@ -2909,6 +2910,11 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 		}
 	}
 
+	dma_free_coherent(dev,
+			  IBMVNIC_IND_ARR_SZ,
+			  scrq->ind_buf.indir_arr,
+			  scrq->ind_buf.indir_dma);
+
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)scrq->msgs, 2);
@@ -2955,6 +2961,17 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
+	scrq->ind_buf.index = 0;
+
+	scrq->ind_buf.indir_arr =
+		dma_alloc_coherent(dev,
+				   IBMVNIC_IND_ARR_SZ,
+				   &scrq->ind_buf.indir_dma,
+				   GFP_KERNEL);
+
+	if (!scrq->ind_buf.indir_arr)
+		goto indir_failed;
+
 	spin_lock_init(&scrq->lock);
 
 	netdev_dbg(adapter->netdev,
@@ -2963,6 +2980,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
 	return scrq;
 
+indir_failed:
+	do {
+		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+					adapter->vdev->unit_address,
+					scrq->crq_num);
+	} while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc));
 reg_failed:
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 217dcc7ded70..4a63e9886719 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -31,6 +31,8 @@
 #define IBMVNIC_BUFFS_PER_POOL	100
 #define IBMVNIC_MAX_QUEUES	16
 #define IBMVNIC_MAX_QUEUE_SZ	4096
+#define IBMVNIC_MAX_IND_DESCS	128
+#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)
 
 #define IBMVNIC_TSO_BUF_SZ	65536
 #define IBMVNIC_TSO_BUFS	64
@@ -861,6 +863,12 @@ union sub_crq {
 	struct ibmvnic_rx_buff_add_desc rx_add;
 };
 
+struct ibmvnic_ind_xmit_queue {
+	union sub_crq *indir_arr;
+	dma_addr_t indir_dma;
+	int index;
+};
+
 struct ibmvnic_sub_crq_queue {
 	union sub_crq *msgs;
 	int size, cur;
@@ -873,6 +881,7 @@ struct ibmvnic_sub_crq_queue {
 	spinlock_t lock;
 	struct sk_buff *rx_skb_top;
 	struct ibmvnic_adapter *adapter;
+	struct ibmvnic_ind_xmit_queue ind_buf;
 	atomic_t used;
 	char name[32];
 	u64 handle;
--
2.26.2