[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241204055659.1700459-2-bbhushan2@marvell.com>
Date: Wed, 4 Dec 2024 11:26:52 +0530
From: Bharat Bhushan <bbhushan2@...vell.com>
To: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<sgoutham@...vell.com>, <gakula@...vell.com>, <sbhatta@...vell.com>,
<hkelam@...vell.com>, <davem@...emloft.net>, <edumazet@...gle.com>,
<kuba@...nel.org>, <pabeni@...hat.com>, <jerinj@...vell.com>,
<lcherian@...vell.com>, <ndabilpuram@...vell.com>,
<andrew+netdev@...n.ch>, <richardcochran@...il.com>,
<bbhushan2@...vell.com>
Subject: [net-next PATCH v10 1/8] octeontx2-pf: map skb data as device writeable
Crypto hardware needs write permission for in-place encrypt
or decrypt operations on skb data to support IPsec crypto
offload. This patch uses skb_unshare() to make the skb data
writable for IPsec crypto offload and maps skb fragment
memory as device read-write.
Signed-off-by: Bharat Bhushan <bbhushan2@...vell.com>
---
v7->v8:
- spell correction (s/sdk/skb) in description
v6->v7:
- skb data was mapped as device writable, but it was not ensured
that the skb itself is writable. This version calls skb_unshare()
to make the skb data writable.
.../ethernet/marvell/octeontx2/nic/otx2_txrx.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 04bc06a80e23..3b0457e52a6a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -83,10 +84,17 @@ static unsigned int frag_num(unsigned int i)
static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len)
{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
const skb_frag_t *frag;
struct page *page;
int offset;
+ /* Crypto hardware needs write permission for IPsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
/* First segment is always skb->data */
if (!seg) {
page = virt_to_page(skb->data);
@@ -98,16 +106,22 @@ static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
offset = skb_frag_off(frag);
*len = skb_frag_size(frag);
}
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
}
static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
int seg;
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
for (seg = 0; seg < sg->num_segs; seg++) {
otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
+ sg->size[seg], dir);
}
sg->num_segs = 0;
}
--
2.34.1
Powered by blists - more mailing lists