Message-ID: <20231213095349.69895-1-sumang@marvell.com>
Date: Wed, 13 Dec 2023 15:23:49 +0530
From: Suman Ghosh <sumang@...vell.com>
To: <davem@...emloft.net>, <edumazet@...gle.com>, <kuba@...nel.org>,
        <pabeni@...hat.com>, <sgoutham@...vell.com>, <sbhatta@...vell.com>,
        <jerinj@...vell.com>, <gakula@...vell.com>, <hkelam@...vell.com>,
        <lcherian@...vell.com>, <netdev@...r.kernel.org>,
        <linux-kernel@...r.kernel.org>, <horms@...nel.org>,
        <wojciech.drewek@...el.com>
CC: Suman Ghosh <sumang@...vell.com>
Subject: [net-next PATCH v2] octeontx2-af: Fix multicast/mirror group lock/unlock issue

There is a race between finding a multicast/mirror group entry and
deleting that entry: the group lock was taken and released inside
rvu_nix_mcast_find_grp_elem() itself, which is incorrect. The group lock
must be held by the caller across the entire group update/delete
operation. This patch fixes that.

Fixes: 51b2804c19cd ("octeontx2-af: Add new mbox to support multicast/mirror offload")
Signed-off-by: Suman Ghosh <sumang@...vell.com>
---
Note: This is a follow-up of

https://git.kernel.org/netdev/net-next/c/51b2804c19cd

v2 changes:
- Addressed review comments from Simon about a missed unlock and re-organized
  the code with some goto labels.
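
For readers skimming the diff, here is a minimal userspace sketch of the
locking pattern the patch moves to (pthread mutex, hypothetical types and
names, not the driver code): the lookup helper no longer locks on its own,
and the caller holds the group lock across the whole find-then-modify
sequence, releasing it on a single unlock label.

/* Minimal sketch, not the actual driver code. Types and names are
 * illustrative stand-ins for the nix_mcast_grp structures.
 */
#include <pthread.h>
#include <stddef.h>

struct grp_elem {
	struct grp_elem *next;
	unsigned int idx;
	int mcam_index;
};

struct grp_list {
	pthread_mutex_t lock;
	struct grp_elem *head;
};

/* Lock-free lookup: caller must already hold list->lock. */
static struct grp_elem *find_elem(struct grp_list *list, unsigned int idx)
{
	struct grp_elem *iter;

	for (iter = list->head; iter; iter = iter->next)
		if (iter->idx == idx)
			return iter;
	return NULL;
}

/* Caller-side pattern after the fix: one lock spans lookup and update. */
static int update_mcam_index(struct grp_list *list, unsigned int idx, int mcam)
{
	struct grp_elem *elem;
	int ret = 0;

	pthread_mutex_lock(&list->lock);
	elem = find_elem(list, idx);
	if (!elem) {
		ret = -1;	/* stand-in for NIX_AF_ERR_INVALID_MCAST_GRP */
		goto unlock;
	}
	elem->mcam_index = mcam;
unlock:
	pthread_mutex_unlock(&list->lock);
	return ret;
}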

 .../ethernet/marvell/octeontx2/af/rvu_nix.c   | 84 ++++++++++++-------
 1 file changed, 54 insertions(+), 30 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index b01503acd520..72e0a7717c3e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -6142,14 +6142,12 @@ static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_g
 	struct nix_mcast_grp_elem *iter;
 	bool is_found = false;
 
-	mutex_lock(&mcast_grp->mcast_grp_lock);
 	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
 		if (iter->mcast_grp_idx == mcast_grp_idx) {
 			is_found = true;
 			break;
 		}
 	}
-	mutex_unlock(&mcast_grp->mcast_grp_lock);
 
 	if (is_found)
 		return iter;
@@ -6162,7 +6160,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
 	struct nix_mcast_grp_elem *elem;
 	struct nix_mcast_grp *mcast_grp;
 	struct nix_hw *nix_hw;
-	int blkaddr;
+	int blkaddr, ret;
 
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
@@ -6170,11 +6168,15 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
 		return NIX_AF_ERR_INVALID_NIXBLK;
 
 	mcast_grp = &nix_hw->mcast_grp;
+	mutex_lock(&mcast_grp->mcast_grp_lock);
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
 	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+	else
+		ret = elem->mce_start_index;
 
-	return elem->mce_start_index;
+	mutex_unlock(&mcast_grp->mcast_grp_lock);
+	return ret;
 }
 
 void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
@@ -6238,7 +6240,7 @@ int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
 	struct nix_mcast_grp_elem *elem;
 	struct nix_mcast_grp *mcast_grp;
 	struct nix_hw *nix_hw;
-	int blkaddr;
+	int blkaddr, ret = 0;
 
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
@@ -6246,13 +6248,15 @@ int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
 		return NIX_AF_ERR_INVALID_NIXBLK;
 
 	mcast_grp = &nix_hw->mcast_grp;
+	mutex_lock(&mcast_grp->mcast_grp_lock);
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
 	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
-
-	elem->mcam_index = mcam_index;
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+	else
+		elem->mcam_index = mcam_index;
 
-	return 0;
+	mutex_unlock(&mcast_grp->mcast_grp_lock);
+	return ret;
 }
 
 int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
@@ -6297,18 +6301,27 @@ int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
 	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
 	struct nix_mcast_grp_elem *elem;
 	struct nix_mcast_grp *mcast_grp;
+	int blkaddr, err, ret = 0;
 	struct nix_mcast *mcast;
 	struct nix_hw *nix_hw;
-	int blkaddr, err;
 
 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
 	if (err)
 		return err;
 
 	mcast_grp = &nix_hw->mcast_grp;
+
+	/* If AF is requesting for the deletion,
+	 * then AF is already taking the lock
+	 */
+	if (!req->is_af)
+		mutex_lock(&mcast_grp->mcast_grp_lock);
+
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
-	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
+	if (!elem) {
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+		goto unlock_grp;
+	}
 
 	/* If no mce entries are associated with the group
 	 * then just remove it from the global list.
@@ -6333,19 +6346,15 @@ int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
 	mutex_unlock(&mcast->mce_lock);
 
 delete_grp:
-	/* If AF is requesting for the deletion,
-	 * then AF is already taking the lock
-	 */
-	if (!req->is_af)
-		mutex_lock(&mcast_grp->mcast_grp_lock);
-
 	list_del(&elem->list);
 	kfree(elem);
 	mcast_grp->count--;
+
+unlock_grp:
 	if (!req->is_af)
 		mutex_unlock(&mcast_grp->mcast_grp_lock);
 
-	return 0;
+	return ret;
 }
 
 int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
@@ -6370,9 +6379,18 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 		return err;
 
 	mcast_grp = &nix_hw->mcast_grp;
+
+	/* If AF is requesting for the updation,
+	 * then AF is already taking the lock
+	 */
+	if (!req->is_af)
+		mutex_lock(&mcast_grp->mcast_grp_lock);
+
 	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
-	if (!elem)
-		return NIX_AF_ERR_INVALID_MCAST_GRP;
+	if (!elem) {
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+		goto unlock_grp;
+	}
 
 	/* If any pcifunc matches the group's pcifunc, then we can
 	 * delete the entire group.
@@ -6383,9 +6401,10 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 				/* Delete group */
 				dreq.hdr.pcifunc = elem->pcifunc;
 				dreq.mcast_grp_idx = elem->mcast_grp_idx;
-				dreq.is_af = req->is_af;
+				dreq.is_af = 1;
 				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
-				return 0;
+				ret = 0;
+				goto unlock_grp;
 			}
 		}
 	}
@@ -6410,7 +6429,7 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
 						      elem->mcam_index, true);
 				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
-				goto done;
+				goto unlock_mce;
 			}
 		}
 
@@ -6426,7 +6445,7 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
 						      elem->mcam_index, true);
 
-			goto done;
+			goto unlock_mce;
 		}
 	} else {
 		if (!prev_count || prev_count < req->num_mce_entry) {
@@ -6434,7 +6453,7 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
 						      elem->mcam_index, true);
 			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
-			goto done;
+			goto unlock_mce;
 		}
 
 		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
@@ -6450,14 +6469,14 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 						      elem->mcam_index,
 						      true);
 
-			goto done;
+			goto unlock_mce;
 		}
 	}
 
 	if (elem->mcam_index == -1) {
 		rsp->mce_start_index = elem->mce_start_index;
 		ret = 0;
-		goto done;
+		goto unlock_mce;
 	}
 
 	nix_mcast_update_action(rvu, elem);
@@ -6465,7 +6484,12 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
 	rsp->mce_start_index = elem->mce_start_index;
 	ret = 0;
 
-done:
+unlock_mce:
 	mutex_unlock(&mcast->mce_lock);
+
+unlock_grp:
+	if (!req->is_af)
+		mutex_unlock(&mcast_grp->mcast_grp_lock);
+
 	return ret;
 }
-- 
2.25.1

