Date:   Mon, 20 Nov 2023 20:11:15 +0000
From:   Simon Horman <horms@...nel.org>
To:     Suman Ghosh <sumang@...vell.com>
Cc:     sgoutham@...vell.com, gakula@...vell.com, sbhatta@...vell.com,
        hkelam@...vell.com, davem@...emloft.net, edumazet@...gle.com,
        kuba@...nel.org, pabeni@...hat.com, netdev@...r.kernel.org,
        linux-kernel@...r.kernel.org, lcherian@...vell.com,
        jerinj@...vell.com
Subject: Re: [net-next PATCH 1/2] octeontx2-af: Add new mbox to support
 multicast/mirror offload

On Thu, Nov 16, 2023 at 03:46:00PM +0530, Suman Ghosh wrote:
> A new mailbox is added to support offloading of multicast/mirror
> functionality. The mailbox also supports dynamic updates of the
> multicast/mirror list.
> 
> Signed-off-by: Suman Ghosh <sumang@...vell.com>
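
Not a review comment, just to check my understanding of the intended use:
a requester would describe an update roughly like the sketch below. Only
the fields visible in this patch are shown (names taken from
nix_mcast_grp_update_req); grp_idx, pcifunc_a and pcifunc_b are
hypothetical, and the actual send over the AF mbox is elided:

	struct nix_mcast_grp_update_req req = { 0 };

	req.mcast_grp_idx = grp_idx;		/* index from group create */
	req.op = NIX_MCAST_OP_ADD_ENTRY;	/* or NIX_MCAST_OP_DEL_ENTRY */
	req.num_mce_entry = 2;
	req.pcifunc[0] = pcifunc_a;		/* hypothetical pcifuncs */
	req.pcifunc[1] = pcifunc_b;
	/* ... any remaining fields and the mbox send are omitted here ... */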

...

> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c

...

> +int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
> +					  struct nix_mcast_grp_update_req *req,
> +					  struct nix_mcast_grp_update_rsp *rsp)
> +{
> +	struct nix_mcast_grp_destroy_req dreq = { 0 };
> +	struct npc_mcam *mcam = &rvu->hw->mcam;
> +	struct nix_mcast_grp_elem *elem;
> +	struct nix_mcast_grp *mcast_grp;
> +	int blkaddr, err, npc_blkaddr;
> +	u16 prev_count, new_count;
> +	struct nix_mcast *mcast;
> +	struct nix_hw *nix_hw;
> +	int i, ret;
> +
> +	if (!req->num_mce_entry)
> +		return 0;
> +
> +	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
> +	if (err)
> +		return err;
> +
> +	mcast_grp = &nix_hw->mcast_grp;
> +	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
> +	if (!elem)
> +		return NIX_AF_ERR_INVALID_MCAST_GRP;
> +
> +	/* If any pcifunc matches the group's pcifunc, then we can
> +	 * delete the entire group.
> +	 */
> +	if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
> +		for (i = 0; i < req->num_mce_entry; i++) {
> +			if (elem->pcifunc == req->pcifunc[i]) {
> +				/* Delete group */
> +				dreq.hdr.pcifunc = elem->pcifunc;
> +				dreq.mcast_grp_idx = elem->mcast_grp_idx;
> +				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
> +				return 0;
> +			}
> +		}
> +	}
> +
> +	mcast = &nix_hw->mcast;
> +	mutex_lock(&mcast->mce_lock);
> +	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
> +	if (elem->mcam_index != -1)
> +		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
> +
> +	prev_count = elem->mcast_mce_list.count;
> +	if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
> +		new_count = prev_count + req->num_mce_entry;
> +		if (prev_count)
> +			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
> +
> +		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
> +
> +		/* It is possible not to get contiguous memory */
> +		if (elem->mce_start_index < 0) {
> +			if (elem->mcam_index != -1) {
> +				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
> +						      elem->mcam_index, true);
> +				mutex_unlock(&mcast->mce_lock);
> +				return NIX_AF_ERR_NON_CONTIG_MCE_LIST;
> +			}
> +		}
> +
> +		ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
> +		if (ret) {
> +			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
> +			if (prev_count)
> +				elem->mce_start_index = nix_alloc_mce_list(mcast,
> +									   prev_count,
> +									   elem->dir);
> +
> +			if (elem->mcam_index != -1)
> +				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
> +						      elem->mcam_index, true);
> +
> +			mutex_unlock(&mcast->mce_lock);
> +			return ret;
> +		}
> +	} else {
> +		if (!prev_count || prev_count < req->num_mce_entry) {
> +			if (elem->mcam_index != -1)
> +				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
> +						      elem->mcam_index, true);

Hi Suman,

It looks like a mutex_unlock() is needed here.

As flagged by Smatch.
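
Something along these lines (untested, just to illustrate where the
unlock would go, mirroring the other error paths in this function):

		if (!prev_count || prev_count < req->num_mce_entry) {
			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);

			/* mce_lock was taken above; drop it on this
			 * error path as well before returning.
			 */
			mutex_unlock(&mcast->mce_lock);
			return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
		}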

> +			return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
> +		}
> +
> +		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
> +		new_count = prev_count - req->num_mce_entry;
> +		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
> +		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
> +		if (ret) {
> +			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
> +			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
> +			if (elem->mcam_index != -1)
> +				npc_enable_mcam_entry(rvu, mcam,
> +						      npc_blkaddr,
> +						      elem->mcam_index,
> +						      true);
> +
> +			mutex_unlock(&mcast->mce_lock);
> +			return ret;
> +		}
> +	}
> +
> +	if (elem->mcam_index == -1) {
> +		mutex_unlock(&mcast->mce_lock);
> +		rsp->mce_start_index = elem->mce_start_index;
> +		return 0;
> +	}
> +
> +	nix_mcast_update_action(rvu, elem);
> +	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
> +	mutex_unlock(&mcast->mce_lock);
> +	rsp->mce_start_index = elem->mce_start_index;
> +	return 0;
> +}

...
