Date: Wed, 5 Jun 2024 11:41:27 +0200
From: Wojciech Drewek <wojciech.drewek@...el.com>
To: Mengyuan Lou <mengyuanlou@...-swift.com>, <netdev@...r.kernel.org>
CC: <jiawenwu@...stnetic.com>, <duanqiangwen@...-swift.com>
Subject: Re: [PATCH net-next v4 4/6] net: libwx: Add msg task func



On 04.06.2024 17:57, Mengyuan Lou wrote:
> Implement wx_msg_task which is used to process mailbox
> messages sent by vf.
> 
> Signed-off-by: Mengyuan Lou <mengyuanlou@...-swift.com>
> ---
>  drivers/net/ethernet/wangxun/libwx/wx_hw.c    |  12 +-
>  drivers/net/ethernet/wangxun/libwx/wx_hw.h    |   4 +
>  drivers/net/ethernet/wangxun/libwx/wx_mbx.h   |  50 ++
>  drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 725 ++++++++++++++++++
>  drivers/net/ethernet/wangxun/libwx/wx_sriov.h |   1 +
>  drivers/net/ethernet/wangxun/libwx/wx_type.h  |  17 +
>  6 files changed, 805 insertions(+), 4 deletions(-)
>

<...>

> +
> +static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
> +{
> +	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
> +	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
> +	u32 reg = 0, n = vf * q_per_pool / 32;
> +	u32 i = vf * q_per_pool;
> +
> +	reg = rd32(wx, WX_RDM_PF_QDE(n));
> +	for (i = (vf * q_per_pool - n * 32);
> +	     i < ((vf + 1) * q_per_pool - n * 32);
> +	     i++) {
> +		if (qde == 1)
> +			reg |= qde << i;
> +		else
> +			reg &= qde << i;
> +	}
> +
> +	wr32(wx, WX_RDM_PF_QDE(n), reg);
> +}
> +
> +static void wx_clear_vmvir(struct wx *wx, u32 vf)
> +{
> +	wr32(wx, WX_TDM_VLAN_INS(vf), 0);
> +}
> +
> +static void wx_set_vf_rx_tx(struct wx *wx, int vf)
> +{
> +	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
> +	u32 index, vf_bit;
> +
> +	vf_bit = vf % 32;
> +	index = vf / 32;

I've seen these calculations a few times now; you could define a macro or small helper for them, e.g. wx_get_vf_index, and then leave a comment explaining what they do.
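Something along these lines, for example (just a sketch, the name is taken from my
suggestion above; a macro would work equally well):

static inline void wx_get_vf_index(u32 vf, u32 *index, u32 *vf_bit)
{
	/* Per-VF enable bits are spread across 32-bit registers:
	 * index selects the register, vf_bit the bit within it.
	 */
	*index = vf / 32;
	*vf_bit = vf % 32;
}

so wx_set_vf_rx_tx(), wx_vf_reset_msg() and wx_set_vf_lpe() could all just do
wx_get_vf_index(vf, &index, &vf_bit);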

> +
> +	reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
> +	reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));
> +
> +	if (wx->vfinfo[vf].link_enable) {
> +		reg_req_tx = reg_cur_tx | BIT(vf_bit);
> +		reg_req_rx = reg_cur_rx | BIT(vf_bit);
> +		/* Enable particular VF */
> +		if (reg_cur_tx != reg_req_tx)
> +			wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
> +		if (reg_cur_rx != reg_req_rx)
> +			wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
> +	} else {
> +		reg_req_tx = BIT(vf_bit);
> +		reg_req_rx = BIT(vf_bit);
> +		/* Disable particular VF */
> +		if (reg_cur_tx & reg_req_tx)
> +			wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
> +		if (reg_cur_rx & reg_req_rx)
> +			wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
> +	}
> +}
> +
> +static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> +	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
> +	unsigned int default_tc = 0;
> +
> +	/* verify the PF is supporting the correct APIs */
> +	switch (wx->vfinfo[vf].vf_api) {
> +	case wx_mbox_api_11 ... wx_mbox_api_20:
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	/* only allow 1 Tx queue for bandwidth limiting */
> +	msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
> +	msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
> +
> +	if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
> +		msgbuf[WX_VF_TRANS_VLAN] = 1;
> +	else
> +		msgbuf[WX_VF_TRANS_VLAN] = 0;
> +
> +	/* notify VF of default queue */
> +	msgbuf[WX_VF_DEF_QUEUE] = default_tc;
> +
> +	return 0;
> +}
> +
> +static void wx_vf_reset_event(struct wx *wx, u16 vf)
> +{
> +	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
> +	u8 num_tcs = netdev_get_num_tc(wx->netdev);
> +
> +	/* add PF assigned VLAN or VLAN 0 */
> +	wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);
> +
> +	/* reset offloads to defaults */
> +	wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);
> +
> +	/* set outgoing tags for VFs */
> +	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
> +		wx_clear_vmvir(wx, vf);
> +	} else {
> +		if (vfinfo->pf_qos || !num_tcs)
> +			wx_set_vmvir(wx, vfinfo->pf_vlan,
> +				     vfinfo->pf_qos, vf);
> +		else
> +			wx_set_vmvir(wx, vfinfo->pf_vlan,
> +				     wx->default_up, vf);
> +	}
> +
> +	/* reset multicast table array for vf */
> +	wx->vfinfo[vf].num_vf_mc_hashes = 0;
> +
> +	/* Flush and reset the mta with the new values */
> +	wx_set_rx_mode(wx->netdev);
> +
> +	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
> +
> +	/* reset VF api back to unknown */
> +	wx->vfinfo[vf].vf_api = wx_mbox_api_10;
> +}
> +
> +static void wx_vf_reset_msg(struct wx *wx, u16 vf)
> +{
> +	unsigned char *vf_mac = wx->vfinfo[vf].vf_mac_addr;
> +	struct net_device *dev = wx->netdev;
> +	u32 msgbuf[5] = {0, 0, 0, 0, 0};
> +	u8 *addr = (u8 *)(&msgbuf[1]);
> +	u32 reg = 0, index, vf_bit;
> +	int pf_max_frame;
> +
> +	/* reset the filters for the device */
> +	wx_vf_reset_event(wx, vf);
> +
> +	/* set vf mac address */
> +	if (!is_zero_ether_addr(vf_mac))
> +		wx_set_vf_mac(wx, vf, vf_mac);
> +
> +	vf_bit = vf % 32;
> +	index = vf / 32;
> +
> +	/* force drop enable for all VF Rx queues */
> +	wx_write_qde(wx, vf, 1);
> +
> +	/* set transmit and receive for vf */
> +	wx_set_vf_rx_tx(wx, vf);
> +
> +	pf_max_frame = dev->mtu + ETH_HLEN;
> +
> +	if (pf_max_frame > ETH_FRAME_LEN)
> +		reg = BIT(vf_bit);
> +	wr32(wx, WX_RDM_VFRE_CLR(index), reg);
> +
> +	/* enable VF mailbox for further messages */
> +	wx->vfinfo[vf].clear_to_send = true;
> +
> +	/* reply to reset with ack and vf mac address */
> +	msgbuf[0] = WX_VF_RESET;
> +	if (!is_zero_ether_addr(vf_mac)) {
> +		msgbuf[0] |= WX_VT_MSGTYPE_ACK;
> +		memcpy(addr, vf_mac, ETH_ALEN);
> +	} else {
> +		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
> +		wx_err(wx, "VF %d has no MAC address assigned", vf);
> +	}
> +
> +	/* Piggyback the multicast filter type so VF can compute the
> +	 * correct vectors
> +	 */
> +	msgbuf[3] = wx->mac.mc_filter_type;
> +	wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
> +}
> +
> +static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> +	u8 *new_mac = ((u8 *)(&msgbuf[1]));
> +
> +	if (!is_valid_ether_addr(new_mac)) {
> +		wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
> +		return -EINVAL;
> +	}
> +
> +	if (wx->vfinfo[vf].pf_set_mac &&
> +	    memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
> +		wx_err(wx,
> +		       "VF %d attempted to set a MAC address but it already had a MAC address.",
> +		       vf);
> +		return -EBUSY;
> +	}

nit: blank line before the return

> +	return wx_set_vf_mac(wx, vf, new_mac) < 0;
> +}
> +
> +static int wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)

this function can't fail, so there's no need to return anything
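i.e. it could simply become

static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)

with the trailing "return 0;" dropped and the caller adjusted accordingly.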

> +{
> +	u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
> +		      >> WX_VT_MSGINFO_SHIFT;
> +	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
> +	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
> +	u32 vector_bit, vector_reg, mta_reg, i;
> +	u16 *hash_list = (u16 *)&msgbuf[1];
> +
> +	/* only so many hash values supported */
> +	entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
> +	/* salt away the number of multi cast addresses assigned
> +	 * to this VF for later use to restore when the PF multi cast
> +	 * list changes
> +	 */
> +	vfinfo->num_vf_mc_hashes = entries;
> +
> +	/* VFs are limited to using the MTA hash table for their multicast
> +	 * addresses
> +	 */
> +	for (i = 0; i < entries; i++)
> +		vfinfo->vf_mc_hashes[i] = hash_list[i];
> +
> +	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
> +		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
> +		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
> +		/* errata 5: maintain a copy of the register table conf */
> +		mta_reg = wx->mac.mta_shadow[vector_reg];
> +		mta_reg |= (1 << vector_bit);
> +		wx->mac.mta_shadow[vector_reg] = mta_reg;
> +		wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
> +	}
> +	vmolr |= WX_PSR_VM_L2CTL_ROMPE;
> +	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
> +
> +	return 0;
> +}
> +
> +static int wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
> +{
> +	struct net_device *netdev = wx->netdev;
> +	u32 index, vf_bit, vfre;
> +	u32 max_frs, reg_val;
> +	int pf_max_frame;
> +	int err = 0;
> +
> +	pf_max_frame = netdev->mtu + ETH_HLEN +  ETH_FCS_LEN + VLAN_HLEN;
> +	switch (wx->vfinfo[vf].vf_api) {
> +	case wx_mbox_api_11 ... wx_mbox_api_13:
> +		/* Version 1.1 supports jumbo frames on VFs if PF has
> +		 * jumbo frames enabled which means legacy VFs are
> +		 * disabled
> +		 */
> +		if (pf_max_frame > ETH_FRAME_LEN)
> +			break;
> +		fallthrough;
> +	default:
> +		/* If the PF or VF are running w/ jumbo frames enabled
> +		 * we need to shut down the VF Rx path as we cannot
> +		 * support jumbo frames on legacy VFs
> +		 */
> +		if (pf_max_frame > ETH_FRAME_LEN ||
> +		    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)))
> +			err = -EINVAL;

return -EINVAL here?
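i.e. something like

		if (pf_max_frame > ETH_FRAME_LEN ||
		    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)))
			return -EINVAL;

(only if skipping the WX_RDM_VF_RE update on the error path is acceptable, of course.)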

> +		break;
> +	}
> +
> +	/* determine VF receive enable location */
> +	vf_bit = vf % 32;
> +	index = vf / 32;
> +
> +	/* enable or disable receive depending on error */
> +	vfre = rd32(wx, WX_RDM_VF_RE(index));
> +	if (err)
> +		vfre &= ~BIT(vf_bit);
> +	else
> +		vfre |= BIT(vf_bit);
> +	wr32(wx, WX_RDM_VF_RE(index), vfre);
> +
> +	if (err) {
> +		wx_err(wx, "VF max_frame %d out of range\n", max_frame);
> +		return err;
> +	}
> +	/* pull current max frame size from hardware */
> +	max_frs = DIV_ROUND_UP(max_frame, 1024);
> +	reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
> +	if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
> +		wr32(wx, WX_MAC_WDG_TIMEOUT, max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
> +
> +	return 0;
> +}
> +
> +static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
> +{
> +	int regindex;
> +	u32 vlvf;
> +
> +	/* short cut the special case */
> +	if (vlan == 0)
> +		return 0;
> +
> +	/* Search for the vlan id in the VLVF entries */
> +	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
> +		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
> +		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
> +		if ((vlvf & VLAN_VID_MASK) == vlan)
> +			break;
> +	}
> +
> +	/* Return a negative value if not found */
> +	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
> +		regindex = -EINVAL;
> +
> +	return regindex;
> +}
> +
> +static int wx_set_vf_macvlan(struct wx *wx,
> +			     u16 vf, int index, unsigned char *mac_addr)
> +{
> +	struct vf_macvlans *entry;
> +	struct list_head *pos;
> +	int retval = 0;
> +
> +	if (index <= 1) {
> +		list_for_each(pos, &wx->vf_mvs.l) {
> +			entry = list_entry(pos, struct vf_macvlans, l);
> +			if (entry->vf == vf) {
> +				entry->vf = -1;
> +				entry->free = true;
> +				entry->is_macvlan = false;
> +				wx_del_mac_filter(wx, entry->vf_macvlan, vf);
> +			}
> +		}
> +	}
> +
> +	/* If index was zero then we were asked to clear the uc list
> +	 * for the VF.  We're done.
> +	 */
> +	if (!index)
> +		return 0;
> +
> +	entry = NULL;
> +
> +	list_for_each(pos, &wx->vf_mvs.l) {
> +		entry = list_entry(pos, struct vf_macvlans, l);
> +		if (entry->free)
> +			break;
> +	}
> +
> +	/* If we traversed the entire list and didn't find a free entry
> +	 * then we're out of space on the RAR table.  Also entry may
> +	 * be NULL because the original memory allocation for the list
> +	 * failed, which is not fatal but does mean we can't support
> +	 * VF requests for MACVLAN because we couldn't allocate
> +	 * memory for the list management required.
> +	 */
> +	if (!entry || !entry->free)
> +		return -ENOSPC;
> +
> +	retval = wx_add_mac_filter(wx, mac_addr, vf);
> +	if (retval >= 0) {
> +		entry->free = false;
> +		entry->is_macvlan = true;
> +		entry->vf = vf;
> +		memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
> +	}
> +
> +	return retval;
> +}
> +
> +static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> +	int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
> +	int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
> +	int err;
> +
> +	if (add)
> +		wx->vfinfo[vf].vlan_count++;
> +	else if (wx->vfinfo[vf].vlan_count)
> +		wx->vfinfo[vf].vlan_count--;
> +
> +	/* in case of promiscuous mode any VLAN filter set for a VF must
> +	 * also have the PF pool added to it.
> +	 */
> +	if (add && wx->netdev->flags & IFF_PROMISC)
> +		err = wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));

The err returned here is immediately overwritten; should we check it and return it?
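i.e. something like (if that's the intended behaviour):

	if (add && wx->netdev->flags & IFF_PROMISC) {
		err = wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
		if (err)
			return err;
	}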

> +
> +	err = wx_set_vf_vlan(wx, add, vid, vf);
> +	if (!err && wx->vfinfo[vf].spoofchk_enabled)
> +		wx_set_vlan_anti_spoofing(wx, true, vf);
> +
> +	/* Go through all the checks to see if the VLAN filter should
> +	 * be wiped completely.
> +	 */
> +	if (!add && wx->netdev->flags & IFF_PROMISC) {
> +		u32 bits = 0, vlvf;
> +		int reg_ndx;
> +
> +		reg_ndx = wx_find_vlvf_entry(wx, vid);
> +		if (reg_ndx < 0)
> +			goto out;

It would be simpler to just return here; no need for the goto.
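i.e.

		reg_ndx = wx_find_vlvf_entry(wx, vid);
		if (reg_ndx < 0)
			return err;

then the out: label can go away and the function can end with a plain "return err;".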

> +		wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
> +		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
> +		/* See if any other pools are set for this VLAN filter
> +		 * entry other than the PF.
> +		 */
> +		if (VMDQ_P(0) < 32) {
> +			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
> +			bits &= ~BIT(VMDQ_P(0));
> +			if (wx->mac.type == wx_mac_sp)
> +				bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
> +		} else {
> +			if (wx->mac.type == wx_mac_sp)
> +				bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
> +			bits &= ~BIT(VMDQ_P(0) % 32);
> +			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
> +		}
> +		/* If the filter was removed then ensure PF pool bit
> +		 * is cleared if the PF only added itself to the pool
> +		 * because the PF is in promiscuous mode.
> +		 */
> +		if ((vlvf & VLAN_VID_MASK) == vid && !bits)
> +			wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
> +	}
> +
> +out:
> +	return err;
> +}
> +
> +static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
> +{
> +	int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
> +		    WX_VT_MSGINFO_SHIFT;
> +	u8 *new_mac = ((u8 *)(&msgbuf[1]));
> +	int err;
> +
> +	if (wx->vfinfo[vf].pf_set_mac && index > 0) {
> +		wx_err(wx, "VF %d requested MACVLAN filter but is administratively denied\n", vf);
> +		return -EINVAL;
> +	}
> +
> +	/* A non-zero index indicates the VF is setting a filter */
> +	if (index) {
> +		if (!is_valid_ether_addr(new_mac)) {
> +			wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
> +			return -EINVAL;
> +		}
> +		/* If the VF is allowed to set MAC filters then turn off
> +		 * anti-spoofing to avoid false positives.
> +		 */
> +		if (wx->vfinfo[vf].spoofchk_enabled)
> +			wx_set_vf_spoofchk(wx->netdev, vf, false);
> +	}
> +
> +	err = wx_set_vf_macvlan(wx, vf, index, new_mac);
> +	if (err == -ENOSPC)
> +		wx_err(wx,
> +		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
> +		       vf);
> +
> +	return err < 0;
> +}
> +
> +static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> +	int api = msgbuf[1];
> +
> +	switch (api) {
> +	case wx_mbox_api_10 ... wx_mbox_api_13:
> +		wx->vfinfo[vf].vf_api = api;
> +		return 0;
> +	default:
> +		wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
> +		return -EINVAL;
> +	}
> +}
> +
> +static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> +	/* verify the PF is supporting the correct API */
> +	switch (wx->vfinfo[vf].vf_api) {
> +	case wx_mbox_api_12 ... wx_mbox_api_13:
> +		break;
> +	default:
> +		return -EOPNOTSUPP;
> +	}
> +
> +	msgbuf[1] = wx->vfinfo[vf].link_enable;
> +
> +	return 0;
> +}
> +
> +static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> +	unsigned long fw_version = 0ULL;
> +	int ret = 0;
> +
> +	/* verify the PF is supporting the correct API */
> +	switch (wx->vfinfo[vf].vf_api) {
> +	case wx_mbox_api_12 ... wx_mbox_api_13:
> +		break;
> +	default:
> +		return -EOPNOTSUPP;
> +	}
> +
> +	ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
> +	if (ret)
> +		return -EOPNOTSUPP;
> +	msgbuf[1] = fw_version;
> +
> +	return 0;
> +}
> +
> +static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
> +{
> +	int xcast_mode = msgbuf[1];
> +	u32 vmolr, disable, enable;
> +
> +	/* verify the PF is supporting the correct APIs */
> +	switch (wx->vfinfo[vf].vf_api) {
> +	case wx_mbox_api_12:
> +		/* promisc introduced in 1.3 version */
> +		if (xcast_mode == WXVF_XCAST_MODE_PROMISC)
> +			return -EOPNOTSUPP;
> +		fallthrough;
> +	case wx_mbox_api_13:
> +		break;
> +	default:
> +		return -EOPNOTSUPP;
> +	}

nit: blank line here

> +	if (wx->vfinfo[vf].xcast_mode == xcast_mode)
> +		goto out;
> +
> +	switch (xcast_mode) {
> +	case WXVF_XCAST_MODE_NONE:
> +		disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
> +			  WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> +		enable = 0;
> +		break;
> +	case WXVF_XCAST_MODE_MULTI:
> +		disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> +		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
> +		break;
> +	case WXVF_XCAST_MODE_ALLMULTI:
> +		disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> +		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | WX_PSR_VM_L2CTL_MPE;
> +		break;
> +	case WXVF_XCAST_MODE_PROMISC:
> +		disable = 0;
> +		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
> +			 WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
> +		break;
> +	default:
> +		return -EOPNOTSUPP;
> +	}
> +
> +	vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
> +	vmolr &= ~disable;
> +	vmolr |= enable;
> +	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
> +
> +	wx->vfinfo[vf].xcast_mode = xcast_mode;
> +out:
> +	msgbuf[1] = xcast_mode;
> +
> +	return 0;
> +}
> +

<...>

