Date: Tue, 11 Oct 2022 11:01:13 +0200
From: Michal Wilczynski <michal.wilczynski@...el.com>
To: netdev@...r.kernel.org
Cc: alexandr.lobakin@...el.com, jacob.e.keller@...el.com,
	jesse.brandeburg@...el.com, przemyslaw.kitszel@...el.com,
	anthony.l.nguyen@...el.com, kuba@...nel.org,
	ecree.xilinx@...il.com, jiri@...nulli.us,
	Michal Wilczynski <michal.wilczynski@...el.com>
Subject: [PATCH net-next v5 4/4] ice: Prevent DCB coexistence with Custom Tx scheduler

DCB might interfere with Custom Tx Scheduler changes that a user might
introduce using the devlink-rate API. Check whether DCB is active when
the user tries to change any setting in the exported Tx scheduler tree.

Signed-off-by: Michal Wilczynski <michal.wilczynski@...el.com>
---
 drivers/net/ethernet/intel/ice/ice_dcb_lib.c |  4 ++
 drivers/net/ethernet/intel/ice/ice_devlink.c | 61 ++++++++++++++++++++
 drivers/net/ethernet/intel/ice/ice_idc.c     |  5 ++
 drivers/net/ethernet/intel/ice/ice_type.h    |  1 +
 4 files changed, 71 insertions(+)

diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index add90e75f05c..8d7fc76f49af 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -364,6 +364,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 	/* Enable DCB tagging only when more than one TC */
 	if (ice_dcb_get_num_tc(new_cfg) > 1) {
 		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
+		if (pf->hw.port_info->is_custom_tx_enabled) {
+			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure DCB\n");
+			return -EBUSY;
+		}
 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
 	} else {
 		dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index ea3701822942..e29089b5df29 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -8,6 +8,7 @@
 #include "ice_devlink.h"
 #include "ice_eswitch.h"
 #include "ice_fw_update.h"
+#include "ice_dcb_lib.h"
 
 static int ice_active_port_option = -1;
 
@@ -713,6 +714,33 @@ ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
 	return ice_devlink_port_split(devlink, port, 1, extack);
 }
 
+/**
+ * ice_enable_custom_tx - try to enable custom Tx feature
+ * @pf: pointer to the PF struct
+ * @extack: extended netdev ack structure
+ *
+ * This function tries to enable the custom Tx feature;
+ * it's not possible to enable it if DCB is active.
+ */
+static bool ice_enable_custom_tx(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+	struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
+	struct device *dev = ice_pf_to_dev(pf);
+
+	if (pi->is_custom_tx_enabled)
+		/* already enabled, return true */
+		return true;
+
+	if (ice_is_dcb_active(pf)) {
+		dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
+		return false;
+	}
+
+	pi->is_custom_tx_enabled = true;
+
+	return true;
+}
+
 /**
  * ice_traverse_tx_tree - traverse Tx scheduler tree
  * @devlink: devlink struct
@@ -892,6 +920,9 @@ static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_
 static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
 				     struct netlink_ext_ack *extack)
 {
+	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink), extack))
+		return -EBUSY;
+
 	return 0;
 }
 
@@ -905,6 +936,9 @@ static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
 	tc_node = pi->root->children[0];
 	node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink), extack))
+		return -EBUSY;
+
 	if (!rate_node->parent || !node || tc_node == node || !extack)
 		return 0;
 
@@ -924,6 +958,9 @@ static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_vport, voi
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_vport->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -937,6 +974,9 @@ static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_vport, v
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_vport->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -950,6 +990,9 @@ static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_vport
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_vport->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -963,6 +1006,9 @@ static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_vport,
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_vport->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -976,6 +1022,9 @@ static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -989,6 +1038,9 @@ static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, vo
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -1002,6 +1054,9 @@ static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node,
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -1015,6 +1070,9 @@ static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, v
 {
 	struct ice_sched_node *node = priv;
 
+	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink), extack))
+		return -EBUSY;
+
 	if (!node)
 		return 0;
 
@@ -1041,6 +1099,9 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
 	if (!extack)
 		return 0;
 
+	if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink), extack))
+		return -EBUSY;
+
 	if (!parent) {
 		if (!node || tc_node == node || node->num_children)
 			return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 895c32bcc8b5..f702bd5272f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -273,6 +273,11 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 	if (!ice_is_rdma_ena(pf))
 		return 0;
 
+	if (pf->hw.port_info->is_custom_tx_enabled) {
+		dev_err(ice_pf_to_dev(pf), "Custom Tx scheduler enabled, it's mutually exclusive with RDMA\n");
+		return -EBUSY;
+	}
+
 	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
 	if (!iadev)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 3b6d317371cd..05eb30f34871 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -714,6 +714,7 @@ struct ice_port_info {
 	struct ice_qos_cfg qos_cfg;
 	struct xarray sched_node_ids;
 	u8 is_vf:1;
+	u8 is_custom_tx_enabled:1;
 };
 
 struct ice_switch_info {
-- 
2.37.2
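
The guard added in this patch is a simple mutual-exclusion latch: the first
devlink-rate operation sets a one-bit flag (refused while DCB is active), and
the DCB and RDMA paths in turn refuse once the flag is set. The following
minimal, self-contained userspace sketch only illustrates that pattern; the
names (struct port, enable_custom_tx, enable_dcb) are hypothetical and it is
not code from the driver.

/* Illustrative sketch only -- not driver code. "struct port" stands in for
 * struct ice_port_info, the bitfield for is_custom_tx_enabled, and the two
 * enable paths for the DCB and devlink-rate entry points touched here.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	unsigned int dcb_active : 1;
	unsigned int custom_tx_enabled : 1;	/* one-way latch */
};

/* Analogous to ice_enable_custom_tx(): latch the flag unless DCB is active. */
static bool enable_custom_tx(struct port *p)
{
	if (p->custom_tx_enabled)
		return true;	/* already latched, nothing to do */

	if (p->dcb_active) {
		fprintf(stderr, "DCB active, can't modify Tx scheduler tree\n");
		return false;
	}

	p->custom_tx_enabled = 1;
	return true;
}

/* Analogous to the ice_pf_dcb_cfg() hunk: DCB refuses once the latch is set. */
static int enable_dcb(struct port *p)
{
	if (p->custom_tx_enabled) {
		fprintf(stderr, "Custom Tx scheduler enabled, can't configure DCB\n");
		return -EBUSY;
	}

	p->dcb_active = 1;
	return 0;
}

int main(void)
{
	struct port p = { 0 };

	/* A devlink-rate op runs first: the latch succeeds, DCB then fails. */
	printf("custom tx: %s\n", enable_custom_tx(&p) ? "ok" : "refused");
	printf("dcb: %d\n", enable_dcb(&p));

	return 0;
}

Compiled and run, the sketch reports that the custom Tx latch succeeds and the
subsequent DCB enable returns -EBUSY, mirroring the ordering the patch enforces.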