Message-ID: <87ilicbqlj.fsf@intel.com>
Date: Thu, 15 Dec 2022 13:04:40 -0300
From: Vinicius Costa Gomes <vinicius.gomes@...el.com>
To: "Zulkifli, Muhammad Husaini" <muhammad.husaini.zulkifli@...el.com>,
	"intel-wired-lan@...osl.org" <intel-wired-lan@...osl.org>
Cc: "tee.min.tan@...ux.intel.com" <tee.min.tan@...ux.intel.com>,
	"davem@...emloft.net" <davem@...emloft.net>,
	"kuba@...nel.org" <kuba@...nel.org>,
	"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
	"naamax.meir@...ux.intel.com" <naamax.meir@...ux.intel.com>,
	"Nguyen, Anthony L" <anthony.l.nguyen@...el.com>
Subject: RE: [PATCH net-next v1] igc: offload queue max SDU from tc-taprio

Hi Husaini,

"Zulkifli, Muhammad Husaini" <muhammad.husaini.zulkifli@...el.com> writes:

> Hi Vinicius,
>
>> -----Original Message-----
>> From: Gomes, Vinicius <vinicius.gomes@...el.com>
>> Sent: Thursday, 15 December, 2022 1:17 AM
>> To: Zulkifli, Muhammad Husaini <muhammad.husaini.zulkifli@...el.com>;
>> intel-wired-lan@...osl.org
>> Cc: tee.min.tan@...ux.intel.com; davem@...emloft.net; kuba@...nel.org;
>> netdev@...r.kernel.org; Zulkifli, Muhammad Husaini
>> <muhammad.husaini.zulkifli@...el.com>; naamax.meir@...ux.intel.com;
>> Nguyen, Anthony L <anthony.l.nguyen@...el.com>
>> Subject: Re: [PATCH net-next v1] igc: offload queue max SDU from tc-taprio
>>
>> Hi,
>>
>> Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@...el.com> writes:
>>
>> > From: Tan Tee Min <tee.min.tan@...ux.intel.com>
>> >
>> > Add support for configuring the max SDU for each Tx queue.
>> > If not specified, keep the default.
>> >
>> > Signed-off-by: Tan Tee Min <tee.min.tan@...ux.intel.com>
>> > Signed-off-by: Muhammad Husaini Zulkifli
>> > <muhammad.husaini.zulkifli@...el.com>
>> > ---
>> >  drivers/net/ethernet/intel/igc/igc.h      |  1 +
>> >  drivers/net/ethernet/intel/igc/igc_main.c | 45 +++++++++++++++++++++++
>> >  include/net/pkt_sched.h                   |  1 +
>> >  net/sched/sch_taprio.c                    |  4 +-
>> >  4 files changed, 50 insertions(+), 1 deletion(-)
>> >
>> > diff --git a/drivers/net/ethernet/intel/igc/igc.h
>> > b/drivers/net/ethernet/intel/igc/igc.h
>> > index 5da8d162cd38..ce9e88687d8c 100644
>> > --- a/drivers/net/ethernet/intel/igc/igc.h
>> > +++ b/drivers/net/ethernet/intel/igc/igc.h
>> > @@ -99,6 +99,7 @@ struct igc_ring {
>> >
>> >  	u32 start_time;
>> >  	u32 end_time;
>> > +	u32 max_sdu;
>> >
>> >  	/* CBS parameters */
>> >  	bool cbs_enable;	/* indicates if CBS is enabled */
>> > diff --git a/drivers/net/ethernet/intel/igc/igc_main.c
>> > b/drivers/net/ethernet/intel/igc/igc_main.c
>> > index e07287e05862..7ce05c31e371 100644
>> > --- a/drivers/net/ethernet/intel/igc/igc_main.c
>> > +++ b/drivers/net/ethernet/intel/igc/igc_main.c
>> > @@ -1508,6 +1508,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
>> >  	__le32 launch_time = 0;
>> >  	u32 tx_flags = 0;
>> >  	unsigned short f;
>> > +	u32 max_sdu = 0;
>> >  	ktime_t txtime;
>> >  	u8 hdr_len = 0;
>> >  	int tso = 0;
>> > @@ -1527,6 +1528,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
>> >  		return NETDEV_TX_BUSY;
>> >  	}
>> >
>> > +	if (tx_ring->max_sdu > 0) {
>> > +		if (skb_vlan_tagged(skb))
>> > +			max_sdu = tx_ring->max_sdu + VLAN_HLEN;
>> > +		else
>> > +			max_sdu = tx_ring->max_sdu;
>>
>> Perhaps this?
>>
>> 	max_sdu = tx_ring->max_sdu + (skb_vlan_tagged(skb) ? VLAN_HLEN : 0);
>>
>> Totally optional.
>
> Sure. We can change to the above suggestion.
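Just to spell out what I had in mind, the whole check would then read
something like this (untested sketch; VLAN_HLEN comes from
<linux/if_vlan.h> and accounts for the 4-byte 802.1Q tag, which is not
counted as part of the SDU):

	if (tx_ring->max_sdu > 0) {
		/* A VLAN-tagged frame may legitimately be VLAN_HLEN
		 * bytes longer, since the tag is not part of the SDU.
		 */
		max_sdu = tx_ring->max_sdu +
			  (skb_vlan_tagged(skb) ? VLAN_HLEN : 0);

		if (skb->len > max_sdu)
			goto skb_drop;
	}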
>
>>
>> > +
>> > +		if (skb->len > max_sdu)
>> > +			goto skb_drop;
>> > +	}
>> > +
>>
>> I don't think the overhead would be measurable for the pkt/s rates that a
>> 2.5G link can handle. But a test and a note in the commit message
>> confirming that would be nice.
>
> IMHO, it should not depend on the link speed but only on the packet size.
> If we detect a packet larger than max_sdu, we will just drop it.
>

I was thinking more about the added conditional on the hot path, and
whether it has some performance impact for the case when packets are not
dropped. I really don't think there will be any, but it's nice to have
some numbers to confirm that.

>>
>> >  	if (!tx_ring->launchtime_enable)
>> >  		goto done;
>> >
>> > @@ -1606,6 +1617,12 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
>> >  	dev_kfree_skb_any(first->skb);
>> >  	first->skb = NULL;
>> >
>> > +	return NETDEV_TX_OK;
>> > +
>> > +skb_drop:
>> > +	dev_kfree_skb_any(skb);
>> > +	skb = NULL;
>> > +
>> >  	return NETDEV_TX_OK;
>> >  }
>> >
>> > @@ -6015,6 +6032,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
>> >
>> >  		ring->start_time = 0;
>> >  		ring->end_time = NSEC_PER_SEC;
>> > +		ring->max_sdu = 0;
>> >  	}
>> >
>> >  	return 0;
>> > @@ -6097,6 +6115,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
>> >  		}
>> >  	}
>> >
>> > +	for (i = 0; i < adapter->num_tx_queues; i++) {
>> > +		struct igc_ring *ring = adapter->tx_ring[i];
>> > +
>> > +		if (qopt->max_frm_len[i] == U32_MAX)
>> > +			ring->max_sdu = 0;
>> > +		else
>> > +			ring->max_sdu = qopt->max_frm_len[i];
>> > +	}
>> > +
>> >  	return 0;
>> >  }
>> >
>> > @@ -6184,12 +6211,30 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
>> >  	return igc_tsn_offload_apply(adapter);
>> >  }
>> >
>> > +static int igc_tsn_query_caps(struct tc_query_caps_base *base)
>> > +{
>> > +	switch (base->type) {
>> > +	case TC_SETUP_QDISC_TAPRIO: {
>> > +		struct tc_taprio_caps *caps = base->caps;
>> > +
>> > +		caps->supports_queue_max_sdu = true;
>> > +
>> > +		return 0;
>> > +	}
>> > +	default:
>> > +		return -EOPNOTSUPP;
>> > +	}
>> > +}
>> > +
>> >  static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
>> >  			void *type_data)
>> >  {
>> >  	struct igc_adapter *adapter = netdev_priv(dev);
>> >
>> >  	switch (type) {
>> > +	case TC_QUERY_CAPS:
>> > +		return igc_tsn_query_caps(type_data);
>> > +
>> >  	case TC_SETUP_QDISC_TAPRIO:
>> >  		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
>> >
>> > diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
>> > index 38207873eda6..d2539b1f6529 100644
>> > --- a/include/net/pkt_sched.h
>> > +++ b/include/net/pkt_sched.h
>> > @@ -178,6 +178,7 @@ struct tc_taprio_qopt_offload {
>> >  	u64 cycle_time;
>> >  	u64 cycle_time_extension;
>> >  	u32 max_sdu[TC_MAX_QUEUE];
>> > +	u32 max_frm_len[TC_MAX_QUEUE];
>> >
>>
>> 'max_frm_len' is an internal taprio optimization, to simplify the code
>> where the underlying HW doesn't support offload.
>
> The max_sdu only covers the MTU payload size. The reason we are using
> max_frm_len is to get the header + MTU size together.
>
> We can use max_sdu + header in igc_save_qbv_schedule() and remove
> this piece of code from pkt_sched.h.

This sounds better, only exposing max_sdu to the drivers, even if it
causes a bit of duplicated code.

>
>>
>> For offloading, only 'max_sdu' should be used. Unless you have a strong
>> reason. If you have that reason, it should be a separate commit.
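Just as an illustration of what I mean (untested sketch, and the exact
header accounting is your call; I'm assuming dev->hard_header_len here,
which is ETH_HLEN for Ethernet), keeping the computation inside
igc_save_qbv_schedule() could look something like:

	/* Untested sketch: derive the per-ring limit from max_sdu alone,
	 * so taprio's internal max_frm_len never has to cross the
	 * offload interface.  qopt->max_sdu[i] == 0 means "no limit".
	 */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (qopt->max_sdu[i])
			ring->max_sdu = qopt->max_sdu[i] +
					adapter->netdev->hard_header_len;
		else
			ring->max_sdu = 0;
	}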
>>
>> >  	size_t num_entries;
>> >  	struct tc_taprio_sched_entry entries[];
>> > diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
>> > index 570389f6cdd7..d39164074756 100644
>> > --- a/net/sched/sch_taprio.c
>> > +++ b/net/sched/sch_taprio.c
>> > @@ -1263,8 +1263,10 @@ static int taprio_enable_offload(struct net_device *dev,
>> >  	offload->enable = 1;
>> >  	taprio_sched_to_offload(dev, sched, offload);
>> >
>> > -	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
>> > +	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
>> >  		offload->max_sdu[tc] = q->max_sdu[tc];
>> > +		offload->max_frm_len[tc] = q->max_frm_len[tc];
>> > +	}
>> >
>> >  	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
>> >  	if (err < 0) {
>> > --
>> > 2.17.1
>> >
>>
>> --
>> Vinicius

-- 
Vinicius
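P.S. For anyone trying this from userspace: a rough usage sketch,
assuming an iproute2 recent enough to know taprio's per-TC 'max-sdu'
option (the device name is just an example, a value of 0 leaves that
traffic class unlimited, and flags 0x2 requests full offload):

	tc qdisc replace dev eth0 parent root taprio \
		num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
		queues 1@0 1@1 2@2 \
		base-time 1000000000 \
		sched-entry S 01 300000 \
		sched-entry S 02 300000 \
		sched-entry S 04 400000 \
		max-sdu 0 300 200 \
		flags 0x2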