[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241018091321.gfsdx7qzl4yoixgb@skbuf>
Date: Fri, 18 Oct 2024 12:13:21 +0300
From: Vladimir Oltean <olteanv@...il.com>
To: Furong Xu <0x1207@...il.com>
Cc: netdev@...r.kernel.org, linux-stm32@...md-mailman.stormreply.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Andrew Lunn <andrew@...n.ch>, Simon Horman <horms@...nel.org>,
Serge Semin <fancer.lancer@...il.com>,
Alexandre Torgue <alexandre.torgue@...s.st.com>,
Jose Abreu <joabreu@...opsys.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Maxime Coquelin <mcoquelin.stm32@...il.com>, xfr@...look.com
Subject: Re: [PATCH net-next v2 7/8] net: stmmac: xgmac: Complete FPE support
On Fri, Oct 18, 2024 at 02:39:13PM +0800, Furong Xu wrote:
> Implement the necessary stmmac_fpe_ops function callbacks for xgmac.
>
> Signed-off-by: Furong Xu <0x1207@...il.com>
> ---
> .../net/ethernet/stmicro/stmmac/stmmac_fpe.c | 77 +++++++++++++++++++
> 1 file changed, 77 insertions(+)
>
> diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
> index dfe911b3f486..c90ed7c1279d 100644
> --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
> +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
> @@ -373,6 +373,78 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
> &dwxgmac3_fpe_info);
> }
>
> +static int dwxgmac3_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
> +{
> + return common_fpe_irq_status(ioaddr + XGMAC_MAC_FPE_CTRL_STS, dev);
> +}
> +
> +static void dwxgmac3_fpe_send_mpacket(void __iomem *ioaddr,
> + struct stmmac_fpe_cfg *cfg,
> + enum stmmac_mpacket_type type)
> +{
> + common_fpe_send_mpacket(ioaddr + XGMAC_MAC_FPE_CTRL_STS, cfg, type);
> +}
> +
> +static int dwxgmac3_fpe_get_add_frag_size(const void __iomem *ioaddr)
> +{
> + return FIELD_GET(FPE_MTL_ADD_FRAG_SZ,
> + readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS));
> +}
> +
> +static void dwxgmac3_fpe_set_add_frag_size(void __iomem *ioaddr,
> + u32 add_frag_size)
> +{
> + u32 value;
> +
> + value = readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> + writel(u32_replace_bits(value, add_frag_size, FPE_MTL_ADD_FRAG_SZ),
> + ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> +}
> +
> +static int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
> + struct netlink_ext_ack *extack,
> + u32 pclass)
> +{
> + u32 val, offset, count, preemptible_txqs = 0;
> + struct stmmac_priv *priv = netdev_priv(ndev);
> + u32 num_tc = ndev->num_tc;
> +
> + if (!num_tc) {
> + /* Restore default TC:Queue mapping */
> + for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
> + val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
> + writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
> + priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
> + }
> + }
> +
> + /* Synopsys Databook:
> + * "All Queues within a traffic class are selected in a round robin
> + * fashion (when packets are available) when the traffic class is
> + * selected by the scheduler for packet transmission. This is true for
> + * any of the scheduling algorithms."
> + */
> + for (u32 tc = 0; tc < num_tc; tc++) {
> + count = ndev->tc_to_txq[tc].count;
> + offset = ndev->tc_to_txq[tc].offset;
> +
> + if (pclass & BIT(tc))
> + preemptible_txqs |= GENMASK(offset + count - 1, offset);
> +
> + for (u32 i = 0; i < count; i++) {
> + val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
> + writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
> + priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
> + }
> + }
> +
> + val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> + writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
> + priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> +
> + return 0;
> +}
> +
> const struct stmmac_fpe_ops dwmac5_fpe_ops = {
> .fpe_configure = dwmac5_fpe_configure,
> .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
> @@ -384,4 +456,9 @@ const struct stmmac_fpe_ops dwmac5_fpe_ops = {
>
> const struct stmmac_fpe_ops dwxgmac_fpe_ops = {
> .fpe_configure = dwxgmac3_fpe_configure,
> + .fpe_send_mpacket = dwxgmac3_fpe_send_mpacket,
> + .fpe_irq_status = dwxgmac3_fpe_irq_status,
> + .fpe_get_add_frag_size = dwxgmac3_fpe_get_add_frag_size,
> + .fpe_set_add_frag_size = dwxgmac3_fpe_set_add_frag_size,
> + .fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
> };
This is much better in terms of visibility into the change.
Though I cannot stop thinking that this implementation design:
stmmac_fpe_configure()
-> stmmac_do_void_callback()
   -> fpe_ops->fpe_configure()
               /          \
              /            \
             v              v
dwmac5_fpe_configure   dwxgmac3_fpe_configure
             \              /
              \            /
               v          v
          common_fpe_configure()
is, pardon the expression, stuffy.
If you aren't very opposed to the idea of having struct stmmac_fpe_ops
contain a mix of function pointers and integer constants, I would
suggest removing:
.fpe_configure()
.fpe_send_mpacket()
.fpe_irq_status()
.fpe_get_add_frag_size()
.fpe_set_add_frag_size()
and just keeping a single function pointer, .fpe_map_preemption_class(),
inside stmmac_fpe_ops. Only that is sufficiently different to warrant a
completely separate implementation. Then move all current struct
stmmac_fpe_configure_info to struct stmmac_fpe_ops, and reimplement
stmmac_fpe_configure() directly like common_fpe_configure(),
stmmac_fpe_send_mpacket() directly like common_fpe_send_mpacket(), etc etc.
This lets us avoid the antipattern of calling a function pointer (hidden
by an opaque macro) from common code, only to gather some parameters to
call again a common implementation.
I know this is a preposterous and heretical thing to suggest, but a person
who isn't knee-deep in stmmac has a very hard time locating himself in
space due to the unnecessarily complex layering. If that isn't something
that is important, feel free to ignore.
Powered by blists - more mailing lists