Message-ID: <924e9c5c-f4a8-4db5-bbe8-964505191849@lunn.ch>
Date: Wed, 23 Oct 2024 16:00:22 +0200
From: Andrew Lunn <andrew@...n.ch>
To: Jijie Shao <shaojijie@...wei.com>
Cc: davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
pabeni@...hat.com, andrew+netdev@...n.ch, horms@...nel.org,
shenjian15@...wei.com, wangpeiyang1@...wei.com,
liuyonglong@...wei.com, chenhao418@...wei.com,
sudongming1@...wei.com, xujunsheng@...wei.com,
shiyongbang@...wei.com, libaihan@...wei.com,
jonathan.cameron@...wei.com, shameerali.kolothum.thodi@...wei.com,
salil.mehta@...wei.com, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH net-next 2/7] net: hibmcge: Add debugfs supported in this
module
> +static int hbg_dbg_dev_spec(struct seq_file *s, void *unused)
> +{
> + struct net_device *netdev = dev_get_drvdata(s->private);
> + struct hbg_priv *priv = netdev_priv(netdev);
> + struct hbg_dev_specs *specs;
> +
> + specs = &priv->dev_specs;
> + seq_printf(s, "mac id: %u\n", specs->mac_id);
> + seq_printf(s, "phy addr: %u\n", specs->phy_addr);
> + seq_printf(s, "mac addr: %pM\n", specs->mac_addr.sa_data);
> + seq_printf(s, "vlan layers: %u\n", specs->vlan_layers);
> + seq_printf(s, "max frame len: %u\n", specs->max_frame_len);
> + seq_printf(s, "min mtu: %u, max mtu: %u\n",
> + specs->min_mtu, specs->max_mtu);
I think these are all available via standard APIs. There is no need to
have them in debugfs as well.
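As a rough, untested sketch of what I mean: if probe fills in the
standard struct net_device fields, the MAC address and the MTU limits
are already visible via 'ip -d link' and friends, e.g.

	/* sketch only, in some hypothetical hbg_netdev_init(); the
	 * fields themselves are the standard net_device ones */
	eth_hw_addr_set(netdev, priv->dev_specs.mac_addr.sa_data);
	netdev->min_mtu = priv->dev_specs.min_mtu;
	netdev->max_mtu = priv->dev_specs.max_mtu;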
> + seq_printf(s, "mdio frequency: %u\n", specs->mdio_frequency);
Is this interesting? Are you clocking it greater than 2.5MHz?
> +static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
> +{
> + struct net_device *netdev = dev_get_drvdata(s->private);
> + struct hbg_priv *priv = netdev_priv(netdev);
> + struct hbg_irq_info *info;
> + u32 i;
> +
> + for (i = 0; i < priv->vectors.info_array_len; i++) {
> + info = &priv->vectors.info_array[i];
> + seq_printf(s,
> + "%-20s: is enabled: %s, print: %s, count: %llu\n",
> + info->name,
> + hbg_get_bool_str(hbg_hw_irq_is_enabled(priv,
> + info->mask)),
> + hbg_get_bool_str(info->need_print),
> + info->count);
> + }
How does this differ from what is available already from the IRQ
subsystem?
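If each vector is requested with a descriptive name, the per-interrupt
count is already visible in /proc/interrupts. Roughly (untested,
hbg_irq_handler and info->name are just my guesses at your naming):

	ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
			       hbg_irq_handler, 0, info->name, priv);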
> +static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
> +{
> + struct net_device *netdev = dev_get_drvdata(s->private);
> + struct hbg_priv *priv = netdev_priv(netdev);
> +
> + seq_printf(s, "event handling state: %s\n",
> + hbg_get_bool_str(test_bit(HBG_NIC_STATE_EVENT_HANDLING,
> + &priv->state)));
> +
> + seq_printf(s, "tx timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
Don't you have this via ethtool -S?
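If not, the usual way is a small set of ethtool_ops callbacks,
something like this sketch (names invented, not tested):

	static const char hbg_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
		"tx_timeout_cnt",
	};

	static void hbg_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
	{
		if (stringset == ETH_SS_STATS)
			memcpy(data, hbg_ethtool_stats_strings,
			       sizeof(hbg_ethtool_stats_strings));
	}

	static int hbg_get_sset_count(struct net_device *netdev, int stringset)
	{
		return stringset == ETH_SS_STATS ?
		       ARRAY_SIZE(hbg_ethtool_stats_strings) : -EOPNOTSUPP;
	}

	static void hbg_get_ethtool_stats(struct net_device *netdev,
					  struct ethtool_stats *stats, u64 *data)
	{
		struct hbg_priv *priv = netdev_priv(netdev);

		data[0] = priv->stats.tx_timeout_cnt;
	}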
> @@ -209,6 +210,10 @@ static int hbg_init(struct hbg_priv *priv)
> if (ret)
> return ret;
>
> + ret = hbg_debugfs_init(priv);
> + if (ret)
> + return ret;
> +
There is no need to test the results from debugfs calls.
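hbg_debugfs_init() can simply return void; the debugfs core copes with
failures itself, and even a failed parent dentry is handled gracefully
by later debugfs_create_file() calls. So this can just become:

	/* no error handling needed; debugfs failures are not fatal */
	hbg_debugfs_init(priv);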
> +static int __init hbg_module_init(void)
> +{
> + int ret;
> +
> + hbg_debugfs_register();
> + ret = pci_register_driver(&hbg_driver);
> + if (ret)
> + hbg_debugfs_unregister();
This seems odd. I would expect each device to have its own debugfs
directory; there should be nothing global.
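i.e. something along these lines in probe/remove (untested sketch,
names guessed), with nothing left to do in module init/exit:

	/* in probe: one directory per device ... */
	priv->debugfs_root = debugfs_create_dir(pci_name(pdev), NULL);

	/* ... and in remove: tear the whole thing down again */
	debugfs_remove_recursive(priv->debugfs_root);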
Andrew