Message-ID: <2cd96b8d-d1ff-7cf9-1dc4-9acb51c207e9@pensando.io>
Date: Tue, 30 Jul 2019 11:35:43 -0700
From: Shannon Nelson <snelson@...sando.io>
To: Saeed Mahameed <saeedm@...lanox.com>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"davem@...emloft.net" <davem@...emloft.net>
Subject: Re: [PATCH v4 net-next 12/19] ionic: Add async link status check and
basic stats
On 7/24/19 5:04 PM, Saeed Mahameed wrote:
> On Mon, 2019-07-22 at 14:40 -0700, Shannon Nelson wrote:
>> Add code to handle the link status event, and wire up the
>> basic netdev hardware stats.
>>
>> Signed-off-by: Shannon Nelson <snelson@...sando.io>
>> ---
>>   .../net/ethernet/pensando/ionic/ionic_lif.c | 116 ++++++++++++++++++
>>   .../net/ethernet/pensando/ionic/ionic_lif.h |   1 +
>>   2 files changed, 117 insertions(+)
[...]
>> +        /* After outstanding events are processed we can check on
>> +         * the link status and any outstanding interrupt credits.
>> +         *
>> +         * We wait until here to check on the link status in case
>> +         * there was a long list of link events from a flap episode.
>> +         */
>> +        if (test_bit(LIF_LINK_CHECK_NEEDED, lif->state)) {
>> +                struct ionic_deferred_work *work;
>> +
>> +                work = kzalloc(sizeof(*work), GFP_ATOMIC);
>> +                if (!work) {
>> +                        netdev_err(lif->netdev, "%s OOM\n", __func__);
> why not have a preallocated, dedicated lif->link_check_work instead
> of allocating in atomic context on every link check event?
I don't want to have to worry about the possibility of additional requests
driven from other threads reusing the same struct while it's still queued.
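If we did preallocate a single work item, it would need its own busy flag
so a second event couldn't reuse it while it's still sitting on the
deferred list. Roughly something like this (untested sketch; the
LIF_LINK_CHECK_BUSY bit and the lif->link_check_work field are
hypothetical, not in this patch):

        if (!test_and_set_bit(LIF_LINK_CHECK_BUSY, lif->state)) {
                /* previous deferred work is done, safe to reuse */
                lif->link_check_work.type = DW_TYPE_LINK_STATUS;
                ionic_lif_deferred_enqueue(&lif->deferred,
                                           &lif->link_check_work);
        }
        /* the deferred worker would clear LIF_LINK_CHECK_BUSY once
         * it finishes the link check
         */

That trades the occasional GFP_ATOMIC allocation for an extra state bit to
manage; for something as rare as a link flap the allocation seemed simpler.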
>> +                } else {
>> +                        work->type = DW_TYPE_LINK_STATUS;
>> +                        ionic_lif_deferred_enqueue(&lif->deferred, work);
>> +                }
>> +        }
>> +
>>  return_to_napi:
>>          return work_done;
>>  }
>>
>> +static void ionic_get_stats64(struct net_device *netdev,
>> +                              struct rtnl_link_stats64 *ns)
>> +{
>> +        struct lif *lif = netdev_priv(netdev);
>> +        struct lif_stats *ls;
>> +
>> +        memset(ns, 0, sizeof(*ns));
>> +        ls = &lif->info->stats;
>> +
>> +        ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
>> +                         le64_to_cpu(ls->rx_mcast_packets) +
>> +                         le64_to_cpu(ls->rx_bcast_packets);
>> +
>> +        ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
>> +                         le64_to_cpu(ls->tx_mcast_packets) +
>> +                         le64_to_cpu(ls->tx_bcast_packets);
>> +
>> +        ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
>> +                       le64_to_cpu(ls->rx_mcast_bytes) +
>> +                       le64_to_cpu(ls->rx_bcast_bytes);
>> +
>> +        ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
>> +                       le64_to_cpu(ls->tx_mcast_bytes) +
>> +                       le64_to_cpu(ls->tx_bcast_bytes);
>> +
>> +        ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
>> +                         le64_to_cpu(ls->rx_mcast_drop_packets) +
>> +                         le64_to_cpu(ls->rx_bcast_drop_packets);
>> +
>> +        ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
>> +                         le64_to_cpu(ls->tx_mcast_drop_packets) +
>> +                         le64_to_cpu(ls->tx_bcast_drop_packets);
>> +
>> +        ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
>> +
>> +        ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
>> +
>> +        ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
>> +                               le64_to_cpu(ls->rx_queue_disabled) +
>> +                               le64_to_cpu(ls->rx_desc_fetch_error) +
>> +                               le64_to_cpu(ls->rx_desc_data_error);
>> +
>> +        ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
>> +                                le64_to_cpu(ls->tx_queue_disabled) +
>> +                                le64_to_cpu(ls->tx_desc_fetch_error) +
>> +                                le64_to_cpu(ls->tx_desc_data_error);
>> +
>> +        ns->rx_errors = ns->rx_over_errors +
>> +                        ns->rx_missed_errors;
>> +
>> +        ns->tx_errors = ns->tx_aborted_errors;
>> +}
>> +
>>  static int ionic_lif_addr_add(struct lif *lif, const u8 *addr)
>>  {
>>          struct ionic_admin_ctx ctx = {
>> @@ -581,6 +693,7 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
>>  static const struct net_device_ops ionic_netdev_ops = {
>>          .ndo_open               = ionic_open,
>>          .ndo_stop               = ionic_stop,
>> +        .ndo_get_stats64        = ionic_get_stats64,
>>          .ndo_set_rx_mode        = ionic_set_rx_mode,
>>          .ndo_set_features       = ionic_set_features,
>>          .ndo_set_mac_address    = ionic_set_mac_address,
>> @@ -1418,6 +1531,8 @@ static int ionic_lif_init(struct lif *lif)
>>  
>>          set_bit(LIF_INITED, lif->state);
>>  
>> +        ionic_link_status_check(lif);
>> +
>>          return 0;
>>  
>>  err_out_notifyq_deinit:
>> @@ -1461,6 +1576,7 @@ int ionic_lifs_register(struct ionic *ionic)
>>                  return err;
>>          }
>>  
> are events (NotifyQ) enabled at this stage? If so, you might end up
> racing ionic_link_status_check with itself.
I'll look at that again to see what such a race might do. I probably
should add a test here and in a couple other spots to see if the link
status check has already been requested.
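Something like this untested sketch in the notifyq event handler would
collapse duplicate requests (LIF_LINK_CHECK_REQUESTED is a made-up bit
name here, not in the patch):

        struct ionic_deferred_work *work;

        if (!test_and_set_bit(LIF_LINK_CHECK_REQUESTED, lif->state)) {
                work = kzalloc(sizeof(*work), GFP_ATOMIC);
                if (work) {
                        work->type = DW_TYPE_LINK_STATUS;
                        ionic_lif_deferred_enqueue(&lif->deferred, work);
                } else {
                        /* drop the claim so a later event can retry */
                        clear_bit(LIF_LINK_CHECK_REQUESTED, lif->state);
                }
        }

with ionic_link_status_check() clearing the bit once it has read the new
link state, so the init-time call in ionic_lif_init() and the notifyq
path can't queue two checks at the same time.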
sln