Message-ID: <20210914195543.28ea7ffb@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com>
Date: Tue, 14 Sep 2021 19:55:43 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: Stefan Wahren <stefan.wahren@...e.com>
Cc: "David S. Miller" <davem@...emloft.net>,
Rob Herring <robh+dt@...nel.org>,
Michael Heimpold <michael.heimpold@...tech.com>,
jimmy.shen@...texcom.com, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, devicetree@...r.kernel.org
Subject: Re: [PATCH RFC 3/3] net: vertexcom: Add MSE102x SPI support
On Tue, 14 Sep 2021 17:17:17 +0200 Stefan Wahren wrote:
> This implements an SPI protocol driver for the Vertexcom MSE102x
> HomePlug Green PHY chip.
>
> Signed-off-by: Stefan Wahren <stefan.wahren@...e.com>
> + rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
> + skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
> + if (!skb)
> + goto unlock_spi;
> +
> + /* 2 bytes Start of frame (before ethernet header)
> + * 2 bytes Data frame tail (after ethernet frame)
> + * They are copied, but ignored.
> + */
> + rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
This assumes there is SOF_LEN worth of headroom, but you never
reserved that headroom, and SOF_LEN is also added to the allocation
length... one of those is not necessary?
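Completely untested sketch, assuming mse102x_rx_frame_spi() writes
DET_SOF_LEN + rxlen bytes starting at rxpkt - account for the SOF
bytes inside the skb instead of writing before skb->data:

	rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
	skb = netdev_alloc_skb(mse->ndev, rxalign);
	if (!skb)
		goto unlock_spi;

	/* the 2 SOF bytes double as the usual 2 byte IP header
	 * alignment offset, so no _ip_align variant needed
	 */
	rxpkt = skb_put(skb, rxlen + DET_SOF_LEN);
	if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
		...
	}
	skb_pull(skb, DET_SOF_LEN);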
> + if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
> + mse->ndev->stats.rx_errors++;
> + dev_kfree_skb(skb);
> + goto unlock_spi;
> + }
> +
> + if (netif_msg_pktdata(mse))
> + mse102x_dump_packet(__func__, skb->len, skb->data);
> +
> + skb->protocol = eth_type_trans(skb, mse->ndev);
> + netif_rx_ni(skb);
> +
> + mse->ndev->stats.rx_packets++;
> + mse->ndev->stats.rx_bytes += rxlen;
> +
> +unlock_spi:
> + mutex_unlock(&mses->lock);
> +}
> +
> +static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
> + unsigned long work_timeout)
> +{
> + unsigned int pad = 0;
> + __be16 rx = 0;
> + u16 cmd_resp;
> + int ret;
> + bool first = true;
> +
> + if (txb->len < 60)
> + pad = 60 - txb->len;
> +
> + while (1) {
> + /* It's not predictable how long / many retries it takes to
> + * send at least one packet, so TX timeouts are possible.
> + * That's the reason why the netdev watchdog is not used here.
> + */
> + if (time_after(jiffies, work_timeout))
> + return -ETIMEDOUT;
> +
> + mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
> + ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
> + cmd_resp = be16_to_cpu(rx);
> +
> + if (!ret) {
> + /* ready to send frame ? */
> + if (cmd_resp == CMD_CTR)
> + break;
> +
> + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
> + __func__, cmd_resp);
> + mse->stats.invalid_ctr++;
> + }
> +
> + if (first) {
> + /* throttle at first issue */
> + netif_stop_queue(mse->ndev);
> + /* fast retry */
> + usleep_range(50, 100);
> + first = false;
> + } else {
> + msleep(20);
> + }
> +	}
> +
> + ret = mse102x_tx_frame_spi(mse, txb, pad);
> + if (ret) {
> + net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
> + __func__, ret);
> + }
No need for the braces around a single statement.
> + return ret;
> +}
> +
> +#define TX_QUEUE_MAX 10
> +
> +static void mse102x_tx_work(struct work_struct *work)
> +{
> + /* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */
> + unsigned long work_timeout = jiffies + msecs_to_jiffies(1000);
Are you sure this is safe? What if the system is under heavy load and
the worker thread just gets scheduled out for the best part of a
second?
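If the intent is to give each frame a bounded number of retries rather
than bound wall-clock time, a counter would be immune to scheduling
delays. Untested, and MAX_TX_RETRIES is made up:

	unsigned int retries = 0;

	while (1) {
		if (++retries > MAX_TX_RETRIES)
			return -ETIMEDOUT;
		...
	}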
> + struct mse102x_net_spi *mses;
> + struct mse102x_net *mse;
> + struct sk_buff *txb;
> + bool done = false;
> + int ret = 0;
> +
> + mses = container_of(work, struct mse102x_net_spi, tx_work);
> + mse = &mses->mse102x;
> +
> + while (!done) {
> + mutex_lock(&mses->lock);
I think you can take the lock just around the mse102x_tx_pkt_spi().
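skb_dequeue() does its own locking, so, untested:

	while ((txb = skb_dequeue(&mse->txq))) {
		mutex_lock(&mses->lock);
		ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
		mutex_unlock(&mses->lock);
		...
	}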
> + txb = skb_dequeue(&mse->txq);
> + if (!txb) {
> + done = true;
> + goto unlock_spi;
> + }
> +
> + ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
> + if (ret) {
> + mse->ndev->stats.tx_dropped++;
> + } else {
> + mse->ndev->stats.tx_bytes += txb->len;
> + mse->ndev->stats.tx_packets++;
> + }
> +
> + dev_kfree_skb(txb);
> +
> +unlock_spi:
> + mutex_unlock(&mses->lock);
> + }
> +
> + if (ret == -ETIMEDOUT) {
> + if (netif_msg_timer(mse))
> + netdev_err(mse->ndev, "tx work timeout\n");
> +
> + mse->stats.tx_timeout++;
> + }
> +
> + netif_wake_queue(mse->ndev);
> +}
> +
> +static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb,
> + struct net_device *ndev)
> +{
> + struct mse102x_net *mse = netdev_priv(ndev);
> + struct mse102x_net_spi *mses = to_mse102x_spi(mse);
> + netdev_tx_t ret = NETDEV_TX_OK;
> +
> + netif_dbg(mse, tx_queued, ndev,
> + "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
> +
> + if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX) {
> + netif_stop_queue(ndev);
> + ret = NETDEV_TX_BUSY;
It's best practice to stop the queue in advance if you know you won't
be able to send the next packet, rather than return BUSY and force the
qdisc to requeue the frame.
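I.e. queue the frame unconditionally, then stop the queue once you
know the next frame would not fit; untested:

	skb_queue_tail(&mse->txq, skb);
	schedule_work(&mses->tx_work);

	if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;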
> + } else {
> + skb_queue_tail(&mse->txq, skb);
> + }
> +
> + schedule_work(&mses->tx_work);
> +
> + return ret;
> +}
> +static int mse102x_net_open(struct net_device *ndev)
> +{
> + struct mse102x_net *mse = netdev_priv(ndev);
> + struct mse102x_net_spi *mses = to_mse102x_spi(mse);
> + int ret;
> +
> + ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
> + ndev->name, mse);
> + if (ret < 0) {
> + netdev_err(ndev, "Failed to get irq: %d\n", ret);
> + return ret;
> + }
> +
> + /* lock the card, even if we may not actually be doing anything
> + * else at the moment
> + */
> + mutex_lock(&mses->lock);
What is this lock protecting?
> + netif_dbg(mse, ifup, ndev, "opening\n");
> +
> + netif_start_queue(ndev);
> +
> + netif_carrier_on(ndev);
> +
> + netif_dbg(mse, ifup, ndev, "network device up\n");
> +
> + mutex_unlock(&mses->lock);
> +
> + return 0;
> +}
> +
> +static int mse102x_net_stop(struct net_device *ndev)
> +{
> + struct mse102x_net *mse = netdev_priv(ndev);
> + struct mse102x_net_spi *mses = to_mse102x_spi(mse);
> +
> + netif_info(mse, ifdown, ndev, "shutting down\n");
> +
> + netif_stop_queue(ndev);
> +
> + /* stop any outstanding work */
> + flush_work(&mses->tx_work);
The work can restart the queue - the netif_wake_queue() at the end of
mse102x_tx_work() may run after this netif_stop_queue().
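One option, untested - make the wake in the worker conditional:

	if (netif_running(mse->ndev))
		netif_wake_queue(mse->ndev);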
> + /* ensure any queued tx buffers are dumped */
> + while (!skb_queue_empty(&mse->txq)) {
> + struct sk_buff *txb = skb_dequeue(&mse->txq);
> +
> + netif_dbg(mse, ifdown, ndev,
> + "%s: freeing txb %p\n", __func__, txb);
> +
> + dev_kfree_skb(txb);
> + }
skb_queue_purge(), maybe?
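i.e. the whole loop becomes:

	skb_queue_purge(&mse->txq);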
> + free_irq(ndev->irq, mse);
> +
> + return 0;
> +}
> +static void mse102x_get_drvinfo(struct net_device *ndev,
> + struct ethtool_drvinfo *di)
> +{
> + strscpy(di->driver, DRV_NAME, sizeof(di->driver));
> + strscpy(di->version, "1.00", sizeof(di->version));
Please drop the driver version, we depend on the kernel version these
days (and that's provided by ethtool core by default).
> + strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info));
> +}
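With the version gone the whole function reduces to:

	static void mse102x_get_drvinfo(struct net_device *ndev,
					struct ethtool_drvinfo *di)
	{
		strscpy(di->driver, DRV_NAME, sizeof(di->driver));
		strscpy(di->bus_info, dev_name(ndev->dev.parent),
			sizeof(di->bus_info));
	}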