Message-ID: <20240416193039.272997-6-danielj@nvidia.com>
Date: Tue, 16 Apr 2024 22:30:38 +0300
From: Daniel Jurgens <danielj@nvidia.com>
To: <netdev@vger.kernel.org>
CC: <mst@redhat.com>, <jasowang@redhat.com>, <xuanzhuo@linux.alibaba.com>,
	<virtualization@lists.linux.dev>, <davem@davemloft.net>,
	<edumazet@google.com>, <kuba@kernel.org>, <pabeni@redhat.com>,
	<jiri@nvidia.com>, Daniel Jurgens <danielj@nvidia.com>
Subject: [PATCH net-next v4 5/6] virtio_net: Add a lock for per queue RX coalesce

Once the RTNL locking around the control buffer is removed, there can be
contention on the per queue RX interrupt coalescing data. Use a spin
lock per queue.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
drivers/net/virtio_net.c | 40 +++++++++++++++++++++++++++++++++-------
1 file changed, 33 insertions(+), 7 deletions(-)
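
A note on the design: dim_lock protects the two per-queue fields that
were previously serialized by RTNL, dim_enabled and intr_coal, which are
touched by the ethtool paths, the dim worker, and queue reset. As a
rough illustration of the pattern only (not the driver code), here is a
self-contained userspace analogue using pthread spinlocks; struct rxq,
rxq_get_coal() and rxq_dim_update() are made-up names for this sketch:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct intr_coal {
        uint32_t max_usecs;
        uint32_t max_packets;
};

struct rxq {
        pthread_spinlock_t dim_lock; /* protects dim_enabled and intr_coal */
        bool dim_enabled;
        struct intr_coal intr_coal;
};

/* ethtool-style read: snapshot both fields under the lock */
static struct intr_coal rxq_get_coal(struct rxq *q)
{
        struct intr_coal c;

        pthread_spin_lock(&q->dim_lock);
        c = q->intr_coal;
        pthread_spin_unlock(&q->dim_lock);
        return c;
}

/* dim-worker-style write: recheck dim_enabled under the lock and skip
 * the update if dim was turned off since the work was scheduled.
 */
static void rxq_dim_update(struct rxq *q, struct intr_coal next)
{
        pthread_spin_lock(&q->dim_lock);
        if (q->dim_enabled)
                q->intr_coal = next;
        pthread_spin_unlock(&q->dim_lock);
}

int main(void)
{
        struct rxq q = { .dim_enabled = true };
        struct intr_coal c;

        pthread_spin_init(&q.dim_lock, PTHREAD_PROCESS_PRIVATE);
        rxq_dim_update(&q, (struct intr_coal){ .max_usecs = 8, .max_packets = 64 });
        c = rxq_get_coal(&q);
        printf("usecs=%u packets=%u\n", (unsigned)c.max_usecs, (unsigned)c.max_packets);
        pthread_spin_destroy(&q.dim_lock);
        return 0;
}

Build with "cc sketch.c -lpthread". The point of the sketch is only the
invariant: both fields are read and written under the same per-queue
lock, so readers always see a consistent pair.
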
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b3aa4d2a15e9..bae5beafe1a1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -184,6 +184,9 @@ struct receive_queue {
 	/* Is dynamic interrupt moderation enabled? */
 	bool dim_enabled;
 
+	/* Used to protect dim_enabled and intr_coal */
+	spinlock_t dim_lock;
+
 	/* Dynamic Interrupt Moderation */
 	struct dim dim;
 
@@ -2218,6 +2221,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+		/* Intentionally not taking dim_lock here. This could result
+		 * in a net_dim call with dim now disabled. But virtnet_rx_dim_work
+		 * will take the lock and skip the update if dim is now disabled.
+		 */
 		if (napi_complete && rq->dim_enabled)
 			virtnet_rx_dim_update(vi, rq);
 	}
@@ -3087,9 +3094,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
 			return err;
 
 		/* The reason is same as the transmit virtqueue reset */
+		spin_lock(&vi->rq[i].dim_lock);
 		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
 						       vi->intr_coal_rx.max_usecs,
 						       vi->intr_coal_rx.max_packets);
+		spin_unlock(&vi->rq[i].dim_lock);
 		if (err)
 			return err;
 	}
@@ -3468,6 +3477,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
 	struct scatterlist sgs_rx;
+	int ret = 0;
 	int i;
 
 	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -3477,16 +3487,22 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 				ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
 		return -EINVAL;
 
+	/* Acquire all queues' dim_locks */
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		spin_lock(&vi->rq[i].dim_lock);
+
 	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = true;
 		for (i = 0; i < vi->max_queue_pairs; i++)
 			vi->rq[i].dim_enabled = true;
-		return 0;
+		goto unlock;
 	}
 
 	coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
-	if (!coal_rx)
-		return -ENOMEM;
+	if (!coal_rx) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
 
 	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = false;
@@ -3504,8 +3520,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-				  &sgs_rx))
-		return -EINVAL;
+				  &sgs_rx)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
@@ -3513,8 +3531,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
 		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
 	}
+unlock:
+	for (i = vi->max_queue_pairs - 1; i >= 0; i--)
+		spin_unlock(&vi->rq[i].dim_lock);
 
-	return 0;
+	return ret;
 }
 
 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -3538,10 +3559,12 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
 					     u16 queue)
 {
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
-	bool cur_rx_dim = vi->rq[queue].dim_enabled;
 	u32 max_usecs, max_packets;
+	bool cur_rx_dim;
 	int err;
 
+	guard(spinlock)(&vi->rq[queue].dim_lock);
+	cur_rx_dim = vi->rq[queue].dim_enabled;
 	max_usecs = vi->rq[queue].intr_coal.max_usecs;
 	max_packets = vi->rq[queue].intr_coal.max_packets;
 
@@ -3603,6 +3626,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	qnum = rq - vi->rq;
 
+	guard(spinlock)(&rq->dim_lock);
 	if (!rq->dim_enabled)
 		goto out;
@@ -3756,6 +3780,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
 		return -EINVAL;
 
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+		guard(spinlock)(&vi->rq[queue].dim_lock);
 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
@@ -4501,6 +4526,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 
 		u64_stats_init(&vi->rq[i].stats.syncp);
 		u64_stats_init(&vi->sq[i].stats.syncp);
+		spin_lock_init(&vi->rq[i].dim_lock);
 	}
 
 	return 0;
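
Two details of the locking above are easy to miss. First,
virtnet_send_rx_notf_coal_cmds() updates global and per-queue state, so
it takes every queue's dim_lock in ascending index order and releases
them in reverse; because locks are always acquired in the same global
order, concurrent callers cannot deadlock. Second, guard(spinlock)(...)
is the kernel's scoped-lock helper (see linux/cleanup.h): it acquires
the lock and releases it automatically when the enclosing scope exits,
which is why virtnet_rx_dim_work() and the other per-queue paths have no
explicit unlock. A minimal userspace sketch of the ordered
acquire/release pattern, with hypothetical lock_all_queues() /
unlock_all_queues() helpers and pthread spinlocks standing in for
spinlock_t:

#include <pthread.h>
#include <stdio.h>

#define NUM_QUEUES 4

static pthread_spinlock_t dim_lock[NUM_QUEUES];

/* Take every per-queue lock in ascending index order. As long as all
 * multi-lock callers use this same order, no lock-order cycle can form.
 */
static void lock_all_queues(void)
{
        int i;

        for (i = 0; i < NUM_QUEUES; i++)
                pthread_spin_lock(&dim_lock[i]);
}

/* Release in reverse order, mirroring the unlock loop in the patch. */
static void unlock_all_queues(void)
{
        int i;

        for (i = NUM_QUEUES - 1; i >= 0; i--)
                pthread_spin_unlock(&dim_lock[i]);
}

int main(void)
{
        int i;

        for (i = 0; i < NUM_QUEUES; i++)
                pthread_spin_init(&dim_lock[i], PTHREAD_PROCESS_PRIVATE);

        lock_all_queues();
        /* ...update coalescing state for all queues here... */
        unlock_all_queues();

        for (i = 0; i < NUM_QUEUES; i++)
                pthread_spin_destroy(&dim_lock[i]);

        printf("took and released %d queue locks in order\n", NUM_QUEUES);
        return 0;
}

Strictly speaking only the acquisition order matters for deadlock
avoidance; releasing in reverse is convention, matching the patch.
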
--
2.34.1