Message-Id: <20250616-vsock-vmtest-v4-7-bdd1659c33fb@meta.com>
Date: Mon, 16 Jun 2025 21:32:56 -0700
From: Bobby Eshleman <bobbyeshleman@...il.com>
To: Stefano Garzarella <sgarzare@...hat.com>,
Stefan Hajnoczi <stefanha@...hat.com>, Shuah Khan <shuah@...nel.org>
Cc: kvm@...r.kernel.org, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Simon Horman <horms@...nel.org>,
linux-kernel@...r.kernel.org, virtualization@...ts.linux.dev,
netdev@...r.kernel.org, linux-kselftest@...r.kernel.org,
Bobby Eshleman <bobbyeshleman@...il.com>,
Bobby Eshleman <bobbyeshleman@...a.com>
Subject: [PATCH RFC net-next v4 07/11] vhost/vsock: add netns support

From: Bobby Eshleman <bobbyeshleman@...a.com>

Add the ability to isolate vsock flows using network namespaces.

The namespace for a VM is inherited from the PID that opened the
vhost-vsock device, via get_net_ns_by_pid(). Each packet coming from
the guest is tagged with the device's namespace on the TX path, and
guest CID lookups in vhost_vsock_get() only match a device whose
namespace satisfies vsock_net_check_mode(), so flows in unrelated
namespaces can no longer reach each other.

Signed-off-by: Bobby Eshleman <bobbyeshleman@...a.com>
---
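Notes (not part of the commit message): below is a minimal userspace
sketch of how a VMM picks the namespace its vhost-vsock device is bound
to. The helper name and netns path are illustrative, not part of this
series; the only kernel-facing pieces are setns(2) and /dev/vhost-vsock,
since vhost_vsock_dev_open() resolves the namespace of the opening task.

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <sched.h>
  #include <unistd.h>

  /* Enter a named netns, then open vhost-vsock so the device inherits
   * that namespace (vhost_vsock_dev_open() calls get_net_ns_by_pid()).
   */
  static int open_vhost_vsock_in_netns(const char *netns_path)
  {
  	int nsfd, vhostfd;

  	nsfd = open(netns_path, O_RDONLY); /* e.g. "/run/netns/vm0" */
  	if (nsfd < 0)
  		return -1;

  	/* Move the calling task into the target network namespace. */
  	if (setns(nsfd, CLONE_NEWNET) < 0) {
  		close(nsfd);
  		return -1;
  	}
  	close(nsfd);

  	/* The device's namespace is fixed at open time. */
  	vhostfd = open("/dev/vhost-vsock", O_RDWR);
  	return vhostfd;
  }
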
drivers/vhost/vsock.c | 48 ++++++++++++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 10 deletions(-)
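
Also illustrative: because vhost_vsock_get() now matches on both CID and
namespace, vhost_vsock_set_cid() should only report -EADDRINUSE when
vsock_net_check_mode() treats the two devices' namespaces as visible to
each other, so a guest CID may be reused across isolated namespaces. A
sketch using the existing VHOST_VSOCK_SET_GUEST_CID ioctl:

  #include <linux/vhost.h>
  #include <sys/ioctl.h>

  /* Claim a guest CID on an already-open vhost-vsock fd. With this
   * patch the conflict check is scoped by vsock_net_check_mode().
   */
  static int claim_guest_cid(int vhostfd, __u64 cid)
  {
  	return ioctl(vhostfd, VHOST_VSOCK_SET_GUEST_CID, &cid);
  }
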
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 802153e23073..863419533a3f 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -46,6 +46,8 @@ static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 struct vhost_vsock {
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[2];
+	struct net *net;
+	netns_tracker ns_tracker;
 
 	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
 	struct hlist_node hash;
@@ -59,6 +61,22 @@ struct vhost_vsock {
 	bool seqpacket_allow;
 };
 
+static void vhost_vsock_net_set(struct vhost_vsock *vsock, struct net *net)
+{
+	if (net_eq(net, vsock_global_net()))
+		vsock->net = vsock_global_net();
+	else
+		vsock->net = get_net_track(net, &vsock->ns_tracker, GFP_KERNEL);
+}
+
+static void vhost_vsock_net_put(struct vhost_vsock *vsock)
+{
+	if (net_eq(vsock->net, vsock_global_net()))
+		return;
+
+	put_net_track(vsock->net, &vsock->ns_tracker);
+}
+
 static u32 vhost_transport_get_local_cid(void)
 {
 	return VHOST_VSOCK_DEFAULT_HOST_CID;
@@ -67,7 +85,7 @@ static u32 vhost_transport_get_local_cid(void)
 /* Callers that dereference the return value must hold vhost_vsock_mutex or the
  * RCU read lock.
  */
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid, struct net *net)
 {
 	struct vhost_vsock *vsock;
 
@@ -78,9 +96,8 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 		if (other_cid == 0)
 			continue;
 
-		if (other_cid == guest_cid)
+		if (other_cid == guest_cid && vsock_net_check_mode(net, vsock->net))
 			return vsock;
-
 	}
 
 	return NULL;
@@ -272,13 +289,14 @@ static int
 vhost_transport_send_pkt(struct sk_buff *skb)
 {
 	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+	struct net *net = virtio_vsock_skb_net(skb);
 	struct vhost_vsock *vsock;
 	int len = skb->len;
 
 	rcu_read_lock();
 
 	/* Find the vhost_vsock according to guest context id */
-	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid), net);
 	if (!vsock) {
 		rcu_read_unlock();
 		kfree_skb(skb);
@@ -305,7 +323,7 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
 	rcu_read_lock();
 
 	/* Find the vhost_vsock according to guest context id */
-	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid, sock_net(sk_vsock(vsk)));
 	if (!vsock)
 		goto out;
 
@@ -403,7 +421,7 @@ static bool vhost_transport_msgzerocopy_allow(void)
 	return true;
 }
 
-static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+static bool vhost_transport_seqpacket_allow(struct vsock_sock *vsk, u32 remote_cid);
 
 static struct virtio_transport vhost_transport = {
 	.transport = {
@@ -459,13 +477,14 @@ static struct virtio_transport vhost_transport = {
 	.send_pkt = vhost_transport_send_pkt,
 };
 
-static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+static bool vhost_transport_seqpacket_allow(struct vsock_sock *vsk, u32 remote_cid)
 {
+	struct net *net = sock_net(sk_vsock(vsk));
 	struct vhost_vsock *vsock;
 	bool seqpacket_allow = false;
 
 	rcu_read_lock();
-	vsock = vhost_vsock_get(remote_cid);
+	vsock = vhost_vsock_get(remote_cid, net);
 
 	if (vsock)
 		seqpacket_allow = vsock->seqpacket_allow;
@@ -525,6 +544,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 			continue;
 		}
 
+		virtio_vsock_skb_set_net(skb, vsock->net);
 		total_len += sizeof(*hdr) + skb->len;
 
 		/* Deliver to monitoring devices all received packets */
@@ -651,10 +671,16 @@ static void vhost_vsock_free(struct vhost_vsock *vsock)
 
 static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 {
+
 	struct vhost_virtqueue **vqs;
 	struct vhost_vsock *vsock;
+	struct net *net;
 	int ret;
 
+	net = get_net_ns_by_pid(current->pid);
+	if (IS_ERR(net))
+		return PTR_ERR(net);
+
 	/* This struct is large and allocation could fail, fall back to vmalloc
 	 * if there is no other way.
 	 */
@@ -668,6 +694,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 		goto out;
 	}
 
+	vhost_vsock_net_set(vsock, net);
 	vsock->guest_cid = 0; /* no CID assigned yet */
 	vsock->seqpacket_allow = false;
 
@@ -707,7 +734,7 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
 	 */
 
 	/* If the peer is still valid, no need to reset connection */
-	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+	if (vhost_vsock_get(vsk->remote_addr.svm_cid, sock_net(sk)))
 		return;
 
 	/* If the close timeout is pending, let it expire. This avoids races
@@ -752,6 +779,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
 	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
 
 	vhost_dev_cleanup(&vsock->dev);
+	vhost_vsock_net_put(vsock);
 	kfree(vsock->dev.vqs);
 	vhost_vsock_free(vsock);
 	return 0;
@@ -778,7 +806,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 
 	/* Refuse if CID is already in use */
 	mutex_lock(&vhost_vsock_mutex);
-	other = vhost_vsock_get(guest_cid);
+	other = vhost_vsock_get(guest_cid, vsock->net);
 	if (other && other != vsock) {
 		mutex_unlock(&vhost_vsock_mutex);
 		return -EADDRINUSE;
--
2.47.1