Message-ID: <20260120064230.3602565-12-mrathor@linux.microsoft.com>
Date: Mon, 19 Jan 2026 22:42:26 -0800
From: Mukesh R <mrathor@...ux.microsoft.com>
To: linux-kernel@...r.kernel.org,
linux-hyperv@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
iommu@...ts.linux.dev,
linux-pci@...r.kernel.org,
linux-arch@...r.kernel.org
Cc: kys@...rosoft.com,
haiyangz@...rosoft.com,
wei.liu@...nel.org,
decui@...rosoft.com,
longli@...rosoft.com,
catalin.marinas@....com,
will@...nel.org,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
hpa@...or.com,
joro@...tes.org,
lpieralisi@...nel.org,
kwilczynski@...nel.org,
mani@...nel.org,
robh@...nel.org,
bhelgaas@...gle.com,
arnd@...db.de,
nunodasneves@...ux.microsoft.com,
mhklinux@...look.com,
romank@...ux.microsoft.com
Subject: [PATCH v0 11/15] x86/hyperv: Build logical device ids for PCI passthru hcalls
From: Mukesh Rathor <mrathor@...ux.microsoft.com>
On Hyper-V, most hypercalls related to PCI passthru, such as those that
map/unmap regions or interrupts, need a device id as a parameter. A
device id refers to a specific device and comes in two types:

  o Logical: used for direct attach (see below) hypercalls. A logical
             device id is a unique 62-bit value that is created and
             sent during the initial device attach. All further
             communications (interrupt remaps, etc.) must then use
             this logical id (composition sketched below).

  o PCI:     used for device domain hypercalls such as map, unmap, etc.
             This id is built from the actual device BDF info.

  PS: Since an L1VH only supports direct attaches, a logical device id
      on an L1VH VM is always a VMBus device id. For non-L1VH cases,
      the logical device id is built from PCI BDF info, although that
      is not strictly needed.
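
For the non-L1VH case, the logical id packs the PCI segment above the
BDF, with the top two bits of the 64-bit device id holding the device
type (consistent with the 62-bit id noted above). A rough sketch,
illustration only, mirroring hv_build_devid_type_logical() below:

	/* bus:8 dev:5 fn:3, i.e. PCI_DEVID(bus, PCI_DEVFN(slot, fn)) */
	u16 bdf = (bus << 8) | PCI_DEVFN(slot, fn);
	/* segment lands in bits 31..16; the value fits in the 62-bit id */
	u64 id  = ((u64)segment << 16) | bdf;
	/* device_type = HV_DEVICE_TYPE_LOGICAL occupies the top two bits */
	/* e.g. segment 0, device 03:00.0  ->  id = 0x0300 */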

At a high level, Hyper-V supports two ways to do PCI passthru:

  1. Device Domain: the root must create a device domain in the
     hypervisor and issue map/unmap hypercalls for mapping and
     unmapping guest RAM. All hypervisor communications use a device
     id of type PCI to identify and reference the device.

  2. Direct Attach: the hypervisor simply uses the guest's HW page
     table for mappings, so the host need not issue map/unmap
     hypercalls. A directly attached device must be referenced via its
     logical device id and never via the PCI device id (see the usage
     sketch below). For an L1VH root/parent, Hyper-V only supports
     direct attaches.
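
The interrupt path added in this patch picks the device id type from
the attach mode. For reference, a rough usage sketch (not part of the
patch; it just mirrors hv_build_irq_devid() below):

	u64 devid;

	/* direct attached (an L1VH is always direct attach) */
	if (hv_pcidev_is_attached_dev(pdev) || hv_l1vh_partition())
		devid = hv_build_devid_oftype(pdev, HV_DEVICE_TYPE_LOGICAL);
	else	/* device domain */
		devid = hv_build_devid_oftype(pdev, HV_DEVICE_TYPE_PCI);
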
Signed-off-by: Mukesh Rathor <mrathor@...ux.microsoft.com>
---
arch/x86/hyperv/irqdomain.c | 60 ++++++++++++++++++++++++++++++---
arch/x86/include/asm/mshyperv.h | 14 ++++++++
2 files changed, 70 insertions(+), 4 deletions(-)
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index ccbe5848a28f..33017aa0caa4 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -137,7 +137,7 @@ static int get_rid_cb(struct pci_dev *pdev, u16 alias, void *data)
return 0;
}
-static union hv_device_id hv_build_devid_type_pci(struct pci_dev *pdev)
+static u64 hv_build_devid_type_pci(struct pci_dev *pdev)
{
int pos;
union hv_device_id hv_devid;
@@ -197,7 +197,58 @@ static union hv_device_id hv_build_devid_type_pci(struct pci_dev *pdev)
}
out:
- return hv_devid;
+ return hv_devid.as_uint64;
+}
+
+/* Build device id for direct attached devices */
+static u64 hv_build_devid_type_logical(struct pci_dev *pdev)
+{
+ hv_pci_segment segment;
+ union hv_device_id hv_devid;
+ union hv_pci_bdf bdf = {.as_uint16 = 0};
+ struct rid_data data = {
+ .bridge = NULL,
+ .rid = PCI_DEVID(pdev->bus->number, pdev->devfn)
+ };
+
+ segment = pci_domain_nr(pdev->bus);
+ bdf.bus = PCI_BUS_NUM(data.rid);
+ bdf.device = PCI_SLOT(data.rid);
+ bdf.function = PCI_FUNC(data.rid);
+
+ hv_devid.as_uint64 = 0;
+ hv_devid.device_type = HV_DEVICE_TYPE_LOGICAL;
+ hv_devid.logical.id = (u64)segment << 16 | bdf.as_uint16;
+
+ return hv_devid.as_uint64;
+}
+
+/* Build device id after the device has been attached */
+u64 hv_build_devid_oftype(struct pci_dev *pdev, enum hv_device_type type)
+{
+ if (type == HV_DEVICE_TYPE_LOGICAL) {
+ if (hv_l1vh_partition())
+ return hv_pci_vmbus_device_id(pdev);
+ else
+ return hv_build_devid_type_logical(pdev);
+ } else if (type == HV_DEVICE_TYPE_PCI)
+ return hv_build_devid_type_pci(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hv_build_devid_oftype);
+
+/* Build device id for the interrupt path */
+static u64 hv_build_irq_devid(struct pci_dev *pdev)
+{
+ enum hv_device_type dev_type;
+
+ if (hv_pcidev_is_attached_dev(pdev) || hv_l1vh_partition())
+ dev_type = HV_DEVICE_TYPE_LOGICAL;
+ else
+ dev_type = HV_DEVICE_TYPE_PCI;
+
+ return hv_build_devid_oftype(pdev, dev_type);
}
/*
@@ -221,7 +272,7 @@ int hv_map_msi_interrupt(struct irq_data *data,
msidesc = irq_data_get_msi_desc(data);
pdev = msi_desc_to_pci_dev(msidesc);
- hv_devid = hv_build_devid_type_pci(pdev);
+ hv_devid.as_uint64 = hv_build_irq_devid(pdev);
cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
return hv_map_interrupt(hv_current_partition_id, hv_devid, false, cpu,
@@ -296,7 +347,8 @@ static int hv_unmap_msi_interrupt(struct pci_dev *pdev,
{
union hv_device_id hv_devid;
- hv_devid = hv_build_devid_type_pci(pdev);
+ hv_devid.as_uint64 = hv_build_irq_devid(pdev);
+
return hv_unmap_interrupt(hv_devid.as_uint64, irq_entry);
}
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d7fdfb25e76..97477c5a8487 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -188,6 +188,20 @@ bool hv_vcpu_is_preempted(int vcpu);
static inline void hv_apic_init(void) {}
#endif
+#if IS_ENABLED(CONFIG_HYPERV_IOMMU)
+static inline bool hv_pcidev_is_attached_dev(struct pci_dev *pdev)
+{ return false; } /* temporary */
+u64 hv_build_devid_oftype(struct pci_dev *pdev, enum hv_device_type type);
+#else /* CONFIG_HYPERV_IOMMU */
+static inline bool hv_pcidev_is_attached_dev(struct pci_dev *pdev)
+{ return false; }
+
+static inline u64 hv_build_devid_oftype(struct pci_dev *pdev,
+ enum hv_device_type type)
+{ return 0; }
+
+#endif /* CONFIG_HYPERV_IOMMU */
+
u64 hv_pci_vmbus_device_id(struct pci_dev *pdev);
struct irq_domain *hv_create_pci_msi_domain(void);
--
2.51.2.vfs.0.1