lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID:
 <SN6PR02MB41577881ADDD9076F138DA3FD45CA@SN6PR02MB4157.namprd02.prod.outlook.com>
Date: Tue, 22 Jul 2025 17:44:00 +0000
From: Michael Kelley <mhklinux@...look.com>
To: Roman Kisel <romank@...ux.microsoft.com>, "alok.a.tiwari@...cle.com"
	<alok.a.tiwari@...cle.com>, "arnd@...db.de" <arnd@...db.de>, "bp@...en8.de"
	<bp@...en8.de>, "corbet@....net" <corbet@....net>,
	"dave.hansen@...ux.intel.com" <dave.hansen@...ux.intel.com>,
	"decui@...rosoft.com" <decui@...rosoft.com>, "haiyangz@...rosoft.com"
	<haiyangz@...rosoft.com>, "hpa@...or.com" <hpa@...or.com>,
	"kys@...rosoft.com" <kys@...rosoft.com>, "mingo@...hat.com"
	<mingo@...hat.com>, "rdunlap@...radead.org" <rdunlap@...radead.org>,
	"tglx@...utronix.de" <tglx@...utronix.de>, "Tianyu.Lan@...rosoft.com"
	<Tianyu.Lan@...rosoft.com>, "wei.liu@...nel.org" <wei.liu@...nel.org>,
	"linux-arch@...r.kernel.org" <linux-arch@...r.kernel.org>,
	"linux-coco@...ts.linux.dev" <linux-coco@...ts.linux.dev>,
	"linux-doc@...r.kernel.org" <linux-doc@...r.kernel.org>,
	"linux-hyperv@...r.kernel.org" <linux-hyperv@...r.kernel.org>,
	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
	"x86@...nel.org" <x86@...nel.org>
CC: "apais@...rosoft.com" <apais@...rosoft.com>, "benhill@...rosoft.com"
	<benhill@...rosoft.com>, "bperkins@...rosoft.com" <bperkins@...rosoft.com>,
	"sunilmut@...rosoft.com" <sunilmut@...rosoft.com>
Subject: RE: [PATCH hyperv-next v4 05/16] Drivers: hv: Rename fields for SynIC
 message and event pages

From: Roman Kisel <romank@...ux.microsoft.com> Sent: Monday, July 14, 2025 3:16 PM
> 
> Confidential VMBus requires interacting with two SynICs -- one
> provided by the host hypervisor, and one provided by the paravisor.
> Each SynIC requires its own message and event pages.
> 
> Rename the existing host-accessible SynIC message and event pages
> with the "hyp_" prefix to clearly distinguish them from the paravisor
> ones. The field name is also changed in mshv_root.* for consistency.
> 
> No functional changes.
> 
> Signed-off-by: Roman Kisel <romank@...ux.microsoft.com>

Reviewed-by: Michael Kelley <mhklinux@...look.com>

> ---
>  drivers/hv/channel_mgmt.c |  6 ++--
>  drivers/hv/hv.c           | 66 +++++++++++++++++++--------------------
>  drivers/hv/hyperv_vmbus.h |  4 +--
>  drivers/hv/mshv_root.h    |  2 +-
>  drivers/hv/mshv_synic.c   |  6 ++--
>  drivers/hv/vmbus_drv.c    |  6 ++--
>  6 files changed, 45 insertions(+), 45 deletions(-)
> 
> diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
> index 6e084c207414..6f87220e2ca3 100644
> --- a/drivers/hv/channel_mgmt.c
> +++ b/drivers/hv/channel_mgmt.c
> @@ -843,14 +843,14 @@ static void vmbus_wait_for_unload(void)
>  				= per_cpu_ptr(hv_context.cpu_context, cpu);
> 
>  			/*
> -			 * In a CoCo VM the synic_message_page is not allocated
> +			 * In a CoCo VM the hyp_synic_message_page is not allocated
>  			 * in hv_synic_alloc(). Instead it is set/cleared in
>  			 * hv_synic_enable_regs() and hv_synic_disable_regs()
>  			 * such that it is set only when the CPU is online. If
>  			 * not all present CPUs are online, the message page
>  			 * might be NULL, so skip such CPUs.
>  			 */
> -			page_addr = hv_cpu->synic_message_page;
> +			page_addr = hv_cpu->hyp_synic_message_page;
>  			if (!page_addr)
>  				continue;
> 
> @@ -891,7 +891,7 @@ static void vmbus_wait_for_unload(void)
>  		struct hv_per_cpu_context *hv_cpu
>  			= per_cpu_ptr(hv_context.cpu_context, cpu);
> 
> -		page_addr = hv_cpu->synic_message_page;
> +		page_addr = hv_cpu->hyp_synic_message_page;
>  		if (!page_addr)
>  			continue;
> 
> diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
> index 308c8f279df8..964b9102477d 100644
> --- a/drivers/hv/hv.c
> +++ b/drivers/hv/hv.c
> @@ -145,20 +145,20 @@ int hv_synic_alloc(void)
>  		 * Skip these pages allocation here.
>  		 */
>  		if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
> -			hv_cpu->synic_message_page =
> +			hv_cpu->hyp_synic_message_page =
>  				(void *)get_zeroed_page(GFP_ATOMIC);
> -			if (!hv_cpu->synic_message_page) {
> +			if (!hv_cpu->hyp_synic_message_page) {
>  				pr_err("Unable to allocate SYNIC message page\n");
>  				goto err;
>  			}
> 
> -			hv_cpu->synic_event_page =
> +			hv_cpu->hyp_synic_event_page =
>  				(void *)get_zeroed_page(GFP_ATOMIC);
> -			if (!hv_cpu->synic_event_page) {
> +			if (!hv_cpu->hyp_synic_event_page) {
>  				pr_err("Unable to allocate SYNIC event page\n");
> 
> -				free_page((unsigned long)hv_cpu->synic_message_page);
> -				hv_cpu->synic_message_page = NULL;
> +				free_page((unsigned long)hv_cpu->hyp_synic_message_page);
> +				hv_cpu->hyp_synic_message_page = NULL;
>  				goto err;
>  			}
>  		}
> @@ -166,30 +166,30 @@ int hv_synic_alloc(void)
>  		if (!ms_hyperv.paravisor_present &&
>  		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
>  			ret = set_memory_decrypted((unsigned long)
> -				hv_cpu->synic_message_page, 1);
> +				hv_cpu->hyp_synic_message_page, 1);
>  			if (ret) {
>  				pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
> -				hv_cpu->synic_message_page = NULL;
> +				hv_cpu->hyp_synic_message_page = NULL;
> 
>  				/*
>  				 * Free the event page here so that hv_synic_free()
>  				 * won't later try to re-encrypt it.
>  				 */
> -				free_page((unsigned long)hv_cpu->synic_event_page);
> -				hv_cpu->synic_event_page = NULL;
> +				free_page((unsigned long)hv_cpu->hyp_synic_event_page);
> +				hv_cpu->hyp_synic_event_page = NULL;
>  				goto err;
>  			}
> 
>  			ret = set_memory_decrypted((unsigned long)
> -				hv_cpu->synic_event_page, 1);
> +				hv_cpu->hyp_synic_event_page, 1);
>  			if (ret) {
>  				pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
> -				hv_cpu->synic_event_page = NULL;
> +				hv_cpu->hyp_synic_event_page = NULL;
>  				goto err;
>  			}
> 
> -			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
> -			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
> +			memset(hv_cpu->hyp_synic_message_page, 0, PAGE_SIZE);
> +			memset(hv_cpu->hyp_synic_event_page, 0, PAGE_SIZE);
>  		}
>  	}
> 
> @@ -225,28 +225,28 @@ void hv_synic_free(void)
> 
>  		if (!ms_hyperv.paravisor_present &&
>  		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
> -			if (hv_cpu->synic_message_page) {
> +			if (hv_cpu->hyp_synic_message_page) {
>  				ret = set_memory_encrypted((unsigned long)
> -					hv_cpu->synic_message_page, 1);
> +					hv_cpu->hyp_synic_message_page, 1);
>  				if (ret) {
>  					pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
> -					hv_cpu->synic_message_page = NULL;
> +					hv_cpu->hyp_synic_message_page = NULL;
>  				}
>  			}
> 
> -			if (hv_cpu->synic_event_page) {
> +			if (hv_cpu->hyp_synic_event_page) {
>  				ret = set_memory_encrypted((unsigned long)
> -					hv_cpu->synic_event_page, 1);
> +					hv_cpu->hyp_synic_event_page, 1);
>  				if (ret) {
>  					pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
> -					hv_cpu->synic_event_page = NULL;
> +					hv_cpu->hyp_synic_event_page = NULL;
>  				}
>  			}
>  		}
> 
>  		free_page((unsigned long)hv_cpu->post_msg_page);
> -		free_page((unsigned long)hv_cpu->synic_event_page);
> -		free_page((unsigned long)hv_cpu->synic_message_page);
> +		free_page((unsigned long)hv_cpu->hyp_synic_event_page);
> +		free_page((unsigned long)hv_cpu->hyp_synic_message_page);
>  	}
> 
>  	kfree(hv_context.hv_numa_map);
> @@ -276,12 +276,12 @@ void hv_synic_enable_regs(unsigned int cpu)
>  		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
>  		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
>  				~ms_hyperv.shared_gpa_boundary;
> -		hv_cpu->synic_message_page =
> +		hv_cpu->hyp_synic_message_page =
>  			(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
> -		if (!hv_cpu->synic_message_page)
> +		if (!hv_cpu->hyp_synic_message_page)
>  			pr_err("Fail to map synic message page.\n");
>  	} else {
> -		simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
> +		simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page)
>  			>> HV_HYP_PAGE_SHIFT;
>  	}
> 
> @@ -295,12 +295,12 @@ void hv_synic_enable_regs(unsigned int cpu)
>  		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
>  		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
>  				~ms_hyperv.shared_gpa_boundary;
> -		hv_cpu->synic_event_page =
> +		hv_cpu->hyp_synic_event_page =
>  			(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
> -		if (!hv_cpu->synic_event_page)
> +		if (!hv_cpu->hyp_synic_event_page)
>  			pr_err("Fail to map synic event page.\n");
>  	} else {
> -		siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
> +		siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page)
>  			>> HV_HYP_PAGE_SHIFT;
>  	}
> 
> @@ -358,8 +358,8 @@ void hv_synic_disable_regs(unsigned int cpu)
>  	 */
>  	simp.simp_enabled = 0;
>  	if (ms_hyperv.paravisor_present || hv_root_partition()) {
> -		iounmap(hv_cpu->synic_message_page);
> -		hv_cpu->synic_message_page = NULL;
> +		iounmap(hv_cpu->hyp_synic_message_page);
> +		hv_cpu->hyp_synic_message_page = NULL;
>  	} else {
>  		simp.base_simp_gpa = 0;
>  	}
> @@ -370,8 +370,8 @@ void hv_synic_disable_regs(unsigned int cpu)
>  	siefp.siefp_enabled = 0;
> 
>  	if (ms_hyperv.paravisor_present || hv_root_partition()) {
> -		iounmap(hv_cpu->synic_event_page);
> -		hv_cpu->synic_event_page = NULL;
> +		iounmap(hv_cpu->hyp_synic_event_page);
> +		hv_cpu->hyp_synic_event_page = NULL;
>  	} else {
>  		siefp.base_siefp_gpa = 0;
>  	}
> @@ -401,7 +401,7 @@ static bool hv_synic_event_pending(void)
>  {
>  	struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
>  	union hv_synic_event_flags *event =
> -		(union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
> +		(union hv_synic_event_flags *)hv_cpu->hyp_synic_event_page + VMBUS_MESSAGE_SINT;
>  	unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
>  	bool pending;
>  	u32 relid;
> diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
> index 0b450e53161e..fc3cdb26ff1a 100644
> --- a/drivers/hv/hyperv_vmbus.h
> +++ b/drivers/hv/hyperv_vmbus.h
> @@ -120,8 +120,8 @@ enum {
>   * Per cpu state for channel handling
>   */
>  struct hv_per_cpu_context {
> -	void *synic_message_page;
> -	void *synic_event_page;
> +	void *hyp_synic_message_page;
> +	void *hyp_synic_event_page;
> 
>  	/*
>  	 * The page is only used in hv_post_message() for a TDX VM (with the
> diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
> index e3931b0f1269..db6b42db2fdc 100644
> --- a/drivers/hv/mshv_root.h
> +++ b/drivers/hv/mshv_root.h
> @@ -169,7 +169,7 @@ struct mshv_girq_routing_table {
>  };
> 
>  struct hv_synic_pages {
> -	struct hv_message_page *synic_message_page;
> +	struct hv_message_page *hyp_synic_message_page;
>  	struct hv_synic_event_flags_page *synic_event_flags_page;
>  	struct hv_synic_event_ring_page *synic_event_ring_page;
>  };
> diff --git a/drivers/hv/mshv_synic.c b/drivers/hv/mshv_synic.c
> index e6b6381b7c36..f8b0337cdc82 100644
> --- a/drivers/hv/mshv_synic.c
> +++ b/drivers/hv/mshv_synic.c
> @@ -394,7 +394,7 @@ mshv_intercept_isr(struct hv_message *msg)
>  void mshv_isr(void)
>  {
>  	struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
> -	struct hv_message_page **msg_page = &spages->synic_message_page;
> +	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
>  	struct hv_message *msg;
>  	bool handled;
> 
> @@ -456,7 +456,7 @@ int mshv_synic_init(unsigned int cpu)
>  #endif
>  	union hv_synic_scontrol sctrl;
>  	struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
> -	struct hv_message_page **msg_page = &spages->synic_message_page;
> +	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
>  	struct hv_synic_event_flags_page **event_flags_page =
>  			&spages->synic_event_flags_page;
>  	struct hv_synic_event_ring_page **event_ring_page =
> @@ -550,7 +550,7 @@ int mshv_synic_cleanup(unsigned int cpu)
>  	union hv_synic_sirbp sirbp;
>  	union hv_synic_scontrol sctrl;
>  	struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
> -	struct hv_message_page **msg_page = &spages->synic_message_page;
> +	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
>  	struct hv_synic_event_flags_page **event_flags_page =
>  		&spages->synic_event_flags_page;
>  	struct hv_synic_event_ring_page **event_ring_page =
> diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
> index 698c86c4ef03..72940a64b0b6 100644
> --- a/drivers/hv/vmbus_drv.c
> +++ b/drivers/hv/vmbus_drv.c
> @@ -1060,7 +1060,7 @@ static void vmbus_onmessage_work(struct work_struct *work)
>  void vmbus_on_msg_dpc(unsigned long data)
>  {
>  	struct hv_per_cpu_context *hv_cpu = (void *)data;
> -	void *page_addr = hv_cpu->synic_message_page;
> +	void *page_addr = hv_cpu->hyp_synic_message_page;
>  	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
>  				  VMBUS_MESSAGE_SINT;
>  	struct vmbus_channel_message_header *hdr;
> @@ -1244,7 +1244,7 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
>  	 * The event page can be directly checked to get the id of
>  	 * the channel that has the interrupt pending.
>  	 */
> -	void *page_addr = hv_cpu->synic_event_page;
> +	void *page_addr = hv_cpu->hyp_synic_event_page;
>  	union hv_synic_event_flags *event
>  		= (union hv_synic_event_flags *)page_addr +
>  					 VMBUS_MESSAGE_SINT;
> @@ -1327,7 +1327,7 @@ static void vmbus_isr(void)
> 
>  	vmbus_chan_sched(hv_cpu);
> 
> -	page_addr = hv_cpu->synic_message_page;
> +	page_addr = hv_cpu->hyp_synic_message_page;
>  	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
> 
>  	/* Check if there are actual msgs to be processed */
> --
> 2.43.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ