Date:   Thu, 6 Apr 2023 20:39:26 +0800
From:   Xiaofei Tan <tanxiaofei@...wei.com>
To:     Shuai Xue <xueshuai@...ux.alibaba.com>, <tony.luck@...el.com>,
        <naoya.horiguchi@....com>
CC:     <linux-acpi@...r.kernel.org>, <linux-mm@...ck.org>,
        <linux-kernel@...r.kernel.org>, <justin.he@....com>,
        <akpm@...ux-foundation.org>, <ardb@...nel.org>,
        <ashish.kalra@....com>, <baolin.wang@...ux.alibaba.com>,
        <bp@...en8.de>, <cuibixuan@...ux.alibaba.com>,
        <dave.hansen@...ux.intel.com>, <james.morse@....com>,
        <jarkko@...nel.org>, <lenb@...nel.org>, <linmiaohe@...wei.com>,
        <lvying6@...wei.com>, <rafael@...nel.org>, <xiexiuqi@...wei.com>,
        <zhuo.song@...ux.alibaba.com>
Subject: Re: [PATCH v3 2/2] ACPI: APEI: handle synchronous exceptions in task
 work

Hi Shuai,

Thanks for this effort, it's great.
Some comments below.

On 2023/3/17 15:24, Shuai Xue wrote:
> Hardware errors could be signaled by an asynchronous interrupt, e.g. when an
> error is detected by a background scrubber, or by a synchronous exception,
> e.g. when an uncorrected error is consumed. Both synchronous and
> asynchronous errors are queued and handled by a dedicated kthread in a
> workqueue.
>
> commit 7f17b4a121d0 ("ACPI: APEI: Kick the memory_failure() queue for
> synchronous errors") keeps track of whether memory_failure() work was
> queued, and makes task_work pending to flush out the workqueue so that the
> work for a synchronous error is processed before returning to user-space.
> This trick ensures that the corrupted page is unmapped and poisoned. After
> returning to user-space, the task resumes at the current instruction, which
> triggers a page fault; the kernel then sends SIGBUS to the current process
> due to VM_FAULT_HWPOISON.
>
> However, memory failure recovery for hwpoison-aware mechanisms does not
> work as expected. For example, hwpoison-aware user-space processes like
> QEMU register their customized SIGBUS handler and enable early kill mode by
> setting PF_MCE_EARLY at initialization. The kernel then directly notifies
> the process by sending a SIGBUS signal from memory failure handling, but
> with the wrong si_code: the actual user-space process is accessing the
> corrupt memory location, yet its memory failure work is handled in a
> kthread context, so kill_proc() sends SIGBUS with si_code BUS_MCEERR_AO to
> the actual user-space process instead of BUS_MCEERR_AR.
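
(As an aside, to make the si_code distinction concrete for other readers:
a hwpoison-aware process typically distinguishes the two cases in its
SIGBUS handler roughly as in the sketch below. This is only an
illustrative user-space fragment written for this mail, not code taken
from QEMU.)

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ucontext)
{
	if (si->si_code == BUS_MCEERR_AR) {
		/* Action required: this thread consumed the poisoned data
		 * at si->si_addr and must not simply retry the access. */
		write(STDERR_FILENO, "BUS_MCEERR_AR\n", 14);
	} else if (si->si_code == BUS_MCEERR_AO) {
		/* Action optional: poison was reported asynchronously and
		 * the page has not been consumed yet. */
		write(STDERR_FILENO, "BUS_MCEERR_AO\n", 14);
	}
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);
	/* Early kill mode would additionally be enabled via
	 * prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0). */
	pause();
	return 0;
}
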
>
> To this end, separate synchronous and asynchronous error handling into
> different paths like the x86 platform does:
> 
> - task work for synchronous errors.
> - workqueue for asynchronous errors.
> 
> Then, for synchronous errors, the current context in memory failure belongs
> exactly to the task consuming the poisoned data, and SIGBUS is sent with
> the proper si_code.
>
> Fixes: 7f17b4a121d0 ("ACPI: APEI: Kick the memory_failure() queue for synchronous errors")
> Signed-off-by: Shuai Xue <xueshuai@...ux.alibaba.com>
> ---
>   drivers/acpi/apei/ghes.c | 114 ++++++++++++++++++++++-----------------
>   include/acpi/ghes.h      |   3 --
>   mm/memory-failure.c      |  13 -----
>   3 files changed, 64 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
> index cccd96596efe..1901ee3498c4 100644
> --- a/drivers/acpi/apei/ghes.c
> +++ b/drivers/acpi/apei/ghes.c
> @@ -452,45 +452,79 @@ static void ghes_clear_estatus(struct ghes *ghes,
>   }
>   
>   /*
> - * Called as task_work before returning to user-space.
> - * Ensure any queued work has been done before we return to the context that
> - * triggered the notification.
> + * struct sync_task_work - for synchronous RAS event
> + *
> + * @twork:                callback_head for task work
> + * @pfn:                  page frame number of corrupted page
> + * @flags:                fine tune action taken
> + *
> + * Structure to pass task work to be handled before
> + * ret_to_user via task_work_add().
>    */
> -static void ghes_kick_task_work(struct callback_head *head)
> +struct sync_task_work {
> +	struct callback_head twork;
> +	u64 pfn;
> +	int flags;
> +};
> +
> +static void memory_failure_cb(struct callback_head *twork)
>   {
> -	struct acpi_hest_generic_status *estatus;
> -	struct ghes_estatus_node *estatus_node;
> -	u32 node_len;
> +	int ret;
> +	struct sync_task_work *twcb =
> +		container_of(twork, struct sync_task_work, twork);
>   
> -	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
> -	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
> -		memory_failure_queue_kick(estatus_node->task_work_cpu);
> +	ret = memory_failure(twcb->pfn, twcb->flags);
> +	kfree(twcb);
>   
> -	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
> -	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
> -	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
> +	if (!ret)
> +		return;
> +
> +	/*
> +	 * -EHWPOISON from memory_failure() means that it already sent SIGBUS
> +	 * to the current process with the proper error info,
> +	 * -EOPNOTSUPP means hwpoison_filter() filtered the error event,
> +	 *
> +	 * In both cases, no further processing is required.
> +	 */
> +	if (ret == -EHWPOISON || ret == -EOPNOTSUPP)
> +		return;
> +
> +	pr_err("Memory error not recovered");
> +	force_sig(SIGBUS);
>   }
>   
> -static bool ghes_do_memory_failure(u64 physical_addr, int flags)
> +static void ghes_do_memory_failure(u64 physical_addr, int flags)
>   {
>   	unsigned long pfn;
> +	struct sync_task_work *twcb;
>   
>   	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
> -		return false;
> +		return;
>   
>   	pfn = PHYS_PFN(physical_addr);
>   	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
>   		pr_warn_ratelimited(FW_WARN GHES_PFX
>   		"Invalid address in generic error data: %#llx\n",
>   		physical_addr);
> -		return false;
> +		return;

For synchronous errors, we need to send SIGBUS to the current task if the
error is not recovered, as this patch does in memory_failure_cb().
Such abnormal branches should also be treated as not recovered.
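
Something along these lines, perhaps (untested, just to illustrate the
idea; the MF_ACTION_REQUIRED / current->mm check mirrors the one you
already use further down in this function):

	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
		"Invalid address in generic error data: %#llx\n",
		physical_addr);
		/* A synchronous error we cannot handle: kill the task. */
		if (flags == MF_ACTION_REQUIRED && current->mm)
			force_sig(SIGBUS);
		return;
	}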


> +	}
> +
> +	if (flags == MF_ACTION_REQUIRED && current->mm) {
> +		twcb = kmalloc(sizeof(*twcb), GFP_ATOMIC);
> +		if (!twcb)
> +			return;

It's the same here.


> +
> +		twcb->pfn = pfn;
> +		twcb->flags = flags;
> +		init_task_work(&twcb->twork, memory_failure_cb);
> +		task_work_add(current, &twcb->twork, TWA_RESUME);
> +		return;
>   	}
>   
>   	memory_failure_queue(pfn, flags);
> -	return true;
>   }
>   
> -static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
> +static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
>   				       int sev, bool sync)
>   {
>   	int flags = -1;
> @@ -498,7 +532,7 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
>   	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
>   
>   	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
> -		return false;
> +		return;

and here.


>   
>   	/* iff following two events can be handled properly by now */
>   	if (sec_sev == GHES_SEV_CORRECTED &&
> @@ -508,16 +542,15 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
>   		flags = sync ? MF_ACTION_REQUIRED : 0;
>   
>   	if (flags != -1)
> -		return ghes_do_memory_failure(mem_err->physical_addr, flags);
> +		ghes_do_memory_failure(mem_err->physical_addr, flags);
>   
> -	return false;
> +	return;
>   }
>   
> -static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
> +static void ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
>   				       int sev, bool sync)
>   {
>   	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
> -	bool queued = false;
>   	int sec_sev, i;
>   	char *p;
>   	int flags = sync ? MF_ACTION_REQUIRED : 0;
> @@ -526,7 +559,7 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
>   
>   	sec_sev = ghes_severity(gdata->error_severity);
>   	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
> -		return false;
> +		return;

and here.


>   
>   	p = (char *)(err + 1);
>   	for (i = 0; i < err->err_info_num; i++) {
> @@ -542,7 +575,7 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
>   		 * and don't filter out 'corrected' error here.
>   		 */
>   		if (is_cache && has_pa) {
> -			queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
> +			ghes_do_memory_failure(err_info->physical_fault_addr, flags);
>   			p += err_info->length;
>   			continue;
>   		}
> @@ -555,8 +588,6 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
>   				    error_type);
>   		p += err_info->length;
>   	}

and here, for the case where the memory failure handling is not done because the PA is invalid.


> -
> -	return queued;
>   }
>   
>   /*
> @@ -654,7 +685,7 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
>   	schedule_work(&entry->work);
>   }
>   
> -static bool ghes_do_proc(struct ghes *ghes,
> +static void ghes_do_proc(struct ghes *ghes,
>   			 const struct acpi_hest_generic_status *estatus)
>   {
>   	int sev, sec_sev;
> @@ -662,7 +693,6 @@ static bool ghes_do_proc(struct ghes *ghes,
>   	guid_t *sec_type;
>   	const guid_t *fru_id = &guid_null;
>   	char *fru_text = "";
> -	bool queued = false;
>   	bool sync = is_hest_sync_notify(ghes);
>   
>   	sev = ghes_severity(estatus->error_severity);
> @@ -681,13 +711,13 @@ static bool ghes_do_proc(struct ghes *ghes,
>   			atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
>   
>   			arch_apei_report_mem_error(sev, mem_err);
> -			queued = ghes_handle_memory_failure(gdata, sev, sync);
> +			ghes_handle_memory_failure(gdata, sev, sync);
>   		}
>   		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
>   			ghes_handle_aer(gdata);
>   		}
>   		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
> -			queued = ghes_handle_arm_hw_error(gdata, sev, sync);
> +			ghes_handle_arm_hw_error(gdata, sev, sync);
>   		} else {
>   			void *err = acpi_hest_get_payload(gdata);
>   
> @@ -697,8 +727,6 @@ static bool ghes_do_proc(struct ghes *ghes,
>   					       gdata->error_data_length);
>   		}
>   	}
> -
> -	return queued;
>   }
>   
>   static void __ghes_print_estatus(const char *pfx,
> @@ -1000,9 +1028,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
>   	struct ghes_estatus_node *estatus_node;
>   	struct acpi_hest_generic *generic;
>   	struct acpi_hest_generic_status *estatus;
> -	bool task_work_pending;
>   	u32 len, node_len;
> -	int ret;
>   
>   	llnode = llist_del_all(&ghes_estatus_llist);
>   	/*
> @@ -1017,25 +1043,14 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
>   		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
>   		len = cper_estatus_len(estatus);
>   		node_len = GHES_ESTATUS_NODE_LEN(len);
> -		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
> +		ghes_do_proc(estatus_node->ghes, estatus);
>   		if (!ghes_estatus_cached(estatus)) {
>   			generic = estatus_node->generic;
>   			if (ghes_print_estatus(NULL, generic, estatus))
>   				ghes_estatus_cache_add(generic, estatus);
>   		}
> -
> -		if (task_work_pending && current->mm) {
> -			estatus_node->task_work.func = ghes_kick_task_work;
> -			estatus_node->task_work_cpu = smp_processor_id();
> -			ret = task_work_add(current, &estatus_node->task_work,
> -					    TWA_RESUME);
> -			if (ret)
> -				estatus_node->task_work.func = NULL;
> -		}
> -
> -		if (!estatus_node->task_work.func)
> -			gen_pool_free(ghes_estatus_pool,
> -				      (unsigned long)estatus_node, node_len);
> +		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
> +			      node_len);
>   
>   		llnode = next;
>   	}
> @@ -1096,7 +1111,6 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
>   
>   	estatus_node->ghes = ghes;
>   	estatus_node->generic = ghes->generic;
> -	estatus_node->task_work.func = NULL;
>   	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
>   
>   	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
> diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
> index 3c8bba9f1114..e5e0c308d27f 100644
> --- a/include/acpi/ghes.h
> +++ b/include/acpi/ghes.h
> @@ -35,9 +35,6 @@ struct ghes_estatus_node {
>   	struct llist_node llnode;
>   	struct acpi_hest_generic *generic;
>   	struct ghes *ghes;
> -
> -	int task_work_cpu;
> -	struct callback_head task_work;
>   };
>   
>   struct ghes_estatus_cache {
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index fae9baf3be16..6ea8c325acb3 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -2355,19 +2355,6 @@ static void memory_failure_work_func(struct work_struct *work)
>   	}
>   }
>   
> -/*
> - * Process memory_failure work queued on the specified CPU.
> - * Used to avoid return-to-userspace racing with the memory_failure workqueue.
> - */
> -void memory_failure_queue_kick(int cpu)
> -{
> -	struct memory_failure_cpu *mf_cpu;
> -
> -	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
> -	cancel_work_sync(&mf_cpu->work);
> -	memory_failure_work_func(&mf_cpu->work);
> -}
> -
>   static int __init memory_failure_init(void)
>   {
>   	struct memory_failure_cpu *mf_cpu;
