Date:   Fri, 19 Aug 2022 17:20:36 -0700
From:   Ira Weiny <ira.weiny@...el.com>
To:     Davidlohr Bueso <dave@...olabs.net>
CC:     <linux-arch@...r.kernel.org>, <dan.j.williams@...el.com>,
        <peterz@...radead.org>, <mark.rutland@....com>,
        <dave.jiang@...el.com>, <Jonathan.Cameron@...wei.com>,
        <a.manzanares@...sung.com>, <bwidawsk@...nel.org>,
        <alison.schofield@...el.com>, <linux-cxl@...r.kernel.org>,
        <nvdimm@...ts.linux.dev>, <x86@...nel.org>,
        <linux-arm-kernel@...ts.infradead.org>,
        <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v2] arch/cacheflush: Introduce flush_all_caches()

On Fri, Aug 19, 2022 at 10:10:24AM -0700, Davidlohr Bueso wrote:
> With CXL security features, the nvdimm requirement for global CPU cache
> flushing is no longer specific to that subsystem, even beyond the scope
> of security_ops. CXL will need such semantics for features not
> necessarily limited to persistent memory.
> 
> The functionality this enables is the ability to instantaneously
> secure erase potentially terabytes of memory at once, and the kernel
> needs to be sure that none of the data from before the secure erase is
> still present in the cache. It is also used when unlocking a memory
> device, where speculative reads and firmware accesses could have cached
> poison from before the device was unlocked.
> 
> This capability is typically used only once per boot (for unlock), or
> once per bare-metal provisioning event (secure erase), such as when
> handing off the system to another tenant or decommissioning a device.
> That small scope, plus the fact that none of this is available to a VM,
> limits the potential damage.
> 
> While the scope of this is the physical address space, add a new
> flush_all_caches() to the cacheflush headers such that each architecture
> can define it when capable. For x86, just use the wbinvd hammer and
> prevent any other arch from being capable.
> 
> Signed-off-by: Davidlohr Bueso <dave@...olabs.net>
> ---
> 
> Changes from v1 (https://lore.kernel.org/all/20220815160706.tqd42dv24tgb7x7y@offworld/):
> - Added comments and improved changelog to reflect that this routine
>   should be avoided and is not considered a general API (Peter, Dan).
> 
>  arch/x86/include/asm/cacheflush.h |  4 +++
>  drivers/acpi/nfit/intel.c         | 41 ++++++++++++++-----------------
>  include/asm-generic/cacheflush.h  | 31 +++++++++++++++++++++++
>  3 files changed, 53 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
> index b192d917a6d0..ac4d4fd4e508 100644
> --- a/arch/x86/include/asm/cacheflush.h
> +++ b/arch/x86/include/asm/cacheflush.h
> @@ -10,4 +10,8 @@
>  
>  void clflush_cache_range(void *addr, unsigned int size);
>  
> +/* see comments in the stub version */
> +#define flush_all_caches() \
> +	do { wbinvd_on_all_cpus(); } while (0)
> +
>  #endif /* _ASM_X86_CACHEFLUSH_H */
> diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
> index 8dd792a55730..f2f6c31e6ab7 100644
> --- a/drivers/acpi/nfit/intel.c
> +++ b/drivers/acpi/nfit/intel.c
> @@ -4,6 +4,7 @@
>  #include <linux/ndctl.h>
>  #include <linux/acpi.h>
>  #include <asm/smp.h>
> +#include <linux/cacheflush.h>
>  #include "intel.h"
>  #include "nfit.h"
>  
> @@ -190,8 +191,6 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
>  	}
>  }
>  
> -static void nvdimm_invalidate_cache(void);
> -
>  static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
>  		const struct nvdimm_key_data *key_data)
>  {
> @@ -210,6 +209,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
>  	};
>  	int rc;
>  
> +	if (!flush_all_caches_capable())
> +		return -EINVAL;
> +
>  	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
>  		return -ENOTTY;
>  
> @@ -228,7 +230,7 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
>  	}
>  
>  	/* DIMM unlocked, invalidate all CPU caches before we read it */
> -	nvdimm_invalidate_cache();
> +	flush_all_caches();
>  
>  	return 0;
>  }
> @@ -294,11 +296,14 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
>  		},
>  	};
>  
> +	if (!flush_all_caches_capable())
> +		return -EINVAL;
> +
>  	if (!test_bit(cmd, &nfit_mem->dsm_mask))
>  		return -ENOTTY;
>  
>  	/* flush all cache before we erase DIMM */
> -	nvdimm_invalidate_cache();
> +	flush_all_caches();
>  	memcpy(nd_cmd.cmd.passphrase, key->data,
>  			sizeof(nd_cmd.cmd.passphrase));
>  	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
> @@ -318,7 +323,7 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
>  	}
>  
>  	/* DIMM erased, invalidate all CPU caches before we read it */
> -	nvdimm_invalidate_cache();
> +	flush_all_caches();
>  	return 0;
>  }
>  
> @@ -338,6 +343,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
>  		},
>  	};
>  
> +	if (!flush_all_caches_capable())
> +		return -EINVAL;
> +
>  	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
>  		return -ENOTTY;
>  
> @@ -355,7 +363,7 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
>  	}
>  
>  	/* flush all cache before we make the nvdimms available */
> -	nvdimm_invalidate_cache();
> +	flush_all_caches();
>  	return 0;
>  }
>  
> @@ -377,11 +385,14 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
>  		},
>  	};
>  
> +	if (!flush_all_caches_capable())
> +		return -EINVAL;
> +
>  	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
>  		return -ENOTTY;
>  
>  	/* flush all cache before we erase DIMM */
> -	nvdimm_invalidate_cache();
> +	flush_all_caches();
>  	memcpy(nd_cmd.cmd.passphrase, nkey->data,
>  			sizeof(nd_cmd.cmd.passphrase));
>  	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
> @@ -401,22 +412,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
>  	}
>  }
>  
> -/*
> - * TODO: define a cross arch wbinvd equivalent when/if
> - * NVDIMM_FAMILY_INTEL command support arrives on another arch.
> - */
> -#ifdef CONFIG_X86
> -static void nvdimm_invalidate_cache(void)
> -{
> -	wbinvd_on_all_cpus();
> -}
> -#else
> -static void nvdimm_invalidate_cache(void)
> -{
> -	WARN_ON_ONCE("cache invalidation required after unlock\n");
> -}
> -#endif
> -
>  static const struct nvdimm_security_ops __intel_security_ops = {
>  	.get_flags = intel_security_flags,
>  	.freeze = intel_security_freeze,
> diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
> index 4f07afacbc23..89f310f92498 100644
> --- a/include/asm-generic/cacheflush.h
> +++ b/include/asm-generic/cacheflush.h
> @@ -115,4 +115,35 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
>  	memcpy(dst, src, len)
>  #endif
>  
> +/*
> + * Flush the entire cache hierarchy across all CPUs, however:
> + *
> + *       YOU DO NOT WANT TO USE THIS FUNCTION.
> + *
> + * It is considered a big hammer and can affect overall
> + * system performance and increase latency/response times.
> + * As such it is not for general usage, but for specific use
> + * cases involving instantaneously invalidating wide swaths
> + * of memory on bare metal.
> + *
> + * Unlike the APIs above, this function can be defined on
> + * architectures which have VIPT or PIPT caches, and is thus
> + * beyond the scope of virtual-to-physical mappings/page
> + * tables changing.
> + *
> + * The limitation here is that the architectures that make
> + * use of it must can actually comply with the semantics,

	"must can"?

Did you mean "must"?

> + * such as those whose caches are in a consistent state. The
> + * caller can verify the situation early on.
> + */
> +#ifndef flush_all_caches
> +# define flush_all_caches_capable() false
> +static inline void flush_all_caches(void)
> +{
> +	WARN_ON_ONCE("cache invalidation required\n");
> +}

With the addition of flush_all_caches_capable(), will flush_all_caches()
ever be called?
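
(To make the question concrete, here is a minimal sketch of the caller
pattern this patch establishes in intel.c; hypothetical_unlock() is
illustrative only, not a function from the patch:

	/* assumes <linux/cacheflush.h> for flush_all_caches*() */
	static int hypothetical_unlock(void)
	{
		if (!flush_all_caches_capable())
			return -EINVAL;	/* bail out early on !x86 */

		/* ... issue the unlock command to the device ... */

		/* only reached when flush_all_caches_capable() is true */
		flush_all_caches();
		return 0;
	}

With that pattern, the WARN_ON_ONCE() in the generic stub looks
unreachable: every caller has already returned -EINVAL on architectures
where flush_all_caches() is the stub.)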

Ira

> +#else
> +# define flush_all_caches_capable() true
> +#endif
> +
>  #endif /* _ASM_GENERIC_CACHEFLUSH_H */
> -- 
> 2.37.2
> 
