lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250827182123.GB2206304@nvidia.com>
Date: Wed, 27 Aug 2025 15:21:23 -0300
From: Jason Gunthorpe <jgg@...dia.com>
To: Nicolin Chen <nicolinc@...dia.com>
Cc: will@...nel.org, robin.murphy@....com, joro@...tes.org,
	jean-philippe@...aro.org, miko.lenczewski@....com,
	balbirs@...dia.com, peterz@...radead.org, smostafa@...gle.com,
	kevin.tian@...el.com, praan@...gle.com, zhangzekun11@...wei.com,
	linux-arm-kernel@...ts.infradead.org, iommu@...ts.linux.dev,
	linux-kernel@...r.kernel.org, patches@...ts.linux.dev
Subject: Re: [PATCH rfcv1 6/8] iommu/arm-smmu-v3: Populate smmu_domain->invs
 when attaching masters

On Wed, Aug 13, 2025 at 06:25:37PM -0700, Nicolin Chen wrote:
> +typedef struct arm_smmu_invs *(*invs_fn)(struct arm_smmu_invs *old_invs,
> +					 struct arm_smmu_invs *invs);

There is no reason to pass in fn: this function always calls it as the very
last step, so the caller can simply call it itself.

> +static struct arm_smmu_invs *arm_smmu_build_invs(
> +	struct arm_smmu_invs *old_invs, struct arm_smmu_domain *smmu_domain,
> +	struct arm_smmu_master *master, bool ats, ioasid_t ssid, invs_fn fn)
> +{
> +	const bool e2h = master->smmu->features & ARM_SMMU_FEAT_E2H;
> +	const bool nesting = smmu_domain->nest_parent;
> +	struct arm_smmu_inv *cur = master->invs->inv;
> +	size_t num_invs = 1;
> +	size_t i;
> +
> +	switch (smmu_domain->stage) {
> +	case ARM_SMMU_DOMAIN_SVA:
> +	case ARM_SMMU_DOMAIN_S1:
> +		cur->smmu = master->smmu;
> +		cur->type = INV_TYPE_S1_ASID;
> +		cur->id = smmu_domain->cd.asid;
> +		cur->size_opcode = e2h ? CMDQ_OP_TLBI_EL2_VA :
> +					 CMDQ_OP_TLBI_NH_VA;
> +		cur->nsize_opcode = e2h ? CMDQ_OP_TLBI_EL2_ASID :
> +					  CMDQ_OP_TLBI_NH_ASID;
> +		break;
> +	case ARM_SMMU_DOMAIN_S2:
> +		cur->smmu = master->smmu;
> +		cur->type = INV_TYPE_S2_VMID;
> +		cur->id = smmu_domain->s2_cfg.vmid;
> +		cur->size_opcode = CMDQ_OP_TLBI_S2_IPA;
> +		cur->nsize_opcode = CMDQ_OP_TLBI_S12_VMALL;
> +		break;
> +	default:
> +		WARN_ON(true);
> +		return old_invs;

Return an ERR_PTR instead. Returning something that must not be freed makes
the error flows possibly wrong, or at least overly complex.

> +	}
> +
> +	/* Range-based invalidation requires the leaf pgsize for calculation */
> +	if (master->smmu->features & ARM_SMMU_FEAT_RANGE_INV)
> +		cur->pgsize = __ffs(smmu_domain->domain.pgsize_bitmap);
> +
> +	/* All the nested S1 ASIDs have to be flushed when S2 parent changes */
> +	if (nesting) {
> +		cur = &master->invs->inv[num_invs++];

Don't use both 'cur' as an iterator and 'num_invs' as the position.
Delete num_invs entirely and just use cur.

> +		cur->smmu = master->smmu;
> +		cur->type = INV_TYPE_S2_VMID_S1_CLEAR;
> +		cur->id = smmu_domain->s2_cfg.vmid;
> +		cur->size_opcode = CMDQ_OP_TLBI_NH_ALL;
> +		cur->nsize_opcode = CMDQ_OP_TLBI_NH_ALL;
> +	}
> +
> +	if (ats) {
> +		for (i = 0, cur++; i < master->num_streams; i++) {
> +			cur->smmu = master->smmu;
> +			/*
> +			 * If an S2 used as a nesting parent is changed we have
> +			 * no option but to completely flush the ATC.
> +			 */
> +			cur->type = nesting ? INV_TYPE_ATS_FULL : INV_TYPE_ATS;
> +			cur->id = master->streams[i].id;
> +			cur->ssid = ssid;
> +			cur->size_opcode = CMDQ_OP_ATC_INV;
> +			cur->nsize_opcode = CMDQ_OP_ATC_INV;
> +		}
> +		num_invs += master->num_streams;
> +	}
> +
> +	master->invs->num_invs = num_invs;

Like this:

	master->invs->num_invs = cur - master->invs->inv;

> +static int arm_smmu_attach_prepare_invs(struct arm_smmu_attach_state *state,
> +					struct arm_smmu_domain *new_smmu_domain)
> +{

How about a comment:

/*
 * During attachment the invalidation lists on the two domains are sequenced:
 *  1. old domain is invalidating master
 *  2. new and old domain are invalidating master
 *  3. new domain is invalidating master
 *
 * This uses two updated invalidation lists, one with master added to new domain
 * and one with master removed from old domain. Prepare these lists in advance
 * of changing anything. arm_smmu_asid_lock ensures that the invalidation list
 * in the domains doesn't change while we are sequencing to update it.
 */

> +	struct arm_smmu_domain *old_smmu_domain =
> +		to_smmu_domain_devices(state->old_domain);
> +	struct arm_smmu_master *master = state->master;
> +	bool blocking = false;
> +
> +	/* A re-attach case doesn't need to update invs array */
> +	if (new_smmu_domain == old_smmu_domain)
> +		return 0;
> +
> +	if (new_smmu_domain) {

This if wants a comment, it is tricky:

	/*
	 * At this point a NULL domain indicates the domain doesn't use the
	 * IOTLB, see to_smmu_domain_devices().
	 */

> +		state->new_domain_oinvs = rcu_dereference_protected(
> +			new_smmu_domain->invs,
> +			lockdep_is_held(&arm_smmu_asid_lock));
> +		state->new_domain_ninvs = arm_smmu_build_invs(
> +			state->new_domain_oinvs, new_smmu_domain, master,
> +			state->ats_enabled, state->ssid, arm_smmu_invs_add);
> +		if (IS_ERR(state->new_domain_ninvs))
> +			return PTR_ERR(state->new_domain_ninvs);
> +		state->new_domain_invs = &new_smmu_domain->invs;
> +		blocking = new_smmu_domain->domain.type == IOMMU_DOMAIN_BLOCKED;
> +	}
> +
> +	if (old_smmu_domain) {
> +		state->old_domain_oinvs = rcu_dereference_protected(
> +			old_smmu_domain->invs,
> +			lockdep_is_held(&arm_smmu_asid_lock));
> +		state->old_domain_ninvs = arm_smmu_build_invs(
> +			state->old_domain_oinvs, old_smmu_domain, master,
> +			master->ats_enabled, state->ssid, arm_smmu_invs_del);
> +		if (IS_ERR(state->old_domain_ninvs)) {

Then here, as per the last email, just get rid of invs_del and use
the scratch list master->invs for the next step. So all this goes away:

Jason

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ