Message-ID: <20250827200002.GD2206304@nvidia.com>
Date: Wed, 27 Aug 2025 17:00:02 -0300
From: Jason Gunthorpe <jgg@...dia.com>
To: Nicolin Chen <nicolinc@...dia.com>
Cc: will@...nel.org, robin.murphy@....com, joro@...tes.org,
	jean-philippe@...aro.org, miko.lenczewski@....com,
	balbirs@...dia.com, peterz@...radead.org, smostafa@...gle.com,
	kevin.tian@...el.com, praan@...gle.com, zhangzekun11@...wei.com,
	linux-arm-kernel@...ts.infradead.org, iommu@...ts.linux.dev,
	linux-kernel@...r.kernel.org, patches@...ts.linux.dev
Subject: Re: [PATCH rfcv1 4/8] iommu/arm-smmu-v3: Introduce a per-domain
 arm_smmu_invs array

On Wed, Aug 13, 2025 at 06:25:35PM -0700, Nicolin Chen wrote:
> +struct arm_smmu_invs *arm_smmu_invs_add(struct arm_smmu_invs *old_invs,
> +					struct arm_smmu_invs *add_invs)
> +{

It turns out it is fairly easy and cheap to make add_invs come out
sorted, by sorting the ids once during probe (num_ids is small, so the
cost is negligible):

@@ -3983,6 +3989,14 @@ static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
        return 0;
 }
 
+static int arm_smmu_ids_cmp(const void *_l, const void *_r)
+{
+       const typeof_member(struct iommu_fwspec, ids[0]) *l = _l;
+       const typeof_member(struct iommu_fwspec, ids[0]) *r = _r;
+
+       return cmp_int(*l, *r);
+}
+
 static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
                                  struct arm_smmu_master *master)
 {
@@ -4011,6 +4025,13 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
                return PTR_ERR(master->invs);
        }
 
+       /*
+        * Put the ids into order so that arm_smmu_build_invs() can trivially
+        * generate sorted lists.
+        */
+       sort_nonatomic(fwspec->ids, fwspec->num_ids, sizeof(fwspec->ids[0]),
+                      arm_smmu_ids_cmp, NULL);
+
        mutex_lock(&smmu->streams_mutex);
        for (i = 0; i < fwspec->num_ids; i++) {
                struct arm_smmu_stream *new_stream = &master->streams[i];

Then arm_smmu_build_invs() trivially makes sorted lists.

So if old_invs and add_invs are both sorted lists, we can use
variations on the classic merge of two sorted lists, which is both
simpler to understand and faster: a single O(n + m) walk instead of
searching one list for every entry of the other:

/*
 * Merge compare of two sorted list items. If one side is already past the end
 * of its list, return a result that lets the other side run out the
 * iteration.
 */
static inline int arm_smmu_invs_merge_cmp(const struct arm_smmu_invs *lhs,
					  size_t lhs_idx,
					  const struct arm_smmu_invs *rhs,
					  size_t rhs_idx)
{
	if (lhs_idx != lhs->num_invs && rhs_idx != rhs->num_invs)
		return arm_smmu_invs_cmp(&lhs->inv[lhs_idx],
					 &rhs->inv[rhs_idx]);
	if (lhs_idx != lhs->num_invs)
		return -1;
	return 1;
}

struct arm_smmu_invs *arm_smmu_invs_add(struct arm_smmu_invs *invs,
					struct arm_smmu_invs *add_invs)
{
	struct arm_smmu_invs *new_invs;
	struct arm_smmu_inv *new;
	size_t to_add = 0;
	size_t to_del = 0;
	size_t i, j;

	for (i = 0, j = 0; i != invs->num_invs || j != add_invs->num_invs;) {
		int cmp = arm_smmu_invs_merge_cmp(invs, i, add_invs, j);

		if (cmp < 0) {
			/* not found in add_invs, leave alone */
			if (!refcount_read(&invs->inv[i].users))
				to_del++;
			i++;
		} else if (cmp == 0) {
			/* same item */
			i++;
			j++;
		} else {
			/* unique to add_invs */
			to_add++;
			j++;
		}
	}

	new_invs = arm_smmu_invs_alloc(invs->num_invs + to_add - to_del);
	if (IS_ERR(new_invs))
		return new_invs;

	new = new_invs->inv;
	for (i = 0, j = 0; i != invs->num_invs || j != add_invs->num_invs;) {
		int cmp = arm_smmu_invs_merge_cmp(invs, i, add_invs, j);

		/*
		 * Skip entries whose users count has dropped to zero; if
		 * add_invs holds a matching entry, the cmp > 0 case below
		 * re-inserts it fresh on a later iteration.
		 */
		if (cmp <= 0 && !refcount_read(&invs->inv[i].users)) {
			i++;
			continue;
		}

		if (cmp < 0) {
			*new = invs->inv[i];
			i++;
		} else if (cmp == 0) {
			*new = invs->inv[i];
			refcount_inc(&new->users);
			i++;
			j++;
		} else {
			*new = add_invs->inv[j];
			refcount_set(&new->users, 1);
			j++;
		}
		if (arm_smmu_inv_is_ats(new))
			new_invs->has_ats = true;
		new++;
	}

	WARN_ON(new != new_invs->inv + new_invs->num_invs);

	/*
	 * A sorted array allows batching invalidations together for fewer SYNCs.
	 * Also, ATS must follow the ASID/VMID invalidation SYNC.
	 */
	sort_nonatomic(new_invs->inv, new_invs->num_invs,
		       sizeof(new_invs->inv[0]), arm_smmu_invs_cmp, NULL);
	return new_invs;
}
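
To see the two-pass shape in isolation, here is a toy userspace model
of the same merge, with plain ints standing in for the sort keys and a
bare counter in place of refcount_t. It is only an illustration under
those stand-in types, not the kernel code:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct inv { int key; int users; };            /* toy arm_smmu_inv */
struct invs { size_t num; struct inv *inv; };  /* toy arm_smmu_invs */

/* Same idea as arm_smmu_invs_merge_cmp(): an exhausted side loses. */
static int merge_cmp(const struct invs *l, size_t li,
		     const struct invs *r, size_t ri)
{
	if (li != l->num && ri != r->num)
		return (l->inv[li].key > r->inv[ri].key) -
		       (l->inv[li].key < r->inv[ri].key);
	return li != l->num ? -1 : 1;
}

static struct invs *invs_add(const struct invs *invs,
			     const struct invs *add)
{
	size_t to_add = 0, to_del = 0, i, j;
	struct invs *out;
	struct inv *new;

	/* Pass 1: size the result before touching anything. */
	for (i = 0, j = 0; i != invs->num || j != add->num;) {
		int cmp = merge_cmp(invs, i, add, j);

		if (cmp < 0) {
			if (!invs->inv[i].users)
				to_del++;	/* dead entry, drop it */
			i++;
		} else if (cmp == 0) {
			i++;
			j++;
		} else {
			to_add++;
			j++;
		}
	}

	out = malloc(sizeof(*out));
	out->num = invs->num + to_add - to_del;
	out->inv = calloc(out->num, sizeof(*out->inv));
	new = out->inv;

	/* Pass 2: the same walk again, now filling the new array. */
	for (i = 0, j = 0; i != invs->num || j != add->num;) {
		int cmp = merge_cmp(invs, i, add, j);

		if (cmp <= 0 && !invs->inv[i].users) {
			i++;		/* dead entry, not copied */
			continue;
		}
		if (cmp < 0) {
			*new = invs->inv[i++];
		} else if (cmp == 0) {
			*new = invs->inv[i++];
			new->users++;
			j++;
		} else {
			*new = add->inv[j++];
			new->users = 1;
		}
		new++;
	}
	assert(new == out->inv + out->num);
	return out;
}

int main(void)
{
	struct inv old_e[] = { { 1, 1 }, { 3, 0 }, { 5, 2 } };
	struct inv add_e[] = { { 3, 0 }, { 4, 0 } };
	struct invs old = { 3, old_e }, add = { 2, add_e };
	struct invs *out = invs_add(&old, &add);

	for (size_t k = 0; k != out->num; k++)
		printf("key=%d users=%d\n", out->inv[k].key,
		       out->inv[k].users);
	/* key=1 users=1, key=3 users=1 (re-added fresh), key=4 users=1,
	   key=5 users=2: sorted, and sized exactly by pass 1. */
	free(out->inv);
	free(out);
	return 0;
}

Because both inputs are sorted, the merge already emits the entries in
order, so the trailing sort_nonatomic() only reasserts the invariant.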

size_t arm_smmu_invs_dec(struct arm_smmu_invs *invs,
			 struct arm_smmu_invs *dec_invs)
{
	size_t to_del = 0;
	size_t i, j;

	for (i = 0, j = 0; i != invs->num_invs || j != dec_invs->num_invs;) {
		int cmp = arm_smmu_invs_merge_cmp(invs, i, dec_invs, j);

		if (cmp < 0) {
			/* not found in dec_invs, leave alone */
			i++;
		} else if (cmp == 0) {
			/* same item */
			if (refcount_dec_and_test(&invs->inv[i].users)) {
				dec_invs->inv[j].todel = true;
				to_del++;
			}
			i++;
			j++;
		} else {
			/* item in dec_invs is not in invs? */
			WARN_ON(true);
			j++;
		}
	}
	return to_del;
}
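
Note arm_smmu_invs_dec() leaves the dead entries in place, only
flagging them in dec_invs and returning the count; removal happens
later (e.g. the users == 0 filtering in arm_smmu_invs_add() above). For
the toy model, a hypothetical compaction step could look like this; it
is not from the patch, just a sketch of what a deferred purge might do:

/*
 * Hypothetical compaction for the toy model above. Keeps only live
 * entries; the relative (sorted) order is preserved.
 */
static void invs_purge(struct invs *invs, size_t to_del)
{
	struct inv *dst = invs->inv;
	size_t i;

	for (i = 0; i != invs->num; i++)
		if (invs->inv[i].users)
			*dst++ = invs->inv[i];

	assert((size_t)(dst - invs->inv) == invs->num - to_del);
	invs->num -= to_del;
}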
