Message-ID: <20230321153407.GB2272870@hirez.programming.kicks-ass.net>
Date:   Tue, 21 Mar 2023 16:34:07 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Anna-Maria Behnsen <anna-maria@...utronix.de>
Cc:     linux-kernel@...r.kernel.org, John Stultz <jstultz@...gle.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Eric Dumazet <edumazet@...gle.com>,
        "Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
        Arjan van de Ven <arjan@...radead.org>,
        "Paul E . McKenney" <paulmck@...nel.org>,
        Frederic Weisbecker <fweisbec@...il.com>,
        Rik van Riel <riel@...riel.com>
Subject: Re: [PATCH v5 16/18] timer: Implement the hierarchical pull model

On Wed, Mar 01, 2023 at 03:17:42PM +0100, Anna-Maria Behnsen wrote:
> +static bool tmigr_inactive_up(struct tmigr_group *group,
> +			      struct tmigr_group *child,
> +			      void *ptr)
> +{
> +	union tmigr_state curstate, newstate;
> +	struct tmigr_walk *data = ptr;
> +	bool walk_done;
> +	u32 childmask;
> +
> +	childmask = data->childmask;
> +	newstate = curstate = data->groupstate;
> +
> +retry:
> +	walk_done = true;
> +
> +	/* Reset active bit when child is no longer active */
> +	if (!data->childstate.active)
> +		newstate.active &= ~(u8)childmask;
> +
> +	if (newstate.migrator == (u8)childmask) {
> +		/*
> +		 * Find a new migrator for the group, because child group
> +		 * is idle!
> +		 */
> +		if (!data->childstate.active) {
> +			unsigned long new_migr_bit, active = newstate.active;
> +
> +			new_migr_bit = find_first_bit(&active, BIT_CNT);
> +
> +			/* Changes need to be propagated */
> +			walk_done = false;
> +
> +			if (new_migr_bit != BIT_CNT)
> +				newstate.migrator = BIT(new_migr_bit);
> +			else
> +				newstate.migrator = TMIGR_NONE;
> +		}
> +	}
> +
> +	newstate.seq++;
> +
> +	DBG_BUG_ON((newstate.migrator != TMIGR_NONE) && !(newstate.active));
> +
> +	if (atomic_cmpxchg(group->migr_state, curstate.state, newstate.state) != curstate.state) {
> +		/*
> +		 * Something changed in the child/parent group in the
> +		 * meantime; reread the state of child and parent. The
> +		 * update of data->childstate is required for event handling.
> +		 */
> +		if (child)
> +			data->childstate.state = atomic_read(child->migr_state);
> +		newstate.state = curstate.state = atomic_read(group->migr_state);
> +
> +		goto retry;
> +	}

Idem -- as with the earlier cmpxchg loop, atomic_try_cmpxchg() updates the
expected value on failure, so this can be written as:

	if (!atomic_try_cmpxchg(group->migr_state, &curstate.state, newstate.state)) {
		newstate.state = curstate.state;
		if (child)
			data->childstate.state = atomic_read(child->migr_state);
		goto retry;
	}
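
For reference, a minimal sketch of the two retry idioms side by side; this is
not from the patch, and compute_new() is just a made-up stand-in for whatever
state update is being done:

	#include <linux/atomic.h>

	/* Stand-in for whatever new state gets computed; purely illustrative. */
	static int compute_new(int old)
	{
		return old + 1;
	}

	/* Open-coded retry loop with atomic_cmpxchg(). */
	static void retry_with_cmpxchg(atomic_t *v)
	{
		int old, new;

		old = atomic_read(v);
	retry:
		new = compute_new(old);
		if (atomic_cmpxchg(v, old, new) != old) {
			/* Lost the race: re-read by hand and try again. */
			old = atomic_read(v);
			goto retry;
		}
	}

	/* Same loop with atomic_try_cmpxchg(). */
	static void retry_with_try_cmpxchg(atomic_t *v)
	{
		int old, new;

		old = atomic_read(v);
		do {
			new = compute_new(old);
			/* On failure 'old' is updated with the current value. */
		} while (!atomic_try_cmpxchg(v, &old, new));
	}

The failure path then gets the fresh value without an extra atomic_read()
(on x86, atomic_try_cmpxchg() can reuse the value the cmpxchg instruction
already returns), and the goto drops out of the loop.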

> +
> +	data->groupstate = newstate;
> +	data->remote = false;
> +
> +	/* Event Handling */
> +	tmigr_update_events(group, child, data);
> +
> +	if (group->parent && (walk_done == false)) {
> +		data->childmask = group->childmask;
> +		data->childstate = newstate;
> +		data->groupstate.state = atomic_read(group->parent->migr_state);
> +	}
> +
> +	/*
> +	 * data->nextexp was set by tmigr_update_events() and contains the
> +	 * expiry of the first global event which needs to be handled
> +	 */
> +	if (data->nextexp != KTIME_MAX) {
> +		DBG_BUG_ON(group->parent);
> +		/*
> +		 * Toplevel path - If this cpu is about to go offline, wake
> +		 * up some random other cpu so it will take over the
> +		 * migrator duty and program its timer properly. Ideally
> +		 * wake the cpu with the closest expiry time, but that's
> +		 * overkill to figure out.
> +		 */
> +		if (!(this_cpu_ptr(&tmigr_cpu)->online)) {
> +			unsigned int cpu = smp_processor_id();
> +
> +			cpu = cpumask_any_but(cpu_online_mask, cpu);
> +			smp_send_reschedule(cpu);
> +		}
> +	}
> +
> +	return walk_done;
> +}
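
For readers without the rest of the series at hand: judging by the fields
used above (active, migrator, seq and the raw state word), the group state
being cmpxchg'ed is presumably a union along these lines; the actual
definition lives in an earlier patch of the series:

	union tmigr_state {
		u32	state;		/* the whole word, used for cmpxchg */
		struct {
			u8	active;		/* bitmask of active children */
			u8	migrator;	/* childmask of the migrator child, or TMIGR_NONE */
			u16	seq;		/* update sequence counter */
		};
	};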
