Date:   Tue, 18 Dec 2018 12:14:31 +0100
From:   Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To:     zhe.he@...driver.com
Cc:     acme@...nel.org, ak@...ux.intel.com,
        alexander.shishkin@...ux.intel.com, bp@...en8.de, hpa@...or.com,
        jolsa@...nel.org, jolsa@...hat.com, kan.liang@...ux.intel.com,
        mingo@...hat.com, namhyung@...nel.org, peterz@...radead.org,
        tglx@...utronix.de, x86@...nel.org, linux-kernel@...r.kernel.org,
        linux-rt-users@...r.kernel.org
Subject: Re: [PATCH] perf/x86/intel: Avoid unnecessary reallocations of
 memory allocated in cpu hotplug prepare state

On 2018-12-18 18:30:33 [+0800], zhe.he@...driver.com wrote:
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -3398,13 +3398,16 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
>  	return x86_event_sysfs_show(page, config, event);
>  }
>  
> -struct intel_shared_regs *allocate_shared_regs(int cpu)
> +void allocate_shared_regs(struct intel_shared_regs **pregs, int cpu)
>  {
> -	struct intel_shared_regs *regs;
> +	struct intel_shared_regs *regs = *pregs;
>  	int i;
>  
> -	regs = kzalloc_node(sizeof(struct intel_shared_regs),
> -			    GFP_KERNEL, cpu_to_node(cpu));
> +	if (regs)
> +		memset(regs, 0, sizeof(struct intel_shared_regs));
> +	else
> +		regs = *pregs = kzalloc_node(sizeof(struct intel_shared_regs),
> +					     GFP_KERNEL, cpu_to_node(cpu));
>  	if (regs) {
>  		/*
>  		 * initialize the locks to keep lockdep happy

Why not let the function look up cpu_hw_events itself and drop the
double pointer entirely, i.e. something like:

void allocate_shared_regs(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *regs = cpuc->shared_regs;
	int i;

	/* Reuse the buffer kept from an earlier hotplug cycle, if any. */
	if (!regs)
		regs = kmalloc_node(sizeof(struct intel_shared_regs),
				    GFP_KERNEL, cpu_to_node(cpu));
	if (!regs)
		return;

	memset(regs, 0, sizeof(struct intel_shared_regs));
	/* initialize the locks to keep lockdep happy */
	for (i = 0; i < EXTRA_REG_MAX; i++)
		raw_spin_lock_init(&regs->regs[i].lock);

	regs->core_id = -1;
	cpuc->shared_regs = regs;
}


> @@ -3414,20 +3417,21 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
>  
>  		regs->core_id = -1;
>  	}
> -	return regs;
>  }
>  
> -static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
> +static void allocate_excl_cntrs(struct intel_excl_cntrs **pc, int cpu)
>  {
> -	struct intel_excl_cntrs *c;
> +	struct intel_excl_cntrs *c = *pc;
>  
> -	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
> -			 GFP_KERNEL, cpu_to_node(cpu));
> +	if (c)
> +		memset(c, 0, sizeof(struct intel_excl_cntrs));
> +	else
> +		c = *pc = kzalloc_node(sizeof(struct intel_excl_cntrs),
> +				       GFP_KERNEL, cpu_to_node(cpu));
>  	if (c) {
>  		raw_spin_lock_init(&c->lock);
>  		c->core_id = -1;
>  	}
> -	return c;
>  }

static void allocate_excl_cntrs(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_excl_cntrs *c = cpuc->excl_cntrs;

	if (!c)
		c = kmalloc_node(sizeof(struct intel_excl_cntrs),
				 GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return;

	memset(c, 0, sizeof(struct intel_excl_cntrs));
	raw_spin_lock_init(&c->lock);
	c->core_id = -1;
	cpuc->excl_cntrs = c;
}


>  static void intel_pmu_cpu_dying(int cpu)
>  {
> -	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> -	struct intel_shared_regs *pc;
> -
> -	pc = cpuc->shared_regs;
> -	if (pc) {
> -		if (pc->core_id == -1 || --pc->refcnt == 0)

I think the ->refcnt member can go, too. With the kfree() here gone it
is only ever incremented and never read, so the increment serves no
purpose (rough sketch of that spot below, after the quoted hunk).

> -			kfree(pc);
> -		cpuc->shared_regs = NULL;
> -	}
> -
> -	free_excl_cntrs(cpu);
> -
>  	fini_debug_store_on_cpu(cpu);
>  
>  	if (x86_pmu.counter_freezing)

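For reference, a rough from-memory sketch of what I believe is the only
remaining user of ->refcnt, the sibling-sharing path in
intel_pmu_cpu_starting(); the exact code in the tree this patch is
against may differ slightly. Once the free on the dying path is
removed, nothing ever reads the counter, so the increment (and the
struct member) can simply go away:

static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	/* ... */

	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				/*
				 * Adopt the sibling's shared_regs
				 * (handling of this CPU's own buffer
				 * omitted in this sketch).
				 */
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		/* cpuc->shared_regs->refcnt++;  <- no reader left, drop it */
	}

	/* ... rest of the function unchanged ... */
}
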
Sebastian
