Date:   Tue, 22 Aug 2017 18:56:38 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     kan.liang@...el.com
Cc:     mingo@...hat.com, linux-kernel@...r.kernel.org, acme@...nel.org,
        jolsa@...hat.com, tglx@...utronix.de, eranian@...gle.com,
        ak@...ux.intel.com
Subject: Re: [PATCH V5] perf: Add PERF_SAMPLE_PHYS_ADDR

On Thu, Aug 17, 2017 at 02:17:23PM -0400, kan.liang@...el.com wrote:
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index a3b873f..6783c69 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -944,6 +944,8 @@ struct perf_sample_data {
>  
>  	struct perf_regs		regs_intr;
>  	u64				stack_user_size;
> +
> +	u64				phys_addr;
>  } ____cacheline_aligned;
>  
>  /* default value for data source */
> @@ -964,6 +966,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
>  	data->weight = 0;
>  	data->data_src.val = PERF_MEM_NA;
>  	data->txn = 0;
> +	data->phys_addr = 0;
>  }

So this is very unfortunate...

struct perf_sample_data {
        u64                        addr;                 /*     0     8 */
        struct perf_raw_record *   raw;                  /*     8     8 */
        struct perf_branch_stack * br_stack;             /*    16     8 */
        u64                        period;               /*    24     8 */
        u64                        weight;               /*    32     8 */
        u64                        txn;                  /*    40     8 */
        union perf_mem_data_src    data_src;             /*    48     8 */
        u64                        type;                 /*    56     8 */
        /* --- cacheline 1 boundary (64 bytes) --- */
        u64                        ip;                   /*    64     8 */
        struct {
                u32                pid;                  /*    72     4 */
                u32                tid;                  /*    76     4 */
        } tid_entry;                                     /*    72     8 */
        u64                        time;                 /*    80     8 */
        u64                        id;                   /*    88     8 */
        u64                        stream_id;            /*    96     8 */
        struct {
                u32                cpu;                  /*   104     4 */
                u32                reserved;             /*   108     4 */
        } cpu_entry;                                     /*   104     8 */
        struct perf_callchain_entry * callchain;         /*   112     8 */
        struct perf_regs           regs_user;            /*   120    16 */
        /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */
        struct pt_regs             regs_user_copy;       /*   136   168 */
        /* --- cacheline 4 boundary (256 bytes) was 48 bytes ago --- */
        struct perf_regs           regs_intr;            /*   304    16 */
        /* --- cacheline 5 boundary (320 bytes) --- */
        u64                        stack_user_size;      /*   320     8 */

        /* size: 384, cachelines: 6, members: 19 */
        /* padding: 56 */
};
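
(For reference, a layout dump like the one above comes from pahole run
against a vmlinux built with debug info, e.g.:

	$ pahole -C perf_sample_data vmlinux

the exact offsets will of course vary with the kernel config.)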


static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

You'll note that perf_sample_data_init() only touches the first cacheline
of the data structure, and your patch just wrecked that. Back when I laid
it out that way, the single-cacheline init made a measurable difference.
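
For what it's worth, a sketch of one way to keep the hot path intact
(this is only an illustration, assuming nothing reads phys_addr before
perf_prepare_sample() runs, and reusing the perf_virt_to_phys() helper
from your patch): drop the store from perf_sample_data_init() and fill
in phys_addr with the other cold members, gated on the sample type:

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
	/* no store to data->phys_addr -- it sits in the last cacheline */
}

and then in perf_prepare_sample():

	/* only dirty the last cacheline when the user asked for it */
	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		data->phys_addr = perf_virt_to_phys(data->addr);

That way the unconditional write outside cacheline 0 disappears, and the
cost is only paid by events that actually set PERF_SAMPLE_PHYS_ADDR.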
