Date:	Tue, 1 Mar 2011 08:43:12 +0100
From:	Stephane Eranian <eranian@...gle.com>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Lin Ming <ming.m.lin@...el.com>, Ingo Molnar <mingo@...e.hu>,
	Andi Kleen <andi@...stfloor.org>,
	lkml <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v2 -tip] perf: x86, add SandyBridge support

On Mon, Feb 28, 2011 at 10:15 AM, Peter Zijlstra <a.p.zijlstra@...llo.nl> wrote:
> On Mon, 2011-02-28 at 15:22 +0800, Lin Ming wrote:
>> This patch adds basic SandyBridge support, including hardware cache
>> events and PEBS events support.
>>
>> LLC-* hardware cache events don't work for now; they depend on the
>> offcore patches.
>
> What's the status of those, Stephane reported some problems last I
> remember?
>
I tried the trick I mentioned and it seems to work.

Something like the code below, using a new hwc->extra_alloc field.
We could probably find a better name for that field.

static struct event_constraint *
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
        struct event_constraint *c;
        struct intel_percore *pc;
        struct er_account *era;
        int i;
        int free_slot;
        int found;

        if (!x86_pmu.percore_constraints)
                return NULL;

        /* This event already holds a reference to a shared extra reg. */
        if (hwc->extra_alloc)
                return NULL;

        for (c = x86_pmu.percore_constraints; c->cmask; c++) {
                if (e != c->code)
                        continue;

                /*
                 * Allocate resource per core.
                 */
                c = NULL;
                pc = cpuc->per_core;
                if (!pc)
                        break;
                /* Assume a conflict until sharing or a free slot is found. */
                c = &emptyconstraint;
                raw_spin_lock(&pc->lock);
                free_slot = -1;
                found = 0;
                for (i = 0; i < MAX_EXTRA_REGS; i++) {
                        era = &pc->regs[i];
                        if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
                                /* Allow sharing same config */
                                if (hwc->extra_config == era->extra_config) {
                                        era->ref++;
                                        cpuc->percore_used = 1;
                                        hwc->extra_alloc = 1;
                                        c = NULL;
                                }
                                /* else conflict */
                                found = 1;
                                break;
                        } else if (era->ref == 0 && free_slot == -1)
                                free_slot = i;
                }
                if (!found && free_slot != -1) {
                        era = &pc->regs[free_slot];
                        era->ref = 1;
                        era->extra_reg = hwc->extra_reg;
                        era->extra_config = hwc->extra_config;
                        cpuc->percore_used = 1;
                        hwc->extra_alloc = 1;
                        c = NULL;
                }
                raw_spin_unlock(&pc->lock);
                return c;
        }

        return NULL;
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
{
        struct extra_reg *er;
        struct intel_percore *pc;
        struct er_account *era;
        struct hw_perf_event *hwc = &event->hw;
        int i, allref;

        if (!cpuc->percore_used)
                return;

        /* Find the extra reg that this event's config maps to. */
        for (er = x86_pmu.extra_regs; er->msr; er++) {
                if (er->event != (hwc->config & er->config_mask))
                        continue;

                pc = cpuc->per_core;
                raw_spin_lock(&pc->lock);
                for (i = 0; i < MAX_EXTRA_REGS; i++) {
                        era = &pc->regs[i];
                        if (era->ref > 0 &&
                            era->extra_config == hwc->extra_config &&
                            era->extra_reg == er->msr) {
                                era->ref--;
                                hwc->extra_alloc = 0;
                                break;
                        }
                }
                allref = 0;
                for (i = 0; i < MAX_EXTRA_REGS; i++)
                        allref += pc->regs[i].ref;
                if (allref == 0)
                        cpuc->percore_used = 0;
                raw_spin_unlock(&pc->lock);
                break;
        }
}
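
For context (not part of the code above): the allocation in
intel_percore_constraints() and the release in intel_put_event_constraints()
are meant to pair up through the x86_pmu constraint callbacks, roughly:

        .get_event_constraints  = intel_get_event_constraints,
        .put_event_constraints  = intel_put_event_constraints,

with the get path trying the per-core allocation before falling back to the
generic counter constraints.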

>
>>  #define INTEL_EVENT_CONSTRAINT(c, n) \
>>       EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
>> +#define INTEL_EVENT_CONSTRAINT2(c, n)        \
>> +     EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
>
> That's a particularly bad name; how about something like
>
> INTEL_UEVENT_CONSTRAINT or somesuch.
>
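
For reference, the rename would keep the same definition as the quoted
INTEL_EVENT_CONSTRAINT2, just under the name Peter suggests:

#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
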
>> @@ -702,7 +738,13 @@ static void intel_ds_init(void)
>>                       printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
>>                       x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
>>                       x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
>> -                     x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
>> +                     switch (boot_cpu_data.x86_model) {
>> +                     case 42: /* SandyBridge */
>> +                             x86_pmu.pebs_constraints = intel_snb_pebs_events;
>> +                             break;
>> +                     default:
>> +                             x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
>> +                     }
>>                       break;
>>
>>               default:
>
> We already have this massive model switch right after this function;
> we might as well move the PEBS constraint assignment there.
>
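
That would look roughly like this (sketch only, assuming the model switch
lives in intel_pmu_init() and reusing the names from the quoted hunk):

        case 42: /* SandyBridge */
                ...
                x86_pmu.pebs_constraints = intel_snb_pebs_events;
                break;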