Message-ID: <9hhy4kmvxro.fsf@e105922-lin.cambridge.arm.com>
Date:	Mon, 18 May 2015 14:08:27 +0100
From:	Punit Agrawal <punit.agrawal@....com>
To:	"Suzuki K. Poulose" <suzuki.poulose@....com>
Cc:	linux-arm-kernel@...ts.infradead.org,
	Mark Rutland <mark.rutland@....com>,
	devicetree@...r.kernel.org,
	Lorenzo Pieralisi <lorenzo.pieralisi@....com>,
	Arnd Bergmann <arnd@...db.de>, Pawel Moll <pawel.moll@....com>,
	Will Deacon <will.deacon@....com>,
	linux-kernel@...r.kernel.org, arm@...nel.org,
	Olof Johansson <olof@...om.net>
Subject: Re: [PATCH 4/7] arm-cci: Abstract handling for CCI events

"Suzuki K. Poulose" <suzuki.poulose@....com> writes:

> From: "Suzuki K. Poulose" <suzuki.poulose@....com>
>
> Given that each CCI model has a different set of interfaces and
> associated events, it is better to abstract the validation of the
> event codes to make it easier to add support for a new CCI model.
>
> This patch also abstracts the mapping of a given event to a counter,
> as there are special-purpose counters for certain events.
>
> We assume that the fixed hardware counters are always mapped at the
> beginning, so that we can use cci_pmu->model->fixed_hw_cntrs as an
> upper bound on a given idx to check whether we need to program the
> counter for an event.
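(To make the abstraction concrete for future models: adding support for
another CCI should now only mean filling in the per-model hooks. A
minimal sketch, with an entry name and callbacks that are purely
illustrative rather than taken from this series:

	[CCI_REV_NEW] = {			/* hypothetical model */
		.name = "CCI_new",
		.fixed_hw_cntrs = 1,		/* e.g. one cycle counter */
		.num_hw_cntrs = 4,
		.validate_hw_event = cci_new_validate_hw_event,
		.get_event_idx = cci_new_get_event_idx,
	},

A model with no special-purpose counters could leave .get_event_idx
unset and rely on the generic free-counter scan further down.)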
>
> Cc: Punit Agrawal <punit.agrawal@....com>
> Cc: Mark Rutland <mark.rutland@....com>
> Cc: Will Deacon <will.deacon@....com>
> Signed-off-by: Suzuki K. Poulose <suzuki.poulose@....com>

Acked-by: Punit Agrawal <punit.agrawal@....com>

> ---
>  drivers/bus/arm-cci.c |   78 ++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 55 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
> index 82d5681..4b9a8d3 100644
> --- a/drivers/bus/arm-cci.c
> +++ b/drivers/bus/arm-cci.c
> @@ -107,6 +107,7 @@ struct cci_pmu_hw_events {
>  	raw_spinlock_t pmu_lock;
>  };
>  
> +struct cci_pmu;
>  /*
>   * struct cci_pmu_model:
>   * @fixed_hw_cntrs - Number of fixed event counters
> @@ -119,6 +120,8 @@ struct cci_pmu_model {
>  	u32 num_hw_cntrs;
>  	u32 cntr_size;
>  	struct event_range event_ranges[CCI_IF_MAX];
> +	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
> +	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
>  };
>  
>  static struct cci_pmu_model cci_pmu_models[];
> @@ -189,7 +192,29 @@ enum cci400_perf_events {
>  #define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
>  #define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
>  
> -static int pmu_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
> +static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
> +				struct cci_pmu_hw_events *hw,
> +				unsigned long cci_event)
> +{
> +	int idx;
> +
> +	/* cycles event idx is fixed */
> +	if (cci_event == CCI_PMU_CYCLES) {
> +		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
> +			return -EAGAIN;
> +
> +		return CCI_PMU_CYCLE_CNTR_IDX;
> +	}
> +
> +	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
> +		if (!test_and_set_bit(idx, hw->used_mask))
> +			return idx;
> +
> +	/* No counters available */
> +	return -EAGAIN;
> +}
> +
> +static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
>  {
>  	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
>  	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
> @@ -198,6 +223,9 @@ static int pmu_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event
>  	if (hw_event & ~CCI_PMU_EVENT_MASK)
>  		return -ENOENT;
>  
> +	if (hw_event == CCI_PMU_CYCLES)
> +		return hw_event;
> +
>  	switch (ev_source) {
>  	case CCI_PORT_S0:
>  	case CCI_PORT_S1:
> @@ -289,18 +317,14 @@ static u32 pmu_get_max_counters(void)
>  static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
>  {
>  	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
> -	struct hw_perf_event *hw_event = &event->hw;
> -	unsigned long cci_event = hw_event->config_base;
> +	unsigned long cci_event = event->hw.config_base;
>  	int idx;
>  
> -	if (cci_event == CCI_PMU_CYCLES) {
> -		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
> -			return -EAGAIN;
> +	if (cci_pmu->model->get_event_idx)
> +		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
>  
> -		return CCI_PMU_CYCLE_CNTR_IDX;
> -	}
> -
> -	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
> +	/* Generic code to find an unused idx from the mask */
> +	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
>  		if (!test_and_set_bit(idx, hw->used_mask))
>  			return idx;
>  
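With the hook wired up, counter allocation on CCI-400 goes through
cci400_get_event_idx() above: a cycles event claims the dedicated
counter, anything else takes the first free programmable one. Assuming
the cycle counter sits at CCI_PMU_CYCLE_CNTR_IDX with the programmable
counters starting at CCI_PMU_CNTR0_IDX, an allocation sequence would
look like (some_port_event is a placeholder, not a real define):

	cci400_get_event_idx(cci_pmu, hw, CCI_PMU_CYCLES);	/* -> CCI_PMU_CYCLE_CNTR_IDX */
	cci400_get_event_idx(cci_pmu, hw, some_port_event);	/* -> CCI_PMU_CNTR0_IDX */
	cci400_get_event_idx(cci_pmu, hw, CCI_PMU_CYCLES);	/* -> -EAGAIN, cycle counter taken */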
> @@ -310,19 +334,13 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
>  
>  static int pmu_map_event(struct perf_event *event)
>  {
> -	int mapping;
> -	unsigned long config = event->attr.config;
> +	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
>  
> -	if (event->attr.type < PERF_TYPE_MAX)
> +	if (event->attr.type < PERF_TYPE_MAX ||
> +			!cci_pmu->model->validate_hw_event)
>  		return -ENOENT;
>  
> -	if (config == CCI_PMU_CYCLES)
> -		mapping = config;
> -	else
> -		mapping = pmu_validate_hw_event(to_cci_pmu(event->pmu),
> -							config);
> -
> -	return mapping;
> +	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
>  }
>  
>  static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
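One consequence of moving the cycles shortcut into the validator:
pmu_map_event() now returns either the validated event code or a
negative errno straight from the model hook, so callers can treat both
uniformly. Illustrative caller-side handling, not verbatim driver code:

	mapping = pmu_map_event(event);
	if (mapping < 0)
		return mapping;		/* e.g. -ENOENT from the model hook */
	event->hw.config_base = mapping;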
> @@ -450,7 +468,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
>  	 * This should work regardless of whether we have per-counter overflow
>  	 * interrupt or a combined overflow interrupt.
>  	 */
> -	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
> +	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
>  		struct perf_event *event = events->events[idx];
>  		struct hw_perf_event *hw_counter;
>  
> @@ -538,6 +556,16 @@ static void cci_pmu_disable(struct pmu *pmu)
>  	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
>  }
>  
> +/*
> + * Check if the idx represents a non-programmable counter.
> + * All the fixed event counters are mapped before the programmable
> + * counters.
> + */
> +static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
> +{
> +	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
> +}
> +
>  static void cci_pmu_start(struct perf_event *event, int pmu_flags)
>  {
>  	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
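The invariant in the comment above is what keeps this helper a single
bound check: the fixed counters occupy the low indices. For CCI-400,
assuming fixed_hw_cntrs == 1 (just the cycle counter at idx 0), it
behaves as:

	pmu_fixed_hw_idx(cci_pmu, 0);	/* true:  cycle counter, skip pmu_set_event() */
	pmu_fixed_hw_idx(cci_pmu, 1);	/* false: programmable, program it below */

which is exactly what the cci_pmu_start() change below relies on.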
> @@ -562,8 +590,8 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags)
>  
>  	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
>  
> -	/* Configure the event to count, unless you are counting cycles */
> -	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
> +	/* Configure the counter unless you are counting a fixed event */
> +	if (!pmu_fixed_hw_idx(cci_pmu, idx))
>  		pmu_set_event(cci_pmu, idx, hwc->config_base);
>  
>  	pmu_event_set_period(event);
> @@ -907,6 +935,8 @@ static struct cci_pmu_model cci_pmu_models[] = {
>  				CCI_REV_R0_MASTER_PORT_MAX_EV,
>  			},
>  		},
> +		.validate_hw_event = cci400_validate_hw_event,
> +		.get_event_idx = cci400_get_event_idx,
>  	},
>  	[CCI_REV_R1] = {
>  		.name = "CCI_400_r1",
> @@ -923,6 +953,8 @@ static struct cci_pmu_model cci_pmu_models[] = {
>  				CCI_REV_R1_MASTER_PORT_MAX_EV,
>  			},
>  		},
> +		.validate_hw_event = cci400_validate_hw_event,
> +		.get_event_idx = cci400_get_event_idx,
>  	},
>  };