Message-Id: <20240531131142.1716-2-ilpo.jarvinen@linux.intel.com>
Date: Fri, 31 May 2024 16:11:27 +0300
From: Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>
To: linux-kselftest@...r.kernel.org,
Reinette Chatre <reinette.chatre@...el.com>,
Shuah Khan <shuah@...nel.org>,
Babu Moger <babu.moger@....com>,
Maciej Wieczór-Retman <maciej.wieczor-retman@...el.com>
Cc: linux-kernel@...r.kernel.org,
Fenghua Yu <fenghua.yu@...el.com>,
Shuah Khan <skhan@...uxfoundation.org>,
Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>
Subject: [PATCH v5 01/16] selftests/resctrl: Fix closing IMC fds on error and open-code R+W instead of loops

The IMC perf fd close() calls are missing from all error paths. In
addition, get_mem_bw_imc() handles the fds in a for loop, but the
close() calls use the two fixed indexes READ and WRITE.

Open-code the inner for loops into explicit READ and WRITE entries for
clarity, and add a function that close()s the IMC fds properly in all
cases.
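
As an illustration of the resulting pattern: initialize every
descriptor to a -1 sentinel, then funnel all error paths through a
single cleanup helper that skips the unopened ones. A minimal
standalone sketch of that idiom (hypothetical names, not the selftest
code itself):

#include <fcntl.h>
#include <unistd.h>

static int fds[2] = { -1, -1 };	/* stand-in for imc_counters_config fds */

static void close_fds(void)
{
	int i;

	for (i = 0; i < 2; i++)
		if (fds[i] != -1)	/* skip descriptors never opened */
			close(fds[i]);
}

static int do_work(void)
{
	fds[0] = open("/dev/null", O_RDONLY);
	if (fds[0] < 0)
		goto err;	/* fds[1] is still -1, close_fds() skips it */
	fds[1] = open("/dev/null", O_RDONLY);
	if (fds[1] < 0)
		goto err;

	/* ... use the descriptors ... */

	close_fds();
	return 0;

err:
	close_fds();
	return -1;
}
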
Fixes: 7f4d257e3a2a ("selftests/resctrl: Add callback to start a benchmark")
Suggested-by: Reinette Chatre <reinette.chatre@...el.com>
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@...el.com>
---
v4:
- Merge with the close() fix patch
- Add a loop to reset imc_counters_config fds to -1 to be able to know
  which need closing
- Introduce perf_close_imc_mem_bw() to close fds
v3:
- Rework entirely, use open-coding instead of for loops for clarity
---
tools/testing/selftests/resctrl/resctrl_val.c | 54 ++++++++++++-------
1 file changed, 36 insertions(+), 18 deletions(-)
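
For context on the counter lifecycle the diff below reworks (open the
fds, reset and enable the counters, sleep for the measurement window,
disable, read, close), here is a sketch of driving one perf event fd
with the standard perf_event_open(2) ioctls. The event selection is
illustrative only; the selftest programs uncore iMC events, not the
generic hardware event used here:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;		/* illustrative event */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;

	/* perf_event_open() has no glibc wrapper; use the raw syscall. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);	/* zero the counter */
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* start counting */
	sleep(1);				/* measurement window */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);	/* stop counting */

	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("count: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}

The membw_ioctl_perf_event_ioc_reset_enable() and
membw_ioctl_perf_event_ioc_disable() wrappers in the diff presumably
issue the same reset/enable/disable ioctls on each iMC fd.
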
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index 445f306d4c2f..f55f5989de72 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -293,6 +293,18 @@ static int initialize_mem_bw_imc(void)
 	return 0;
 }
 
+static void perf_close_imc_mem_bw(void)
+{
+	int mc;
+
+	for (mc = 0; mc < imcs; mc++) {
+		if (imc_counters_config[mc][READ].fd != -1)
+			close(imc_counters_config[mc][READ].fd);
+		if (imc_counters_config[mc][WRITE].fd != -1)
+			close(imc_counters_config[mc][WRITE].fd);
+	}
+}
+
 /*
  * get_mem_bw_imc:	Memory band width as reported by iMC counters
  * @cpu_no:		CPU number that the benchmark PID is binded to
@@ -306,26 +318,33 @@ static int initialize_mem_bw_imc(void)
 static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
 {
 	float reads, writes, of_mul_read, of_mul_write;
-	int imc, j, ret;
+	int imc, ret;
+
+	for (imc = 0; imc < imcs; imc++) {
+		imc_counters_config[imc][READ].fd = -1;
+		imc_counters_config[imc][WRITE].fd = -1;
+	}
 
 	/* Start all iMC counters to log values (both read and write) */
 	reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
 	for (imc = 0; imc < imcs; imc++) {
-		for (j = 0; j < 2; j++) {
-			ret = open_perf_event(imc, cpu_no, j);
-			if (ret)
-				return -1;
-		}
-		for (j = 0; j < 2; j++)
-			membw_ioctl_perf_event_ioc_reset_enable(imc, j);
+		ret = open_perf_event(imc, cpu_no, READ);
+		if (ret)
+			goto close_fds;
+		ret = open_perf_event(imc, cpu_no, WRITE);
+		if (ret)
+			goto close_fds;
+
+		membw_ioctl_perf_event_ioc_reset_enable(imc, READ);
+		membw_ioctl_perf_event_ioc_reset_enable(imc, WRITE);
 	}
 
 	sleep(1);
 
 	/* Stop counters after a second to get results (both read and write) */
 	for (imc = 0; imc < imcs; imc++) {
-		for (j = 0; j < 2; j++)
-			membw_ioctl_perf_event_ioc_disable(imc, j);
+		membw_ioctl_perf_event_ioc_disable(imc, READ);
+		membw_ioctl_perf_event_ioc_disable(imc, WRITE);
 	}
 
 	/*
@@ -341,15 +360,13 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
 		if (read(r->fd, &r->return_value,
 			 sizeof(struct membw_read_format)) == -1) {
 			ksft_perror("Couldn't get read b/w through iMC");
-
-			return -1;
+			goto close_fds;
 		}
 
 		if (read(w->fd, &w->return_value,
 			 sizeof(struct membw_read_format)) == -1) {
 			ksft_perror("Couldn't get write bw through iMC");
-
-			return -1;
+			goto close_fds;
 		}
 
 		__u64 r_time_enabled = r->return_value.time_enabled;
@@ -369,10 +386,7 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
 		writes += w->return_value.value * of_mul_write * SCALE;
 	}
 
-	for (imc = 0; imc < imcs; imc++) {
-		close(imc_counters_config[imc][READ].fd);
-		close(imc_counters_config[imc][WRITE].fd);
-	}
+	perf_close_imc_mem_bw();
 
 	if (strcmp(bw_report, "reads") == 0) {
 		*bw_imc = reads;
@@ -386,6 +400,10 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
 	*bw_imc = reads + writes;
 
 	return 0;
+
+close_fds:
+	perf_close_imc_mem_bw();
+	return -1;
 }
 
 void set_mbm_path(const char *ctrlgrp, const char *mongrp, int domain_id)
--
2.39.2