Message-Id: <20190624054311.30256-7-hch@lst.de>
Date: Mon, 24 Jun 2019 07:43:00 +0200
From: Christoph Hellwig <hch@....de>
To: Palmer Dabbelt <palmer@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>
Cc: Damien Le Moal <damien.lemoal@....com>,
linux-riscv@...ts.infradead.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 06/17] riscv: refactor the IPI code
Split send_ipi_message() into a send_ipi_mask() helper plus a thin
send_ipi_single() wrapper, and factor the IPI-clearing CSR write out
into clear_ipi().  This prepares for adding native, non-SBI IPI code.
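For illustration only, a minimal sketch of how a native (non-SBI)
backend could later hook into the refactored funnel point.  The
riscv_use_native_ipi flag and riscv_native_send_ipi() helper below are
hypothetical names, not part of this patch; only the final send step of
send_ipi_mask() would need to change:

        /* hypothetical hook, not part of this patch */
        static bool riscv_use_native_ipi;
        static void riscv_native_send_ipi(const struct cpumask *hartid_mask);

        static void send_ipi_mask(const struct cpumask *mask,
                                  enum ipi_message_type op)
        {
                struct cpumask hartid_mask;
                int cpuid, hartid;

                cpumask_clear(&hartid_mask);
                mb();
                for_each_cpu(cpuid, mask) {
                        set_bit(op, &ipi_data[cpuid].bits);
                        hartid = cpuid_to_hartid_map(cpuid);
                        cpumask_set_cpu(hartid, &hartid_mask);
                }
                mb();

                if (riscv_use_native_ipi)
                        riscv_native_send_ipi(&hartid_mask);    /* hypothetical */
                else
                        sbi_send_ipi(cpumask_bits(&hartid_mask));
        }

Because all callers now go through send_ipi_single()/send_ipi_mask(),
such a switch would be confined to this one function.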
Signed-off-by: Christoph Hellwig <hch@....de>
---
arch/riscv/kernel/smp.c | 55 +++++++++++++++++++++++------------------
1 file changed, 31 insertions(+), 24 deletions(-)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 5a9834503a2f..8cd730239613 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -78,13 +78,38 @@ static void ipi_stop(void)
wait_for_interrupt();
}
+static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+ int cpuid, hartid;
+ struct cpumask hartid_mask;
+
+ cpumask_clear(&hartid_mask);
+ mb();
+ for_each_cpu(cpuid, mask) {
+ set_bit(op, &ipi_data[cpuid].bits);
+ hartid = cpuid_to_hartid_map(cpuid);
+ cpumask_set_cpu(hartid, &hartid_mask);
+ }
+ mb();
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static void send_ipi_single(int cpu, enum ipi_message_type op)
+{
+ send_ipi_mask(cpumask_of(cpu), op);
+}
+
+static inline void clear_ipi(void)
+{
+ csr_clear(CSR_SIP, SIE_SSIE);
+}
+
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;
- /* Clear pending IPI */
- csr_clear(CSR_SIP, SIE_SSIE);
+ clear_ipi();
while (true) {
unsigned long ops;
@@ -118,23 +143,6 @@ void riscv_software_interrupt(void)
}
}
-static void
-send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
-{
- int cpuid, hartid;
- struct cpumask hartid_mask;
-
- cpumask_clear(&hartid_mask);
- mb();
- for_each_cpu(cpuid, to_whom) {
- set_bit(operation, &ipi_data[cpuid].bits);
- hartid = cpuid_to_hartid_map(cpuid);
- cpumask_set_cpu(hartid, &hartid_mask);
- }
- mb();
- sbi_send_ipi(cpumask_bits(&hartid_mask));
-}
-
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
@@ -156,12 +164,12 @@ void show_ipi_stats(struct seq_file *p, int prec)
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
- send_ipi_message(mask, IPI_CALL_FUNC);
+ send_ipi_mask(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+ send_ipi_single(cpu, IPI_CALL_FUNC);
}
void smp_send_stop(void)
@@ -176,7 +184,7 @@ void smp_send_stop(void)
if (system_state <= SYSTEM_RUNNING)
pr_crit("SMP: stopping secondary CPUs\n");
- send_ipi_message(&mask, IPI_CPU_STOP);
+ send_ipi_mask(&mask, IPI_CPU_STOP);
}
/* Wait up to one second for other CPUs to stop */
@@ -191,6 +199,5 @@ void smp_send_stop(void)
void smp_send_reschedule(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+ send_ipi_single(cpu, IPI_RESCHEDULE);
}
-
--
2.20.1