Message-Id: <20190610221621.10938-8-hch@lst.de>
Date: Tue, 11 Jun 2019 00:16:11 +0200
From: Christoph Hellwig <hch@lst.de>
To: Palmer Dabbelt <palmer@sifive.com>
Cc: Damien Le Moal <damien.lemoal@wdc.com>,
	linux-riscv@lists.infradead.org, uclinux-dev@uclinux.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH 07/17] riscv: refactor the IPI code

Rename send_ipi_message to send_ipi_mask and move it above the
interrupt handler, add a send_ipi_single helper for the callers that
target a single CPU, and split clearing of the pending IPI into a new
clear_ipi helper.  This prepares for adding native non-SBI IPI code.
No functional change intended.
Signed-off-by: Christoph Hellwig <hch@....de>
---
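Not part of the patch proper, just a sketch for reviewers of where a
native backend could later slot in once this refactoring has landed:
with the send path factored into send_ipi_mask(), only the final call
would have to change.  The use_native_ipis flag and the
riscv_native_send_ipi() helper below are made-up names for
illustration, not interfaces from this series:

	static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
	{
		int cpuid, hartid;
		struct cpumask hartid_mask;

		cpumask_clear(&hartid_mask);
		mb();	/* order prior stores before setting the IPI bits */
		for_each_cpu(cpuid, mask) {
			set_bit(op, &ipi_data[cpuid].bits);
			hartid = cpuid_to_hartid_map(cpuid);
			cpumask_set_cpu(hartid, &hartid_mask);
		}
		mb();	/* make the IPI bits visible before raising the IPI */
		if (use_native_ipis)				/* hypothetical */
			riscv_native_send_ipi(&hartid_mask);	/* hypothetical */
		else
			sbi_send_ipi(cpumask_bits(&hartid_mask));
	}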
arch/riscv/kernel/smp.c | 55 +++++++++++++++++++++++------------------
1 file changed, 31 insertions(+), 24 deletions(-)

diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index b2537ffa855c..91164204496c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -89,13 +89,38 @@ static void ipi_stop(void)
wait_for_interrupt();
}

+static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+ int cpuid, hartid;
+ struct cpumask hartid_mask;
+
+ cpumask_clear(&hartid_mask);
+ mb();
+ for_each_cpu(cpuid, mask) {
+ set_bit(op, &ipi_data[cpuid].bits);
+ hartid = cpuid_to_hartid_map(cpuid);
+ cpumask_set_cpu(hartid, &hartid_mask);
+ }
+ mb();
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static void send_ipi_single(int cpu, enum ipi_message_type op)
+{
+ send_ipi_mask(cpumask_of(cpu), op);
+}
+
+static inline void clear_ipi(void)
+{
+ csr_clear(CSR_SIP, SIE_SSIE);
+}
+
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;

- /* Clear pending IPI */
- csr_clear(CSR_SIP, SIE_SSIE);
+ clear_ipi();

while (true) {
unsigned long ops;
@@ -129,23 +154,6 @@ void riscv_software_interrupt(void)
}
}

-static void
-send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
-{
- int cpuid, hartid;
- struct cpumask hartid_mask;
-
- cpumask_clear(&hartid_mask);
- mb();
- for_each_cpu(cpuid, to_whom) {
- set_bit(operation, &ipi_data[cpuid].bits);
- hartid = cpuid_to_hartid_map(cpuid);
- cpumask_set_cpu(hartid, &hartid_mask);
- }
- mb();
- sbi_send_ipi(cpumask_bits(&hartid_mask));
-}
-
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
@@ -167,12 +175,12 @@ void show_ipi_stats(struct seq_file *p, int prec)

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
- send_ipi_message(mask, IPI_CALL_FUNC);
+ send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+ send_ipi_single(cpu, IPI_CALL_FUNC);
}

void smp_send_stop(void)
@@ -187,7 +195,7 @@ void smp_send_stop(void)

if (system_state <= SYSTEM_RUNNING)
pr_crit("SMP: stopping secondary CPUs\n");
- send_ipi_message(&mask, IPI_CPU_STOP);
+ send_ipi_mask(&mask, IPI_CPU_STOP);
}

/* Wait up to one second for other CPUs to stop */
@@ -202,6 +210,5 @@ void smp_send_stop(void)

void smp_send_reschedule(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+ send_ipi_single(cpu, IPI_RESCHEDULE);
}
-
--
2.20.1