[rtems commit] bsp/riscv: Add PLIC support

Sebastian Huber sebh at rtems.org
Wed Jul 25 08:12:34 UTC 2018


Module:    rtems
Branch:    master
Commit:    adede135e7fefc1ba2020ff2d64da3f4185ba85c
Changeset: http://git.rtems.org/rtems/commit/?id=adede135e7fefc1ba2020ff2d64da3f4185ba85c

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue Jul 24 13:27:54 2018 +0200

bsp/riscv: Add PLIC support

Update #3433.
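
For reference, the external interrupt dispatch added below follows the
standard RISC-V PLIC claim/complete handshake.  A minimal sketch of that
protocol (the struct, function, and handler names here are hypothetical;
the BSP's actual layout is RISCV_PLIC_regs/RISCV_PLIC_hart_regs and the
real loop lives in _RISCV_Interrupt_dispatch()):

#include <stdint.h>

/* Hypothetical per-hart PLIC context block: a priority threshold followed
 * by the claim/complete register, as in the RISC-V PLIC specification. */
typedef struct {
  uint32_t priority_threshold; /* only sources with priority > threshold fire */
  uint32_t claim_complete;     /* read to claim a source, write to complete it */
} plic_hart_context;

/* Drain all pending external interrupts for one hart. */
static void plic_dispatch(
  volatile plic_hart_context *ctx,
  void (*handler)(uint32_t source)
)
{
  uint32_t source;

  /* A claim read of zero means no source is pending. */
  while ((source = ctx->claim_complete) != 0) {
    handler(source);
    /* Writing the claimed ID back completes the interrupt and allows the
     * source to be claimed again. */
    ctx->claim_complete = source;
  }
}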

---

 bsps/include/bsp/fatal.h           |   6 +-
 bsps/riscv/riscv/include/bsp.h     |   2 +
 bsps/riscv/riscv/include/bsp/irq.h |  15 +++
 bsps/riscv/riscv/irq/irq.c         | 241 ++++++++++++++++++++++++++++++++++++-
 bsps/riscv/riscv/start/bspsmp.c    |   2 +-
 5 files changed, 263 insertions(+), 3 deletions(-)

diff --git a/bsps/include/bsp/fatal.h b/bsps/include/bsp/fatal.h
index 2679af1..f9eca33 100644
--- a/bsps/include/bsp/fatal.h
+++ b/bsps/include/bsp/fatal.h
@@ -147,7 +147,11 @@ typedef enum {
   RISCV_FATAL_CLOCK_IRQ_INSTALL,
   RISCV_FATAL_NO_CLINT_REG_IN_DEVICE_TREE,
   RISCV_FATAL_INVALID_HART_REG_IN_DEVICE_TREE,
-  RISCV_FATAL_INVALID_CLINT_IRQS_EXTENDED_IN_DEVICE_TREE
+  RISCV_FATAL_INVALID_CLINT_IRQS_EXTENDED_IN_DEVICE_TREE,
+  RISCV_FATAL_NO_PLIC_REG_IN_DEVICE_TREE,
+  RISCV_FATAL_INVALID_PLIC_NDEV_IN_DEVICE_TREE,
+  RISCV_FATAL_TOO_LARGE_PLIC_NDEV_IN_DEVICE_TREE,
+  RISCV_FATAL_INVALID_INTERRUPT_AFFINITY
 } bsp_fatal_code;
 
 RTEMS_NO_RETURN static inline void
diff --git a/bsps/riscv/riscv/include/bsp.h b/bsps/riscv/riscv/include/bsp.h
index 5ec916e..d8d0347 100644
--- a/bsps/riscv/riscv/include/bsp.h
+++ b/bsps/riscv/riscv/include/bsp.h
@@ -44,6 +44,8 @@
 extern "C" {
 #endif
 
+#define BSP_FEATURE_IRQ_EXTENSION
+
 #define BSP_FDT_IS_SUPPORTED
 
 #ifdef __cplusplus
diff --git a/bsps/riscv/riscv/include/bsp/irq.h b/bsps/riscv/riscv/include/bsp/irq.h
index 353005f..cf88443 100644
--- a/bsps/riscv/riscv/include/bsp/irq.h
+++ b/bsps/riscv/riscv/include/bsp/irq.h
@@ -42,6 +42,7 @@
 #include <bsp.h>
 #include <rtems/irq.h>
 #include <rtems/irq-extension.h>
+#include <rtems/score/processormask.h>
 
 #define RISCV_INTERRUPT_VECTOR_SOFTWARE 0
 
@@ -49,10 +50,24 @@
 
 #define RISCV_INTERRUPT_VECTOR_EXTERNAL(x) ((x) + 2)
 
+#define RISCV_INTERRUPT_VECTOR_IS_EXTERNAL(x) ((x) >= 2)
+
+#define RISCV_INTERRUPT_VECTOR_EXTERNAL_TO_INDEX(x) ((x) - 2)
+
 #define BSP_INTERRUPT_VECTOR_MIN 0
 
 #define BSP_INTERRUPT_VECTOR_MAX RISCV_INTERRUPT_VECTOR_EXTERNAL(RISCV_MAXIMUM_EXTERNAL_INTERRUPTS - 1)
 
+void bsp_interrupt_set_affinity(
+  rtems_vector_number vector,
+  const Processor_mask *affinity
+);
+
+void bsp_interrupt_get_affinity(
+  rtems_vector_number vector,
+  Processor_mask *affinity
+);
+
 #endif /* ASM */
 
 #endif /* LIBBSP_GENERIC_RISCV_IRQ_H */
diff --git a/bsps/riscv/riscv/irq/irq.c b/bsps/riscv/riscv/irq/irq.c
index a3a17f5..64cb68b 100644
--- a/bsps/riscv/riscv/irq/irq.c
+++ b/bsps/riscv/riscv/irq/irq.c
@@ -48,6 +48,24 @@
 
 volatile RISCV_CLINT_regs *riscv_clint;
 
+static volatile RISCV_PLIC_regs *riscv_plic;
+
+/*
+ * The lovely PLIC has an interrupt enable bit per hart for each interrupt
+ * source.  This makes the interrupt enable/disable a bit difficult.  We have
+ * to store the interrupt distribution in software.  To keep it simple, we
+ * support only a one-to-one and a one-to-all interrupt-to-processor
+ * distribution.  For a one-to-one distribution, the array member must point
+ * to the enable register block of the corresponding processor.  For a
+ * one-to-all distribution, the array member must be NULL.  The array index
+ * is the external interrupt index minus one (external interrupt index zero
+ * is a special value, see the PLIC documentation).
+ */
+static volatile uint32_t *
+riscv_plic_irq_to_cpu[RISCV_MAXIMUM_EXTERNAL_INTERRUPTS];
+
+RTEMS_INTERRUPT_LOCK_DEFINE(static, riscv_plic_lock, "PLIC")
+
 void _RISCV_Interrupt_dispatch(uintptr_t mcause, Per_CPU_Control *cpu_self)
 {
   /*
@@ -59,7 +77,17 @@ void _RISCV_Interrupt_dispatch(uintptr_t mcause, Per_CPU_Control *cpu_self)
   if (mcause == (RISCV_INTERRUPT_TIMER_MACHINE << 1)) {
     bsp_interrupt_handler_dispatch(RISCV_INTERRUPT_VECTOR_TIMER);
   } else if (mcause == (RISCV_INTERRUPT_EXTERNAL_MACHINE << 1)) {
-    /* TODO: Handle PLIC interrupt */
+    volatile RISCV_PLIC_hart_regs *plic_hart_regs;
+    uint32_t interrupt_index;
+
+    plic_hart_regs = cpu_self->cpu_per_cpu.plic_hart_regs;
+
+    while ((interrupt_index = plic_hart_regs->claim_complete) != 0) {
+      bsp_interrupt_handler_dispatch(
+        RISCV_INTERRUPT_VECTOR_EXTERNAL(interrupt_index)
+      );
+      plic_hart_regs->claim_complete = interrupt_index;
+    }
   } else if (mcause == (RISCV_INTERRUPT_SOFTWARE_MACHINE << 1)) {
 #ifdef RTEMS_SMP
     clear_csr(mip, MIP_MSIP);
@@ -110,20 +138,231 @@ static void riscv_clint_init(const void *fdt)
 #endif
 }
 
+static void riscv_plic_init(const void *fdt)
+{
+  volatile RISCV_PLIC_regs *plic;
+  int node;
+  int i;
+  const uint32_t *val;
+  int len;
+  uint32_t interrupt_index;
+  uint32_t ndev;
+  Per_CPU_Control *cpu;
+
+  node = fdt_node_offset_by_compatible(fdt, -1, "riscv,plic0");
+
+  plic = riscv_fdt_get_address(fdt, node);
+  if (plic == NULL) {
+    bsp_fatal(RISCV_FATAL_NO_PLIC_REG_IN_DEVICE_TREE);
+  }
+
+  riscv_plic = plic;
+
+  val = fdt_getprop(fdt, node, "riscv,ndev", &len);
+  if (val == NULL || len != 4) {
+    bsp_fatal(RISCV_FATAL_INVALID_PLIC_NDEV_IN_DEVICE_TREE);
+  }
+
+  ndev = fdt32_to_cpu(val[0]);
+  if (ndev > RISCV_MAXIMUM_EXTERNAL_INTERRUPTS) {
+    bsp_fatal(RISCV_FATAL_TOO_LARGE_PLIC_NDEV_IN_DEVICE_TREE);
+  }
+
+  val = fdt_getprop(fdt, node, "interrupts-extended", &len);
+
+  for (i = 0; i < len; i += 8) {
+    uint32_t hart_index;
+
+    hart_index = riscv_get_hart_index_by_phandle(fdt32_to_cpu(val[i / 4]));
+    if (hart_index >= rtems_configuration_get_maximum_processors()) {
+      continue;
+    }
+
+    interrupt_index = fdt32_to_cpu(val[i / 4 + 1]);
+    if (interrupt_index != RISCV_INTERRUPT_EXTERNAL_MACHINE) {
+      continue;
+    }
+
+    plic->harts[i / 8].priority_threshold = 0;
+
+    cpu = _Per_CPU_Get_by_index(hart_index);
+    cpu->cpu_per_cpu.plic_hart_regs = &plic->harts[i / 8];
+    cpu->cpu_per_cpu.plic_m_ie = &plic->enable[i / 8][0];
+  }
+
+  cpu = _Per_CPU_Get_by_index(0);
+
+  for (interrupt_index = 1; interrupt_index <= ndev; ++interrupt_index) {
+    plic->priority[interrupt_index] = 1;
+    riscv_plic_irq_to_cpu[interrupt_index - 1] = cpu->cpu_per_cpu.plic_m_ie;
+  }
+
+  /*
+   * External M-mode interrupts on secondary processors are enabled in
+   * bsp_start_on_secondary_processor().
+   */
+  set_csr(mie, MIP_MEIP);
+}
+
 rtems_status_code bsp_interrupt_facility_initialize(void)
 {
   const void *fdt;
 
   fdt = bsp_fdt_get();
   riscv_clint_init(fdt);
+  riscv_plic_init(fdt);
 
   return RTEMS_SUCCESSFUL;
 }
 
 void bsp_interrupt_vector_enable(rtems_vector_number vector)
 {
+  bsp_interrupt_assert(bsp_interrupt_is_valid_vector(vector));
+
+  if (RISCV_INTERRUPT_VECTOR_IS_EXTERNAL(vector)) {
+    uint32_t interrupt_index;
+    volatile uint32_t *enable;
+    uint32_t group;
+    uint32_t bit;
+    rtems_interrupt_lock_context lock_context;
+
+    interrupt_index = RISCV_INTERRUPT_VECTOR_EXTERNAL_TO_INDEX(vector);
+    enable = riscv_plic_irq_to_cpu[interrupt_index - 1];
+    group = interrupt_index / 32;
+    bit = UINT32_C(1) << (interrupt_index % 32);
+
+    rtems_interrupt_lock_acquire(&riscv_plic_lock, &lock_context);
+
+    if (enable != NULL) {
+      enable[group] |= bit;
+    } else {
+      uint32_t cpu_index;
+      uint32_t cpu_count;
+
+      cpu_count = _SMP_Get_processor_count();
+
+      for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
+        Per_CPU_Control *cpu;
+
+        cpu = _Per_CPU_Get_by_index(cpu_index);
+        enable = cpu->cpu_per_cpu.plic_m_ie;
+
+        if (enable != NULL) {
+          enable[group] |= bit;
+        }
+      }
+    }
+
+    rtems_interrupt_lock_release(&riscv_plic_lock, &lock_context);
+  }
 }
 
 void bsp_interrupt_vector_disable(rtems_vector_number vector)
 {
+  bsp_interrupt_assert(bsp_interrupt_is_valid_vector(vector));
+
+  if (RISCV_INTERRUPT_VECTOR_IS_EXTERNAL(vector)) {
+    uint32_t interrupt_index;
+    volatile uint32_t *enable;
+    uint32_t group;
+    uint32_t bit;
+    rtems_interrupt_lock_context lock_context;
+
+    interrupt_index = RISCV_INTERRUPT_VECTOR_EXTERNAL_TO_INDEX(vector);
+    enable = riscv_plic_irq_to_cpu[interrupt_index - 1];
+    group = interrupt_index / 32;
+    bit = UINT32_C(1) << (interrupt_index % 32);
+
+    rtems_interrupt_lock_acquire(&riscv_plic_lock, &lock_context);
+
+    if (enable != NULL) {
+      enable[group] &= ~bit;
+    } else {
+      uint32_t cpu_index;
+      uint32_t cpu_count;
+
+      cpu_count = _SMP_Get_processor_count();
+
+      for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
+        Per_CPU_Control *cpu;
+
+        cpu = _Per_CPU_Get_by_index(cpu_index);
+        enable = cpu->cpu_per_cpu.plic_m_ie;
+
+        if (enable != NULL) {
+          enable[group] &= ~bit;
+        }
+      }
+    }
+
+    rtems_interrupt_lock_release(&riscv_plic_lock, &lock_context);
+  }
+}
+
+void bsp_interrupt_set_affinity(
+  rtems_vector_number vector,
+  const Processor_mask *affinity
+)
+{
+  if (RISCV_INTERRUPT_VECTOR_IS_EXTERNAL(vector)) {
+    uint32_t interrupt_index;
+    Processor_mask mask;
+
+    interrupt_index = RISCV_INTERRUPT_VECTOR_EXTERNAL_TO_INDEX(vector);
+
+    _Processor_mask_And(&mask, affinity, _SMP_Get_online_processors());
+
+    if (_Processor_mask_Is_equal(&mask, _SMP_Get_online_processors())) {
+      riscv_plic_irq_to_cpu[interrupt_index - 1] = NULL;
+      return;
+    }
+
+    if (_Processor_mask_Count(&mask) == 1) {
+      uint32_t cpu_index;
+      Per_CPU_Control *cpu;
+
+      cpu_index = _Processor_mask_Find_last_set(&mask) - 1;
+      cpu = _Per_CPU_Get_by_index(cpu_index);
+      riscv_plic_irq_to_cpu[interrupt_index - 1] = cpu->cpu_per_cpu.plic_m_ie;
+      return;
+    }
+
+    bsp_fatal(RISCV_FATAL_INVALID_INTERRUPT_AFFINITY);
+  }
+}
+
+void bsp_interrupt_get_affinity(
+  rtems_vector_number vector,
+  Processor_mask *affinity
+)
+{
+  _Processor_mask_Zero(affinity);
+
+  if (RISCV_INTERRUPT_VECTOR_IS_EXTERNAL(vector)) {
+    uint32_t interrupt_index;
+    volatile uint32_t *enable;
+
+    interrupt_index = RISCV_INTERRUPT_VECTOR_EXTERNAL_TO_INDEX(vector);
+    enable = riscv_plic_irq_to_cpu[interrupt_index - 1];
+
+    if (enable != NULL) {
+      uint32_t cpu_index;
+      uint32_t cpu_count;
+
+      cpu_count = _SMP_Get_processor_count();
+
+      for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
+        Per_CPU_Control *cpu;
+
+        cpu = _Per_CPU_Get_by_index(cpu_index);
+
+        if (enable == cpu->cpu_per_cpu.plic_m_ie) {
+          _Processor_mask_Set(affinity, cpu_index);
+          break;
+        }
+      }
+    } else {
+      _Processor_mask_Assign(affinity, _SMP_Get_online_processors());
+    }
+  }
 }
diff --git a/bsps/riscv/riscv/start/bspsmp.c b/bsps/riscv/riscv/start/bspsmp.c
index 8e5540d..4f1b3c9 100644
--- a/bsps/riscv/riscv/start/bspsmp.c
+++ b/bsps/riscv/riscv/start/bspsmp.c
@@ -40,7 +40,7 @@ void bsp_start_on_secondary_processor(Per_CPU_Control *cpu_self)
     cpu_index_self < rtems_configuration_get_maximum_processors()
       && _SMP_Should_start_processor(cpu_index_self)
   ) {
-    set_csr(mie, MIP_MSIP);
+    set_csr(mie, MIP_MSIP | MIP_MEIP);
     _SMP_Start_multitasking_on_secondary_processor(cpu_self);
   } else {
     _CPU_Thread_Idle_body(0);

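The enable and disable paths above locate a source's enable bit with simple
word/bit arithmetic on the per-hart enable bitmap (32 sources per 32-bit
register).  A small sketch of that indexing, using a hypothetical helper name:

#include <stdint.h>

/* Set or clear the enable bit of one PLIC source in a per-hart enable
 * bitmap: word index is source / 32, bit position is source % 32. */
static void plic_set_source_enable(
  volatile uint32_t *enable,
  uint32_t source,
  int on
)
{
  uint32_t group = source / 32;
  uint32_t bit = UINT32_C(1) << (source % 32);

  if (on) {
    enable[group] |= bit;
  } else {
    enable[group] &= ~bit;
  }
}

In the patch itself these read-modify-write updates are serialized with the
riscv_plic_lock interrupt lock, since concurrent enable and disable requests
may hit the same 32-bit enable word.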


