[PATCH] rtems: Improve rtems_interrupt_server_create()

Sebastian Huber sebastian.huber at embedded-brains.de
Mon Nov 23 15:40:34 UTC 2020


Also start interrupt server tasks on processors that currently have no
scheduler.  Applications may dynamically manage processors using
rtems_scheduler_remove_processor() and rtems_scheduler_add_processor(), so a
processor without a scheduler at initialization time may gain one later.
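
A minimal usage sketch (not part of the patch), modeled on the new
InterruptServerSMPInitializeNoScheduler test case below; the processor
index 1, the priority value 123, the two-processor assumption, and the
helper function name are illustrative:

#include <assert.h>
#include <rtems.h>
#include <rtems/irq-extension.h>

/*
 * Remove processor 1 from its scheduler, initialize the interrupt servers,
 * and give the processor back.  With this change a server task is created
 * and started for processor 1 even while it has no scheduler; the task
 * simply keeps the scheduler of the thread that calls
 * rtems_interrupt_server_initialize().
 */
static void interrupt_servers_with_removed_processor(void)
{
  rtems_status_code sc;
  rtems_id          scheduler_id;
  uint32_t          server_count;

  sc = rtems_scheduler_ident_by_processor(1, &scheduler_id);
  assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_scheduler_remove_processor(scheduler_id, 1);
  assert(sc == RTEMS_SUCCESSFUL);

  /* One interrupt server per configured processor is reported */
  server_count = 0;
  sc = rtems_interrupt_server_initialize(
    123,                        /* example server task priority */
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &server_count
  );
  assert(sc == RTEMS_SUCCESSFUL);
  assert(server_count == 2);    /* assumes a two-processor configuration */

  sc = rtems_scheduler_add_processor(scheduler_id, 1);
  assert(sc == RTEMS_SUCCESSFUL);
}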
---
 bsps/shared/irq/irq-server.c         | 30 +++++++------
 testsuites/smptests/smpirqs01/init.c | 65 +++++++++++++++++++++++++++-
 2 files changed, 80 insertions(+), 15 deletions(-)

diff --git a/bsps/shared/irq/irq-server.c b/bsps/shared/irq/irq-server.c
index 19d03dc33f..2c8df4952c 100644
--- a/bsps/shared/irq/irq-server.c
+++ b/bsps/shared/irq/irq-server.c
@@ -506,9 +506,6 @@ static rtems_status_code bsp_interrupt_server_create(
   cpu_set_t cpu;
 #endif
 
-  rtems_interrupt_lock_initialize(&s->lock, "Interrupt Server");
-  rtems_chain_initialize_empty(&s->entries);
-
   sc = rtems_task_create(
     rtems_build_name('I', 'R', 'Q', 'S'),
     priority,
@@ -518,23 +515,30 @@ static rtems_status_code bsp_interrupt_server_create(
     &s->server
   );
   if (sc != RTEMS_SUCCESSFUL) {
+    (*s->destroy)(s);
     return sc;
   }
 
+  rtems_interrupt_lock_initialize(&s->lock, "Interrupt Server");
+  rtems_chain_initialize_empty(&s->entries);
+
 #if defined(RTEMS_SMP)
   sc = rtems_scheduler_ident_by_processor(cpu_index, &scheduler);
-  if (sc != RTEMS_SUCCESSFUL) {
-    /* Do not start an interrupt server on a processor without a scheduler */
-    return RTEMS_SUCCESSFUL;
-  }
 
-  sc = rtems_task_set_scheduler(s->server, scheduler, priority);
-  _Assert(sc == RTEMS_SUCCESSFUL);
+  /*
+   * If a scheduler exists for the processor, then move the server task to
+   * this scheduler and try to set its affinity to the processor; otherwise,
+   * keep the scheduler of the executing thread.
+   */
+  if (sc == RTEMS_SUCCESSFUL) {
+    sc = rtems_task_set_scheduler(s->server, scheduler, priority);
+    _Assert(sc == RTEMS_SUCCESSFUL);
 
-  /* Set the task to processor affinity on a best-effort basis */
-  CPU_ZERO(&cpu);
-  CPU_SET(cpu_index, &cpu);
-  (void) rtems_task_set_affinity(s->server, sizeof(cpu), &cpu);
+    /* Set the task to processor affinity on a best-effort basis */
+    CPU_ZERO(&cpu);
+    CPU_SET(cpu_index, &cpu);
+    (void) rtems_task_set_affinity(s->server, sizeof(cpu), &cpu);
+  }
 #else
   (void) cpu_index;
 #endif
diff --git a/testsuites/smptests/smpirqs01/init.c b/testsuites/smptests/smpirqs01/init.c
index 4350647ca5..03238227fd 100644
--- a/testsuites/smptests/smpirqs01/init.c
+++ b/testsuites/smptests/smpirqs01/init.c
@@ -64,11 +64,10 @@ static void ensure_server_termination(void)
   T_rsc_success(sc);
 }
 
-T_TEST_CASE(InterruptServerSMPInitializeDestroy)
+T_TEST_CASE(InterruptServerSMPInitializeIncorrectState)
 {
   rtems_status_code sc;
   uint32_t server_count;
-  void *greedy;
 
   T_assert_eq_u32(rtems_scheduler_get_processor_maximum(), 2);
 
@@ -104,6 +103,14 @@ T_TEST_CASE(InterruptServerSMPInitializeDestroy)
   sc = rtems_interrupt_server_delete(1);
   T_rsc_success(sc);
   ensure_server_termination();
+}
+
+T_TEST_CASE(InterruptServerSMPInitializeInvalidPriority)
+{
+  rtems_status_code sc;
+  uint32_t server_count;
+
+  T_assert_eq_u32(rtems_scheduler_get_processor_maximum(), 2);
 
   server_count = 456;
   sc = rtems_interrupt_server_initialize(
@@ -134,6 +141,15 @@ T_TEST_CASE(InterruptServerSMPInitializeDestroy)
   sc = rtems_interrupt_server_delete(1);
   T_rsc_success(sc);
   ensure_server_termination();
+}
+
+T_TEST_CASE(InterruptServerSMPInitializeNoMemory)
+{
+  rtems_status_code sc;
+  uint32_t server_count;
+  void *greedy;
+
+  T_assert_eq_u32(rtems_scheduler_get_processor_maximum(), 2);
 
   greedy = rtems_heap_greedy_allocate(NULL, 0);
 
@@ -158,6 +174,51 @@ T_TEST_CASE(InterruptServerSMPInitializeDestroy)
   T_rsc(sc, RTEMS_INVALID_ID);
 }
 
+T_TEST_CASE(InterruptServerSMPInitializeNoScheduler)
+{
+  rtems_status_code sc;
+  uint32_t server_count;
+  rtems_id scheduler_id;
+  rtems_task_priority prio;
+
+  T_assert_eq_u32(rtems_scheduler_get_processor_maximum(), 2);
+
+  scheduler_id = 0;
+  sc = rtems_scheduler_ident_by_processor(1, &scheduler_id);
+  T_rsc_success(sc);
+  T_ne_u32(scheduler_id, 0);
+
+  sc = rtems_scheduler_remove_processor(scheduler_id, 1);
+  T_rsc_success(sc);
+
+  server_count = 456;
+  sc = rtems_interrupt_server_initialize(
+    123,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &server_count
+  );
+  T_rsc_success(sc);
+  T_eq_u32(server_count, 2);
+
+  sc = rtems_interrupt_server_delete(0);
+  T_rsc_success(sc);
+
+  sc = rtems_interrupt_server_delete(1);
+  T_rsc_success(sc);
+
+  prio = 0;
+  sc = rtems_task_set_priority(RTEMS_SELF, 124, &prio);
+  T_rsc_success(sc);
+
+  sc = rtems_task_set_priority(RTEMS_SELF, prio, &prio);
+  T_rsc_success(sc);
+
+  sc = rtems_scheduler_add_processor(scheduler_id, 1);
+  T_rsc_success(sc);
+}
+
 const char rtems_test_name[] = "SMPIRQS 1";
 
 static void Init(rtems_task_argument argument)
-- 
2.26.2


