[rtems commit] validation: Support a partial thread queue flush

Sebastian Huber sebh at rtems.org
Wed Aug 31 09:21:07 UTC 2022


Module:    rtems
Branch:    master
Commit:    1dca588f63cb8b2752a5012a3401e5150081b285
Changeset: http://git.rtems.org/rtems/commit/?id=1dca588f63cb8b2752a5012a3401e5150081b285

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Wed Aug 31 11:26:00 2022 +0200

validation: Support a partial thread queue flush

Update #3716.

---

 testsuites/validation/tc-futex-wake.c              |  4 +-
 testsuites/validation/tc-sem-flush.c               |  6 ++-
 testsuites/validation/tr-tq-flush-fifo.c           |  4 +-
 .../validation/tr-tq-flush-priority-inherit.c      |  4 +-
 testsuites/validation/tr-tq-flush-priority.c       |  4 +-
 testsuites/validation/tx-thread-queue.c            | 12 ++++--
 testsuites/validation/tx-thread-queue.h            | 50 ++++++++++++++--------
 7 files changed, 53 insertions(+), 31 deletions(-)

diff --git a/testsuites/validation/tc-futex-wake.c b/testsuites/validation/tc-futex-wake.c
index 1d996d1d2b..75364e6976 100644
--- a/testsuites/validation/tc-futex-wake.c
+++ b/testsuites/validation/tc-futex-wake.c
@@ -185,7 +185,7 @@ static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
   return STATUS_BUILD( 0, eno );
 }
 
-static void Flush( TQContext *tq_ctx )
+static uint32_t Flush( TQContext *tq_ctx, uint32_t thread_count, bool all )
 {
   Context *ctx;
   int      count;
@@ -199,6 +199,8 @@ static void Flush( TQContext *tq_ctx )
 
   count = _Futex_Wake( &ctx->futex, INT_MAX );
   T_eq_int( count, how_many > 1 ? how_many - 1 : 0 );
+
+  return thread_count;
 }
 
 static void NewlibReqFutexWake_Pre_Count_Prepare(
diff --git a/testsuites/validation/tc-sem-flush.c b/testsuites/validation/tc-sem-flush.c
index af8f5407cd..59b836dba8 100644
--- a/testsuites/validation/tc-sem-flush.c
+++ b/testsuites/validation/tc-sem-flush.c
@@ -228,12 +228,16 @@ static Status_Control Enqueue( TQContext *tq_ctx, TQWait wait )
   return STATUS_BUILD( STATUS_SUCCESSFUL, 0 );
 }
 
-static void Flush( TQContext *tq_ctx )
+static uint32_t Flush( TQContext *tq_ctx, uint32_t thread_count, bool all )
 {
   rtems_status_code sc;
 
+  (void) all;
+
   sc = rtems_semaphore_flush( tq_ctx->thread_queue_id );
   T_rsc_success( sc );
+
+  return thread_count;
 }
 
 static void RtemsSemReqFlush_Pre_Class_Prepare(
diff --git a/testsuites/validation/tr-tq-flush-fifo.c b/testsuites/validation/tr-tq-flush-fifo.c
index 9b6821ac27..a50e41a66e 100644
--- a/testsuites/validation/tr-tq-flush-fifo.c
+++ b/testsuites/validation/tr-tq-flush-fifo.c
@@ -147,7 +147,7 @@ static void Flush( void *arg )
 
   ctx = arg;
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQFlush( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx, true );
 }
 
 static void SchedulerEvent(
@@ -288,7 +288,7 @@ static void ScoreTqReqFlushFifo_Action( ScoreTqReqFlushFifo_Context *ctx )
     TQSend( ctx->tq_ctx, TQ_BLOCKER_D, TQ_EVENT_ENQUEUE );
   } else {
     TQSchedulerRecordStart( ctx->tq_ctx );
-    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
   }
 
   TQSchedulerRecordStop( ctx->tq_ctx );
diff --git a/testsuites/validation/tr-tq-flush-priority-inherit.c b/testsuites/validation/tr-tq-flush-priority-inherit.c
index beb48ebbe7..7c018496f0 100644
--- a/testsuites/validation/tr-tq-flush-priority-inherit.c
+++ b/testsuites/validation/tr-tq-flush-priority-inherit.c
@@ -179,7 +179,7 @@ static void Flush( void *arg )
 
   ctx = arg;
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQFlush( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx, true );
 }
 
 static void SchedulerEvent(
@@ -434,7 +434,7 @@ static void ScoreTqReqFlushPriorityInherit_Action(
     );
   } else {
     TQSchedulerRecordStart( ctx->tq_ctx );
-    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
   }
 
   TQSchedulerRecordStop( ctx->tq_ctx );
diff --git a/testsuites/validation/tr-tq-flush-priority.c b/testsuites/validation/tr-tq-flush-priority.c
index 26e2a7fc3b..6d4d23b329 100644
--- a/testsuites/validation/tr-tq-flush-priority.c
+++ b/testsuites/validation/tr-tq-flush-priority.c
@@ -154,7 +154,7 @@ static void Flush( void *arg )
 
   ctx = arg;
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQFlush( ctx->tq_ctx );
+  TQFlush( ctx->tq_ctx, true );
 }
 
 static void SchedulerEvent(
@@ -315,7 +315,7 @@ static void ScoreTqReqFlushPriority_Action(
     );
   } else {
     TQSchedulerRecordStart( ctx->tq_ctx );
-    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_FLUSH_ALL );
   }
 
   TQSchedulerRecordStop( ctx->tq_ctx );
diff --git a/testsuites/validation/tx-thread-queue.c b/testsuites/validation/tx-thread-queue.c
index 1b0e8665c7..9716b73988 100644
--- a/testsuites/validation/tx-thread-queue.c
+++ b/testsuites/validation/tx-thread-queue.c
@@ -333,8 +333,12 @@ static void Worker( rtems_task_argument arg, TQWorkerKind worker )
       _Thread_Dispatch_direct( cpu_self );
     }
 
-    if ( ( events & TQ_EVENT_FLUSH ) != 0 ) {
-      TQFlush( ctx );
+    if ( ( events & TQ_EVENT_FLUSH_ALL ) != 0 ) {
+      TQFlush( ctx, true );
+    }
+
+    if ( ( events & TQ_EVENT_FLUSH_PARTIAL ) != 0 ) {
+      TQFlush( ctx, false );
     }
 
     if ( ( events & TQ_EVENT_ENQUEUE_DONE ) != 0 ) {
@@ -647,9 +651,9 @@ Status_Control TQSurrender( TQContext *ctx )
   return ( *ctx->surrender )( ctx );
 }
 
-void TQFlush( TQContext *ctx )
+void TQFlush( TQContext *ctx, bool flush_all )
 {
-  ( *ctx->flush )( ctx );
+  ctx->flush_count = ( *ctx->flush )( ctx, ctx->how_many, flush_all );
 }
 
 rtems_tcb *TQGetOwner( TQContext *ctx )
diff --git a/testsuites/validation/tx-thread-queue.h b/testsuites/validation/tx-thread-queue.h
index d9a1a4db8d..bd9f3ffce3 100644
--- a/testsuites/validation/tx-thread-queue.h
+++ b/testsuites/validation/tx-thread-queue.h
@@ -120,23 +120,24 @@ typedef enum {
   TQ_EVENT_MUTEX_B_OBTAIN = RTEMS_EVENT_10,
   TQ_EVENT_MUTEX_B_RELEASE = RTEMS_EVENT_11,
   TQ_EVENT_BUSY_WAIT = RTEMS_EVENT_12,
-  TQ_EVENT_FLUSH = RTEMS_EVENT_13,
-  TQ_EVENT_SCHEDULER_RECORD_START = RTEMS_EVENT_14,
-  TQ_EVENT_SCHEDULER_RECORD_STOP = RTEMS_EVENT_15,
-  TQ_EVENT_TIMEOUT = RTEMS_EVENT_16,
-  TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN = RTEMS_EVENT_17,
-  TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE = RTEMS_EVENT_18,
-  TQ_EVENT_ENQUEUE_FATAL = RTEMS_EVENT_19,
-  TQ_EVENT_MUTEX_C_OBTAIN = RTEMS_EVENT_20,
-  TQ_EVENT_MUTEX_C_RELEASE = RTEMS_EVENT_21,
-  TQ_EVENT_MUTEX_FIFO_OBTAIN = RTEMS_EVENT_22,
-  TQ_EVENT_MUTEX_FIFO_RELEASE = RTEMS_EVENT_23,
-  TQ_EVENT_ENQUEUE_TIMED = RTEMS_EVENT_24,
-  TQ_EVENT_MUTEX_D_OBTAIN = RTEMS_EVENT_25,
-  TQ_EVENT_MUTEX_D_RELEASE = RTEMS_EVENT_26,
-  TQ_EVENT_PIN = RTEMS_EVENT_27,
-  TQ_EVENT_UNPIN = RTEMS_EVENT_28,
-  TQ_EVENT_COUNT = RTEMS_EVENT_29
+  TQ_EVENT_FLUSH_ALL = RTEMS_EVENT_13,
+  TQ_EVENT_FLUSH_PARTIAL = RTEMS_EVENT_14,
+  TQ_EVENT_SCHEDULER_RECORD_START = RTEMS_EVENT_15,
+  TQ_EVENT_SCHEDULER_RECORD_STOP = RTEMS_EVENT_16,
+  TQ_EVENT_TIMEOUT = RTEMS_EVENT_17,
+  TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN = RTEMS_EVENT_18,
+  TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE = RTEMS_EVENT_19,
+  TQ_EVENT_ENQUEUE_FATAL = RTEMS_EVENT_20,
+  TQ_EVENT_MUTEX_C_OBTAIN = RTEMS_EVENT_21,
+  TQ_EVENT_MUTEX_C_RELEASE = RTEMS_EVENT_22,
+  TQ_EVENT_MUTEX_FIFO_OBTAIN = RTEMS_EVENT_23,
+  TQ_EVENT_MUTEX_FIFO_RELEASE = RTEMS_EVENT_24,
+  TQ_EVENT_ENQUEUE_TIMED = RTEMS_EVENT_25,
+  TQ_EVENT_MUTEX_D_OBTAIN = RTEMS_EVENT_26,
+  TQ_EVENT_MUTEX_D_RELEASE = RTEMS_EVENT_27,
+  TQ_EVENT_PIN = RTEMS_EVENT_28,
+  TQ_EVENT_UNPIN = RTEMS_EVENT_29,
+  TQ_EVENT_COUNT = RTEMS_EVENT_30
 } TQEvent;
 
 typedef enum {
@@ -262,6 +263,12 @@ typedef struct TQContext {
    */
   uint32_t how_many;
 
+  /**
+   * @brief This member contains the count of the most recently flushed
+   *   threads.
+   */
+  uint32_t flush_count;
+
   /**
    * @brief This this member provides a context to jump back to before the
    *   enqueue.
@@ -290,8 +297,13 @@ typedef struct TQContext {
 
   /**
    * @brief This member provides the thread queue flush handler.
+   *
+   * The second parameter specifies the count of enqueued threads.  If the
+   * third parameter is true, all enqueued threads shall be extracted,
+   * otherwise the thread queue shall be partially flushed.  The handler shall
+   * return the count of flushed threads.
    */
-  void ( *flush )( struct TQContext * );
+  uint32_t ( *flush )( struct TQContext *, uint32_t, bool );
 
   /**
    * @brief This member provides the get owner handler.
@@ -390,7 +402,7 @@ void TQEnqueueDone( TQContext *ctx );
 
 Status_Control TQSurrender( TQContext *ctx );
 
-void TQFlush( TQContext *ctx );
+void TQFlush( TQContext *ctx, bool flush_all );
 
 rtems_tcb *TQGetOwner( TQContext *ctx );
 



More information about the vc mailing list