[rtems-central commit] spec: Specify rtems_task_restart()

Sebastian Huber sebh at rtems.org
Wed May 26 11:46:35 UTC 2021


Module:    rtems-central
Branch:    master
Commit:    c9abed803ff6ededb5cb760a4525c630ae4dc2ac
Changeset: http://git.rtems.org/rtems-central/commit/?id=c9abed803ff6ededb5cb760a4525c630ae4dc2ac

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Wed May 19 13:39:30 2021 +0200

spec: Specify rtems_task_restart()

---

 spec/rtems/task/req/restart.yml | 1519 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 1411 insertions(+), 108 deletions(-)

diff --git a/spec/rtems/task/req/restart.yml b/spec/rtems/task/req/restart.yml
index 7e53350..d24c210 100644
--- a/spec/rtems/task/req/restart.yml
+++ b/spec/rtems/task/req/restart.yml
@@ -27,14 +27,57 @@ post-conditions:
     text: |
       The return status of ${../if/restart:/name} shall be
       ${../../status/if/incorrect-state:/name}.
+  - name: NoReturn
+    test-code: |
+      T_rsc( ctx->status, RTEMS_NOT_IMPLEMENTED );
+    text: |
+      The ${../if/restart:/name} call shall not return.
+  test-epilogue: null
+  test-prologue: null
+- name: FatalError
+  states:
+  - name: 'Yes'
+    test-code: |
+      T_eq_u32( ctx->calls.fatal, 1 );
+    text: |
+      The fatal error with a fatal source of
+      ${/score/interr/if/internal-error-core:/name} and a fatal code of
+      ${/score/interr/if/bad-thread-dispatch-disable-level:/name} shall occur
+      by the ${../if/restart:/name} call.
+  - name: Nop
+    test-code: |
+      T_eq_u32( ctx->calls.fatal, 0 );
+    text: |
+      No fatal error shall occur by the ${../if/restart:/name} call.
   test-epilogue: null
   test-prologue: null
 - name: Argument
   states:
   - name: Set
     test-code: |
-      T_eq_u32( ctx->actual_argument, ctx->argument );
-      T_eq_u32( ctx->counter, 2 );
+      if ( ctx->restart_counter != 0 ) {
+        #if CPU_SIZEOF_POINTER > 4
+        T_eq_u64( ctx->actual_argument, RESTART_ARGUMENT );
+        #else
+        T_eq_u32( ctx->actual_argument, RESTART_ARGUMENT );
+        #endif
+
+        T_eq_u32( ctx->restart_counter, 1 );
+      } else {
+        #if CPU_SIZEOF_POINTER > 4
+        T_eq_u64(
+          ctx->worker_thread->Start.Entry.Kinds.Numeric.argument,
+          RESTART_ARGUMENT
+        );
+        T_eq_u64( ctx->actual_argument, UNSET_ARGUMENT );
+        #else
+        T_eq_u32(
+          ctx->worker_thread->Start.Entry.Kinds.Numeric.argument,
+          RESTART_ARGUMENT
+        );
+        T_eq_u32( ctx->actual_argument, UNSET_ARGUMENT );
+        #endif
+      }
     text: |
       The entry point argument of the task specified by the
       ${../if/restart:/params[0]/name} parameter shall be set to the value
@@ -42,60 +85,260 @@ post-conditions:
       is unblocked by the ${../if/restart:/name} call.
   - name: Nop
     test-code: |
-      T_eq_u32( ctx->actual_argument, 0 );
-
-      if ( ctx->start ) {
-        T_eq_u32( ctx->counter, 1 );
-      } else {
-        T_eq_u32( ctx->counter, 0 );
-      }
+      T_eq_u32( ctx->actual_argument, UNSET_ARGUMENT );
+      T_eq_u32( ctx->restart_counter, 0 );
     text: |
       No entry point argument of a task shall be modified by the
       ${../if/restart:/name} call.
   test-epilogue: null
   test-prologue: null
-- name: Unblock
+- name: State
   states:
-  - name: 'Yes'
+  - name: Dormant
     test-code: |
-      if ( ctx->suspend ) {
-        T_eq_sz( ctx->scheduler_log.header.recorded, 1 );
-        T_eq_int(
-          ctx->scheduler_log.events[ 0 ].operation,
-          T_SCHEDULER_UNBLOCK
-        );
+      T_eq_u32( ctx->worker_state, STATES_DORMANT );
+
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_NOP );
+    text: |
+      The state of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall be dormant after the ${../if/restart:/name} call.
+  - name: DormantSuspended
+    test-code: |
+      T_eq_u32( ctx->worker_state, STATES_DORMANT | STATES_SUSPENDED );
+
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_NOP );
+    text: |
+      The state of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall be dormant and suspended after the ${../if/restart:/name}
+      call.
+  - name: Blocked
+    test-code: |
+      T_ne_u32( ctx->worker_state & STATES_BLOCKED, 0 );
+      T_eq_u32( ctx->worker_state & STATES_BLOCKED, ctx->worker_state );
+
+      if ( ctx->suspended && !ctx->blocked ) {
+        event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+        T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+        T_eq_u32( event->thread->Object.id, ctx->worker_id );
+      }
+
+      if ( !ctx->real_priority_is_initial ) {
+        event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+        T_eq_int( event->operation, T_SCHEDULER_UPDATE_PRIORITY );
+        T_eq_u32( event->thread->Object.id, ctx->worker_id );
+      }
+
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_NOP );
+    text: |
+      The state of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall be blocked after the ${../if/restart:/name} call.
+  - name: Ready
+    test-code: |
+      T_eq_u32( ctx->worker_state, STATES_READY );
+
+      if ( ctx->protected ) {
+        if ( ctx->suspended ) {
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+        }
       } else {
-        T_eq_sz( ctx->scheduler_log.header.recorded, 2 );
-        T_eq_int(
-          ctx->scheduler_log.events[ 0 ].operation,
-          T_SCHEDULER_BLOCK
-        );
-        T_eq_int(
-          ctx->scheduler_log.events[ 1 ].operation,
-          T_SCHEDULER_UNBLOCK
-        );
+        if ( ctx->suspended || ctx->blocked ) {
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+        } else {
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_BLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+        }
+      }
+
+      if ( !ctx->real_priority_is_initial ) {
+        event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+        T_eq_int( event->operation, T_SCHEDULER_UPDATE_PRIORITY );
+        T_eq_u32( event->thread->Object.id, ctx->worker_id );
+      }
+
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_NOP );
+    text: |
+      The state of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall be ready after the ${../if/restart:/name} call.
+  - name: Zombie
+    test-code: |
+      T_eq_u32( ctx->worker_state, STATES_ZOMBIE );
+
+      if ( ctx->protected ) {
+        if ( ctx->suspended ) {
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+        }
+      } else {
+        if ( ctx->suspended ) {
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+        } else {
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_BLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+
+          event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+          T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+          T_eq_u32( event->thread->Object.id, ctx->worker_id );
+        }
+      }
+
+      if ( !ctx->real_priority_is_initial ) {
+        event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+        T_eq_int( event->operation, T_SCHEDULER_UPDATE_PRIORITY );
+        T_eq_u32( event->thread->Object.id, ctx->worker_id );
       }
+
+      /* Set zombie state */
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_BLOCK );
+      T_eq_u32( event->thread->Object.id, ctx->worker_id );
+
+      /* Wake up deleter */
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_UNBLOCK );
+      T_eq_u32( event->thread->Object.id, ctx->deleter_id );
+
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_NOP );
     text: |
-      The task specified by the ${../if/restart:/params[0]/name} parameter shall
-      be unblocked by the ${../if/restart:/name} call.
+      The state of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall be the zombie state after the ${../if/restart:/name}
+      call.
   - name: Nop
     test-code: |
-      T_eq_sz( ctx->scheduler_log.header.recorded, 0 );
+      T_ne_u32( ctx->worker_state & STATES_LIFE_IS_CHANGING, 0 );
+
+      if ( !ctx->real_priority_is_initial ) {
+        event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+        T_eq_int( event->operation, T_SCHEDULER_UPDATE_PRIORITY );
+        T_eq_u32( event->thread->Object.id, ctx->worker_id );
+      }
+
+      event = T_scheduler_next_any( &ctx->scheduler_log.header, &index );
+      T_eq_int( event->operation, T_SCHEDULER_NOP );
+    text: |
+      The state of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall not be modified by the ${../if/restart:/name} call.
+  test-epilogue: null
+  test-prologue: |
+    const T_scheduler_event *event;
+    size_t                   index;
+
+    index = 0;
+- name: Enqueued
+  states:
+  - name: 'Yes'
+    test-code: |
+      T_not_null( ctx->worker_thread->Wait.queue );
+    text: |
+      The task specified by the ${../if/restart:/params[0]/name} parameter
+      shall be enqueued on a ${/glossary/waitqueue:/term}
+      after the ${../if/restart:/name} call.
+  - name: 'No'
+    test-code: |
+      T_null( ctx->worker_thread->Wait.queue );
+    text: |
+      The task specified by the ${../if/restart:/params[0]/name} parameter
+      shall not be enqueued on a ${/glossary/waitqueue:/term} after the
+      ${../if/restart:/name} call.
+  test-epilogue: null
+  test-prologue: null
+- name: Timer
+  states:
+  - name: Active
+    test-code: |
+      GetTaskTimerInfoByThread( ctx->worker_thread, &info );
+      T_eq_int( info.state, TASK_TIMER_TICKS );
+    text: |
+      The timer of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall be active after the ${../if/restart:/name} call.
+  - name: Inactive
+    test-code: |
+      GetTaskTimerInfoByThread( ctx->worker_thread, &info );
+      T_eq_int( info.state, TASK_TIMER_INACTIVE );
+    text: |
+      The timer of the task specified by the ${../if/restart:/params[0]/name}
+      parameter shall be inactive after the ${../if/restart:/name} call.
+  test-epilogue: null
+  test-prologue: |
+    TaskTimerInfo info;
+- name: Restarting
+  states:
+  - name: 'Yes'
+    test-code: |
+      T_ne_int( ctx->worker_life_state & THREAD_LIFE_RESTARTING, 0 );
+    text: |
+      The task specified by the ${../if/restart:/params[0]/name} parameter
+      shall be restarting after the ${../if/restart:/name} call.
+  - name: 'No'
+    test-code: |
+      T_eq_int( ctx->worker_life_state & THREAD_LIFE_RESTARTING, 0 );
+    text: |
+      The task specified by the ${../if/restart:/params[0]/name} parameter
+      shall not be restarting after the ${../if/restart:/name} call.
+  test-epilogue: null
+  test-prologue: null
+- name: Terminating
+  states:
+  - name: 'Yes'
+    test-code: |
+      T_ne_int( ctx->worker_life_state & THREAD_LIFE_TERMINATING, 0 );
+    text: |
+      The task specified by the ${../if/restart:/params[0]/name} parameter
+      shall be terminating after the ${../if/restart:/name} call.
+  - name: 'No'
+    test-code: |
+      T_eq_int( ctx->worker_life_state & THREAD_LIFE_TERMINATING, 0 );
+    text: |
+      The task specified by the ${../if/restart:/params[0]/name} parameter
+      shall not be terminating after the ${../if/restart:/name} call.
+  test-epilogue: null
+  test-prologue: null
+- name: Protected
+  states:
+  - name: 'Yes'
+    test-code: |
+      T_ne_int( ctx->worker_life_state & THREAD_LIFE_PROTECTED, 0 );
     text: |
-      No task shall be unblocked by the ${../if/restart:/name} call.
+      The thread life of the task specified by the
+      ${../if/restart:/params[0]/name} parameter shall be protected after the
+      ${../if/restart:/name} call.
+  - name: 'No'
+    test-code: |
+      T_eq_int( ctx->worker_life_state & THREAD_LIFE_PROTECTED, 0 );
+    text: |
+      The thread life of the task specified by the
+      ${../if/restart:/params[0]/name} parameter shall not be protected after
+      the ${../if/restart:/name} call.
   test-epilogue: null
   test-prologue: null
 - name: RestartExtensions
   states:
   - name: 'Yes'
     test-code: |
-      T_eq_u32( ctx->restart_extension_calls, 1 );
+      T_eq_u32( ctx->calls_after_restart.thread_restart, 1 );
     text: |
       The thread restart user extensions shall be invoked by the
       ${../if/restart:/name} call.
   - name: Nop
     test-code: |
-      T_eq_u32( ctx->restart_extension_calls, 0 );
+      T_eq_u32( ctx->calls_after_restart.thread_restart, 0 );
     text: |
       The thread restart user extensions shall not be invoked by the
       ${../if/restart:/name} call.
@@ -110,135 +353,468 @@ pre-conditions:
     text: |
       While the ${../if/restart:/params[0]/name} parameter is not associated with
       a task.
-  - name: Task
+  - name: Executing
+    test-code: |
+      ctx->id = RTEMS_SELF;
+    text: |
+      While the ${../if/restart:/params[0]/name} parameter is associated with
+      the calling task.
+  - name: Other
     test-code: |
       ctx->id = ctx->worker_id;
     text: |
       While the ${../if/restart:/params[0]/name} parameter is associated with a
-      task.
+      task other than the calling task.
   test-epilogue: null
   test-prologue: null
-- name: Argument
+- name: Dormant
   states:
-  - name: Pointer
+  - name: 'Yes'
     test-code: |
-      ctx->argument = (rtems_task_argument) ctx;
+      ctx->dormant = true;
     text: |
-      While the entry point argument specified by the
-      ${../if/restart:/params[1]/name} parameter is a pointer.
-  - name: Number
+      While the task specified by the ${../if/restart:/params[0]/name} parameter
+      is dormant.
+  - name: 'No'
     test-code: |
-      ctx->argument = UINT32_C( 0x87654321 );
+      ctx->dormant = false;
     text: |
-      While the entry point argument specified by the
-      ${../if/restart:/params[1]/name} parameter is a 32-bit number.
+      While the task specified by the ${../if/restart:/params[0]/name} parameter
+      is not dormant.
   test-epilogue: null
   test-prologue: null
-- name: Dormant
+- name: Suspended
   states:
   - name: 'Yes'
     test-code: |
-      ctx->start = false;
+      ctx->suspended = true;
     text: |
       While the task specified by the ${../if/restart:/params[0]/name} parameter
-      is dormant.
+      is suspended.
   - name: 'No'
     test-code: |
-      ctx->start = true;
+      ctx->suspended = false;
     text: |
       While the task specified by the ${../if/restart:/params[0]/name} parameter
-      is not dormant.
+      is not suspended.
   test-epilogue: null
   test-prologue: null
-- name: Suspended
+- name: Restarting
   states:
   - name: 'Yes'
     test-code: |
-      ctx->suspend = true;
+      ctx->restarting = true;
     text: |
       While the task specified by the ${../if/restart:/params[0]/name} parameter
-      is suspended.
+      is restarting.
   - name: 'No'
     test-code: |
-      ctx->suspend = false;
+      ctx->restarting = false;
     text: |
       While the task specified by the ${../if/restart:/params[0]/name} parameter
-      is not suspended.
+      is not restarting.
+  test-epilogue: null
+  test-prologue: null
+- name: Terminating
+  states:
+  - name: 'Yes'
+    test-code: |
+      ctx->terminating = true;
+    text: |
+      While the task specified by the ${../if/restart:/params[0]/name} parameter
+      is terminating.
+  - name: 'No'
+    test-code: |
+      ctx->terminating = false;
+    text: |
+      While the task specified by the ${../if/restart:/params[0]/name} parameter
+      is not terminating.
+  test-epilogue: null
+  test-prologue: null
+- name: Protected
+  states:
+  - name: 'Yes'
+    test-code: |
+      ctx->protected = true;
+    text: |
+      While the thread life of the task specified by the
+      ${../if/restart:/params[0]/name} parameter is protected.
+  - name: 'No'
+    test-code: |
+      ctx->protected = false;
+    text: |
+      While the thread life of the task specified by the
+      ${../if/restart:/params[0]/name} parameter is not protected.
+  test-epilogue: null
+  test-prologue: null
+- name: Context
+  states:
+  - name: Task
+    test-code: |
+      ctx->interrupt = false;
+      ctx->nested_request = false;
+    text: |
+      While the ${../if/restart:/name} directive is called from within task
+      context.
+  - name: Interrupt
+    test-code: |
+      ctx->interrupt = true;
+      ctx->nested_request = false;
+    text: |
+      While the ${../if/restart:/name} directive is called from within
+      interrupt context.
+  - name: NestedRequest
+    test-code: |
+      ctx->interrupt = false;
+      ctx->nested_request = true;
+    text: |
+      While the ${../if/restart:/name} directive is called during another
+      ${../if/restart:/name} call with the same task as a nested request.
+  test-epilogue: null
+  test-prologue: null
+- name: State
+  states:
+  - name: Ready
+    test-code: |
+      ctx->blocked = false;
+    text: |
+      While the task specified by the ${../if/restart:/params[0]/name}
+      parameter is a ${/glossary/readytask:/term} or
+      ${/glossary/scheduledtask:/term}.
+  - name: Blocked
+    test-code: |
+      ctx->blocked = true;
+      ctx->enqueued = false;
+    text: |
+      While the task specified by the ${../if/restart:/params[0]/name}
+      parameter is blocked.
+  - name: Enqueued
+    test-code: |
+      ctx->blocked = true;
+      ctx->enqueued = true;
+    text: |
+      While the task specified by the ${../if/restart:/params[0]/name}
+      parameter is enqueued on a ${/glossary/waitqueue:/term}.
+  test-epilogue: null
+  test-prologue: null
+- name: Timer
+  states:
+  - name: Inactive
+    test-code: |
+      ctx->timer_active = false;
+    text: |
+      While the timer of the task specified by the ${../if/restart:/params[0]/name}
+      parameter is inactive.
+  - name: Active
+    test-code: |
+      ctx->timer_active = true;
+    text: |
+      While the timer of the task specified by the ${../if/restart:/params[0]/name}
+      parameter is active.
+  test-epilogue: null
+  test-prologue: null
+- name: RealPriority
+  states:
+  - name: Initial
+    test-code: |
+      ctx->real_priority_is_initial = true;
+    text: |
+      While ${/glossary/priority-real:/term} of the task specified by the
+      ${../if/restart:/params[0]/name} parameter is equal to the initial
+      priority.
+  - name: Changed
+    test-code: |
+      ctx->real_priority_is_initial = false;
+    text: |
+      While ${/glossary/priority-real:/term} of the task specified by the
+      ${../if/restart:/params[0]/name} parameter is not equal to the initial
+      priority.
+  test-epilogue: null
+  test-prologue: null
+- name: ThreadDispatch
+  states:
+  - name: Disabled
+    test-code: |
+      ctx->dispatch_disabled = true;
+    text: |
+      While thread dispatching is disabled for the calling task.
+  - name: Enabled
+    test-code: |
+      ctx->dispatch_disabled = false;
+    text: |
+      While thread dispatching is enabled for the calling task.
   test-epilogue: null
   test-prologue: null
 rationale: null
 references: []
 requirement-type: functional
-skip-reasons: {}
+skip-reasons:
+  ExecutingIsNotDormant: |
+    An executing thread was started and thus is never dormant.
+  ExecutingIsNotBlocked: |
+    An executing thread is not blocked.
+  NotBlockedHasInactiveTimer: |
+    The timer of a not blocked thread is inactive.
+  ThreadDispatchDisabled: |
+    While ISRs or nested requests are processed, the thread dispatching is
+    disabled.
+  NestedRequestNotDormant: |
+    A nested request can only happen if the thread is not dormant.
+  NestedRequestNotProtected: |
+    A nested request can only happen if the thread life protection of the task
+    is disabled.
+  NestedRequestNeedsLifeChanging: |
+    A nested request can only happen if the thread life is changing.
+  NestedRequestNoNestedExit: |
+    A nested request cannot happen during a thread exit.
+  NestedRequestNeedsTask: |
+    A nested request needs a task to restart.
 test-action: |
-  T_scheduler_log *log;
+  rtems_status_code sc;
 
-  if ( ctx->start ) {
-    StartTask( ctx->worker_id, Worker, 0 );
-    Yield();
-  }
+  if ( ctx->id != INVALID_ID ) {
+    if ( ctx->dormant ) {
+      PrepareRealPriority( ctx );
+    } else {
+      StartTask( ctx->worker_id, Worker, NULL );
 
-  if ( ctx->suspend ) {
-    SuspendTask( ctx->worker_id );
-  }
+      /* Let the worker catch signals and set the thread life protection state */
+      Yield();
 
-  ctx->restart_extension_calls = 0;
+      sc = rtems_signal_send( ctx->worker_id, RTEMS_SIGNAL_0 );
+      T_rsc_success( sc );
 
-  log = T_scheduler_record_2( &ctx->scheduler_log );
-  T_null( log );
+      if (
+        ctx->restarting &&
+        ( !ctx->nested_request || ( ctx->nested_request && ctx->terminating ) )
+      ) {
+        sc = rtems_task_restart( ctx->worker_id, (rtems_task_argument) ctx );
+        T_rsc_success( sc );
+      }
 
-  ctx->status = rtems_task_restart( ctx->id, ctx->argument );
+      if ( ctx->terminating && !ctx->nested_request ) {
+        sc = rtems_task_restart( ctx->deleter_id, (rtems_task_argument) ctx );
+        T_rsc_success( sc );
+      } else {
+        PrepareRealPriority( ctx );
+        Yield();
+      }
+    }
+  }
 
-  log = T_scheduler_record( NULL );
-  T_eq_ptr( &log->header, &ctx->scheduler_log.header );
+  if ( ctx->id == RTEMS_SELF ) {
+    CaptureWorkerState( ctx );
+  } else {
+    if ( ctx->nested_request ) {
+      if ( ctx->terminating ) {
+        sc = rtems_task_restart( ctx->deleter_id, (rtems_task_argument) ctx );
+        T_rsc_success( sc );
+      } else {
+        PrepareNestedRequest( ctx );
 
-  Yield();
+        sc = rtems_task_restart( ctx->worker_id, (rtems_task_argument) ctx );
+        T_rsc_success( sc );
+      }
+    } else {
+      SetSelfPriority( PRIO_VERY_HIGH );
+
+      if ( ctx->interrupt ) {
+        CallWithinISR( Restart, ctx );
+      } else {
+        Restart( ctx );
+      }
+    }
+  }
 test-brief: null
 test-cleanup: |
-  DeleteTask( ctx->worker_id );
+  SetSelfPriority( PRIO_VERY_LOW );
+
+  if ( ctx->protected && ctx->blocked ) {
+    if ( ctx->enqueued ) {
+      ReleaseMutex( ctx->mutex_id );
+      ObtainMutex( ctx->mutex_id );
+    } else {
+      SendEvents( ctx->worker_id, RTEMS_EVENT_0 );
+    }
+  }
+
+  if ( ctx->id == INVALID_ID || ctx->dormant || !ctx->terminating ) {
+    DeleteTask( ctx->worker_id );
+  }
+
+  SetSelfPriority( PRIO_NORMAL );
 test-context:
 - brief: |
     This member provides the scheduler operation records.
   description: null
   member: |
-    T_scheduler_log_2 scheduler_log
+    T_scheduler_log_10 scheduler_log
+- brief: |
+    This member provides a jump context to resume a thread dispatch.
+  description: null
+  member: |
+    jmp_buf thread_dispatch_context;
 - brief: |
-    This member contains the identifier of a task.
+    This member provides a call within ISR request.
+  description: null
+  member: |
+    CallWithinISRRequest isr_request
+- brief: |
+    This member may reference wrapped thread queue operations used to trigger a
+    nested request.
+  description: null
+  member: |
+    const Thread_queue_Operations *wrapped_operations
+- brief: |
+    This member provides a thread queue used to trigger a nested request.
+  description: null
+  member: |
+    Thread_queue_Control thread_queue
+- brief: |
+    This member contains the identifier of the runner scheduler.
+  description: null
+  member: |
+    rtems_id scheduler_id
+- brief: |
+    This member contains the identifier of the runner task.
+  description: null
+  member: |
+    rtems_id runner_id
+- brief: |
+    This member contains the identifier of the mutex.
+  description: null
+  member: |
+    rtems_id mutex_id
+- brief: |
+    This member contains the identifier of the worker task.
   description: null
   member: |
     rtems_id worker_id
 - brief: |
+    This member references the TCB of the worker task.
+  description: null
+  member: |
+    rtems_tcb *worker_thread
+- brief: |
+    This member contains the worker state at the end of the
+    ${../if/restart:/name} call.
+  description: null
+  member: |
+    States_Control worker_state
+- brief: |
+    This member contains the worker thread life state at the end of the
+    ${../if/restart:/name} call.
+  description: null
+  member: |
+    Thread_Life_state worker_life_state
+- brief: |
+    This member contains the identifier of the deleter task.
+  description: null
+  member: |
+    rtems_id deleter_id
+- brief: |
     This member contains the identifier of the test user extensions.
   description: null
   member: |
     rtems_id extension_id
 - brief: |
-    This member contains the count of thread restart extension calls.
+    This member contains extension calls.
   description: null
   member: |
-    uint32_t restart_extension_calls
+    ExtensionCalls calls;
+- brief: |
+    This member contains extension calls after the ${../if/restart:/name} call.
+  description: null
+  member: |
+    ExtensionCalls calls_after_restart;
 - brief: |
     This member contains the actual argument passed to the entry point.
   description: null
   member: |
     rtems_task_argument actual_argument
 - brief: |
-    This member contains the entry point counter.
+    This member contains the restart counter.
   description: null
   member: |
-    uint32_t counter
+    uint32_t restart_counter
 - brief: |
-    If this member is true, then the worker is started before the
+    If this member is true, then the worker shall be dormant before the
     ${../if/restart:/name} call.
   description: null
   member: |
-    bool start
+    bool dormant
 - brief: |
-    If this member is true, then the worker is suspended before the
+    If this member is true, then the worker shall be suspended before the
     ${../if/restart:/name} call.
   description: null
   member: |
-    bool suspend
+    bool suspended
+- brief: |
+    If this member is true, then the thread life of the worker shall be
+    protected before the ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool protected
+- brief: |
+    If this member is true, then the worker shall be restarting before the
+    ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool restarting
+- brief: |
+    If this member is true, then the worker shall be terminating before the
+    ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool terminating
+- brief: |
+    If this member is true, then the ${../if/restart:/name} shall be called
+    from within interrupt context.
+  description: null
+  member: |
+    bool interrupt
+- brief: |
+    If this member is true, then the ${../if/restart:/name} shall be called
+    during another ${../if/restart:/name} call with the same task as a nested
+    request.
+  description: null
+  member: |
+    bool nested_request
+- brief: |
+    If this member is true, then the worker shall be blocked before the
+    ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool blocked
+- brief: |
+    If this member is true, then the worker shall be enqueued on a
+    ${/glossary/waitqueue:/term} before the ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool enqueued
+- brief: |
+    If this member is true, then the timer of the worker shall be active before
+    the ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool timer_active
+- brief: |
+    If this member is true, then the real priority of the worker shall be equal
+    to its initial priority before the ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool real_priority_is_initial
+- brief: |
+    If this member is true, then thread dispatching is disabled by the worker
+    task before the ${../if/restart:/name} call.
+  description: null
+  member: |
+    bool dispatch_disabled
+- brief: |
+    If this member is true, then it is expected to delete the worker.
+  description: null
+  member: |
+    bool delete_worker_expected
 - brief: |
     This member contains the return value of the ${../if/restart:/name}
     call.
@@ -251,63 +827,485 @@ test-context:
   description: null
   member: |
     rtems_id id
-- brief: |
-    This member specifies if the ${../if/restart:/params[1]/name}
-    parameter value.
-  description: null
-  member: |
-    rtems_task_argument argument
 test-context-support: null
 test-description: null
 test-header: null
 test-includes:
+- limits.h
 - rtems.h
+- setjmp.h
 - rtems/test-scheduler.h
+- rtems/score/statesimpl.h
+- rtems/score/threaddispatch.h
+- rtems/score/threadimpl.h
+- rtems/score/threadqimpl.h
 test-local-includes:
 - tx-support.h
 test-prepare: |
-  ctx->actual_argument = 0;
-  ctx->counter = 0;
-  ctx->worker_id = CreateTask( "WORK", PRIO_DEFAULT );
+  ctx->status = RTEMS_NOT_IMPLEMENTED;
+  ctx->actual_argument = UNSET_ARGUMENT;
+  ctx->restart_counter = 0;
+
+  ctx->delete_worker_expected = false;
+  ctx->worker_id = CreateTask( "WORK", PRIO_NORMAL );
+  ctx->delete_worker_expected = true;
+
+  ctx->worker_thread = GetThread( ctx->worker_id );
+  ctx->worker_state = UINT32_MAX;
+  ctx->worker_life_state = INT_MAX;
+
+  SetPriority( ctx->deleter_id, PRIO_HIGH );
 test-setup:
   brief: null
   code: |
     rtems_status_code sc;
 
+    ctx->runner_id = rtems_task_self();
+    ctx->scheduler_id = GetSelfScheduler();
+    ctx->mutex_id = CreateMutexNoProtocol();
+    ObtainMutex( ctx->mutex_id );
+
+    ctx->isr_request.handler = Restart;
+    ctx->isr_request.arg = ctx;
+
+    _Thread_queue_Initialize( &ctx->thread_queue, "Nested Request" );
+
     sc = rtems_extension_create(
       rtems_build_name( 'T', 'E', 'S', 'T' ),
       &extensions,
       &ctx->extension_id
     );
     T_rsc_success( sc );
+
+    SetFatalExtension( Fatal );
+    SetSelfPriority( PRIO_NORMAL );
+
+    ctx->deleter_id = CreateTask( "DELE", PRIO_HIGH );
+    StartTask( ctx->deleter_id, Deleter, NULL );
   description: null
 test-stop: null
 test-support: |
+  #if CPU_SIZEOF_POINTER > 4
+  #define RESTART_ARGUMENT 0xfedcba0987654321U
+  #else
+  #define RESTART_ARGUMENT 0x87654321U
+  #endif
+
+  #define UNSET_ARGUMENT 1
+
   typedef RtemsTaskReqRestart_Context Context;
 
-  static void Worker( rtems_task_argument arg )
+  static void PrepareRealPriority( Context *ctx )
+  {
+    if ( !ctx->real_priority_is_initial ) {
+      SetScheduler( ctx->worker_id, ctx->scheduler_id, PRIO_LOW );
+      SetPriority( ctx->worker_id, PRIO_NORMAL );
+    }
+  }
+
+  static void CaptureWorkerState( Context *ctx )
+  {
+    T_scheduler_log *log;
+
+    log = T_scheduler_record( NULL );
+
+    if ( log != NULL ) {
+      T_eq_ptr( &log->header, &ctx->scheduler_log.header );
+
+      ctx->worker_state = ctx->worker_thread->current_state;
+      ctx->worker_life_state = ctx->worker_thread->Life.state;
+      CopyExtensionCalls( &ctx->calls, &ctx->calls_after_restart );
+    }
+  }
+
+  static void VerifyTaskPreparation( const Context *ctx )
+  {
+    if ( ctx->id != INVALID_ID ) {
+      States_Control state;
+      Thread_Life_state life_state;
+
+      state = STATES_READY;
+      life_state = ctx->worker_thread->Life.state;
+
+      if ( ctx->suspended ) {
+        state |= STATES_SUSPENDED;
+      }
+
+      if ( ctx->dormant ) {
+        T_eq_int( life_state, 0 );
+        state |= STATES_DORMANT;
+      } else {
+        T_eq( ctx->protected, ( life_state & THREAD_LIFE_PROTECTED ) != 0 );
+        T_eq( ctx->restarting, ( life_state & THREAD_LIFE_RESTARTING ) != 0 );
+        T_eq( ctx->terminating, ( life_state & THREAD_LIFE_TERMINATING ) != 0 );
+
+        if ( ctx->blocked ) {
+          if ( ctx->enqueued ) {
+            state |= STATES_WAITING_FOR_MUTEX;
+          } else {
+            state |= STATES_WAITING_FOR_EVENT;
+          }
+        }
+
+        if ( ctx->nested_request ) {
+          state |= STATES_LIFE_IS_CHANGING;
+        }
+      }
+
+      T_eq_u32( ctx->worker_thread->current_state, state );
+    }
+  }
+
+  static void Restart( void *arg )
+  {
+    Context         *ctx;
+    T_scheduler_log *log;
+
+    ctx = arg;
+
+    if ( ctx->suspended && ctx->id != INVALID_ID ) {
+      if ( ctx->id != RTEMS_SELF || ctx->interrupt ) {
+        SuspendTask( ctx->worker_id );
+      } else {
+        Per_CPU_Control *cpu_self;
+
+        /*
+         * Where the system was built with SMP support enabled, a suspended
+         * executing thread during the ${../if/restart:/name} call can happen
+         * if the thread was suspended by another processor and the inter
+         * interrupt interrupt did not yet arrive.  Where the system was built
+         * with SMP support disabled, this state cannot happen with the current
+         * implementation.  However, we still specify and validate this
+         * behaviour unconditionally since there exist alternative
+         * implementations which would lead to such a state if the executing
+         * thread is suspended by an ISR.
+         */
+        cpu_self = _Thread_Dispatch_disable();
+        SuspendSelf();
+        cpu_self->dispatch_necessary = false;
+        _Thread_Dispatch_enable( cpu_self );
+      }
+    }
+
+    if ( ctx->dispatch_disabled ) {
+      _Thread_Dispatch_disable();
+    }
+
+    VerifyTaskPreparation( ctx );
+    ClearExtensionCalls( &ctx->calls );
+
+    log = T_scheduler_record_10( &ctx->scheduler_log );
+    T_null( log );
+
+    ctx->status = rtems_task_restart( ctx->id, RESTART_ARGUMENT );
+
+    CaptureWorkerState( ctx );
+
+    if ( ctx->dispatch_disabled ) {
+      _Thread_Dispatch_enable( _Per_CPU_Get() );
+    }
+  }
+
+  static void Block( const Context *ctx )
+  {
+    rtems_interval ticks;
+
+    if ( ctx->timer_active ) {
+      ticks = UINT32_MAX;
+    } else {
+      ticks = RTEMS_NO_TIMEOUT;
+    }
+
+    if ( ctx->enqueued ) {
+      ObtainMutexTimed( ctx->mutex_id, ticks );
+    } else {
+      (void) ReceiveAnyEventsTimed( ticks );
+    }
+  }
+
+  static void BlockDone( const Context *ctx )
+  {
+    if ( ctx->enqueued ) {
+      ReleaseMutex( ctx->mutex_id );
+    }
+  }
+
+  static void Fatal(
+    rtems_fatal_source source,
+    bool               always_set_to_false,
+    rtems_fatal_code   code
+  )
+  {
+    Context         *ctx;
+    Per_CPU_Control *cpu_self;
+
+    T_eq_int( source, INTERNAL_ERROR_CORE );
+    T_false( always_set_to_false );
+    T_eq_ulong( code, INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
+
+    ctx = T_fixture_context();
+    ++ctx->calls.fatal;
+    T_assert_eq_int( ctx->calls.fatal, 1 );
+
+    CaptureWorkerState( ctx );
+
+    cpu_self = _Per_CPU_Get();
+    _Thread_Dispatch_unnest( cpu_self );
+    _Thread_Dispatch_direct_no_return( cpu_self );
+  }
+
+  static void ResumeThreadDispatch(
+    rtems_fatal_source source,
+    bool               always_set_to_false,
+    rtems_fatal_code   code
+  )
+  {
+    Context *ctx;
+
+    T_eq_int( source, INTERNAL_ERROR_CORE );
+    T_false( always_set_to_false );
+    T_eq_ulong( code, INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
+
+    SetFatalExtension( Fatal );
+
+    ctx = T_fixture_context();
+    longjmp( ctx->thread_dispatch_context, 1 );
+  }
+
+  static void TriggerNestedRequest(
+    Thread_queue_Queue   *queue,
+    Thread_Control       *thread,
+    Thread_queue_Context *queue_context
+  )
+  {
+    Context *ctx;
+
+    ctx = T_fixture_context();
+    CallWithinISRSubmit( &ctx->isr_request );
+
+    if ( ctx->wrapped_operations ) {
+      thread->Wait.operations = ctx->wrapped_operations;
+      ( *thread->Wait.operations->extract )( queue, thread, queue_context );
+    }
+  }
+
+  const Thread_queue_Operations trigger_nested_request = {
+    .extract = TriggerNestedRequest
+  };
+
+  static void PrepareNestedRequest( Context *ctx )
+  {
+    Thread_Control *thread;
+
+    thread = ctx->worker_thread;
+
+    if ( thread->Wait.queue != NULL ) {
+      ctx->wrapped_operations = thread->Wait.operations;
+      thread->Wait.operations = &trigger_nested_request;
+    } else {
+      Thread_queue_Context queue_context;
+
+      ctx->wrapped_operations = NULL;
+      _Thread_queue_Context_initialize( &queue_context );
+      _Thread_queue_Acquire( &ctx->thread_queue, &queue_context );
+      _Thread_Wait_flags_set(
+        thread,
+        THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
+      );
+      _Thread_Wait_claim( thread, &ctx->thread_queue.Queue );
+      _Thread_Wait_claim_finalize( thread, &trigger_nested_request );
+      _Thread_queue_Release( &ctx->thread_queue, &queue_context );
+    }
+  }
+
+  static void TriggerNestedRequestViaSelfRestart(
+    Context         *ctx,
+    Per_CPU_Control *cpu_self
+  )
+  {
+    PrepareNestedRequest( ctx );
+    SetFatalExtension( ResumeThreadDispatch );
+
+    if ( setjmp( ctx->thread_dispatch_context ) == 0 ) {
+      (void) rtems_task_restart(
+        RTEMS_SELF,
+        (rtems_task_argument) ctx
+      );
+    } else {
+      _Thread_Dispatch_unnest( cpu_self );
+    }
+  }
+
+  static void Signal( rtems_signal_set signals )
+  {
+    Context *ctx;
+
+    (void) signals;
+    ctx = T_fixture_context();
+
+    if ( ctx->id == RTEMS_SELF ) {
+      Per_CPU_Control *cpu_self;
+
+      SetPriority( ctx->runner_id, PRIO_VERY_LOW );
+      SetPriority( ctx->deleter_id, PRIO_VERY_LOW );
+
+      if ( ctx->interrupt || ctx->nested_request ) {
+        if ( ctx->blocked ) {
+          SetFatalExtension( ResumeThreadDispatch );
+          cpu_self = _Thread_Dispatch_disable();
+
+          if ( setjmp( ctx->thread_dispatch_context ) == 0 ) {
+            Block( ctx );
+          } else {
+            _Thread_Dispatch_unnest( cpu_self );
+          }
+
+          if ( ctx->interrupt ) {
+            CallWithinISR( Restart, ctx );
+          } else {
+            TriggerNestedRequestViaSelfRestart( ctx, cpu_self );
+          }
+
+          _Thread_Dispatch_direct( cpu_self );
+          BlockDone( ctx );
+        } else {
+          if ( ctx->interrupt ) {
+            CallWithinISR( Restart, ctx );
+          } else {
+            cpu_self = _Thread_Dispatch_disable();
+            TriggerNestedRequestViaSelfRestart( ctx, cpu_self );
+            _Thread_Dispatch_direct( cpu_self );
+          }
+        }
+      } else {
+        Restart( ctx );
+      }
+    } else {
+      if ( ctx->blocked ) {
+        Block( ctx );
+        BlockDone( ctx );
+      } else if ( ctx->nested_request ) {
+        Yield();
+      } else {
+        SetPriority( ctx->runner_id, PRIO_VERY_HIGH );
+      }
+    }
+
+    if ( ctx->protected ) {
+      _Thread_Set_life_protection( 0 );
+    }
+  }
+
+  static void Deleter( rtems_task_argument arg )
   {
     Context *ctx;
 
-    ctx = &RtemsTaskReqRestart_Instance;
+    ctx = (Context *) arg;
+
+    if ( ctx != NULL ) {
+      if ( ctx->real_priority_is_initial ) {
+        /*
+         * We have to prevent a potential priority boost in the task delete
+         * below.
+         */
+        if ( ctx->nested_request ) {
+          rtems_status_code sc;
+          rtems_id          id;
+
+          /*
+           * This sequence lowers the priority to PRIO_NORMAL without an
+           * implicit yield.
+           */
+
+          sc = rtems_semaphore_create(
+            rtems_build_name( 'M', 'U', 'T', 'X' ),
+            0,
+            RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
+            PRIO_HIGH,
+            &id
+          );
+          T_rsc_success( sc );
+
+          SetSelfPriority( PRIO_NORMAL );
+          ReleaseMutex( id );
+          DeleteMutex( id );
+        } else {
+          SetScheduler( ctx->worker_id, ctx->scheduler_id, PRIO_HIGH );
+        }
+      }
+
+      if ( ctx->nested_request ) {
+        PrepareNestedRequest( ctx );
+        DeleteTask( ctx->worker_id );
+      } else {
+        DeleteTask( ctx->worker_id );
+      }
+    }
+
+    SuspendSelf();
+  }
+
+  static void Worker( rtems_task_argument arg )
+  {
+    Context          *ctx;
+    rtems_status_code sc;
+
+    ctx = T_fixture_context();
+
+    if ( arg == 0 ) {
+      sc = rtems_signal_catch( Signal, RTEMS_NO_ASR );
+      T_rsc_success( sc );
+
+      if ( ctx->protected ) {
+        _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
+      }
 
-    while ( true ) {
-      ctx->actual_argument += arg;
-      ++ctx->counter;
       Yield();
+    } else {
+      ctx->actual_argument = arg;
+      ++ctx->restart_counter;
+
+      CaptureWorkerState( ctx );
+      SuspendSelf();
+    }
+  }
+
+  static void ThreadDelete( rtems_tcb *executing, rtems_tcb *deleted )
+  {
+    Context *ctx;
+
+    ctx = T_fixture_context();
+    ++ctx->calls.thread_delete;
+
+    T_eq_u32( executing->Object.id, ctx->runner_id );
+
+    if ( ctx->delete_worker_expected ) {
+      T_eq_u32( deleted->Object.id, ctx->worker_id );
     }
   }
 
   static void ThreadRestart( rtems_tcb *executing, rtems_tcb *restarted )
   {
-    (void) executing;
-    (void) restarted;
+    Context *ctx;
 
-    ++RtemsTaskReqRestart_Instance.restart_extension_calls;
+    ctx = T_fixture_context();
+    ++ctx->calls.thread_restart;
+  }
+
+  static void ThreadTerminate( rtems_tcb *executing )
+  {
+    Context *ctx;
+
+    ctx = T_fixture_context();
+    ++ctx->calls.thread_terminate;
+
+    T_eq_u32( executing->Object.id, ctx->worker_id );
   }
 
   static const rtems_extensions_table extensions = {
-    .thread_restart = ThreadRestart
+    .thread_delete = ThreadDelete,
+    .thread_restart = ThreadRestart,
+    .thread_terminate = ThreadTerminate
   };
 test-target: testsuites/validation/tc-task-restart.c
 test-teardown:
@@ -317,45 +1315,350 @@ test-teardown:
 
     sc = rtems_extension_delete( ctx->extension_id );
     T_rsc_success( sc );
+
+    SetFatalExtension( NULL );
+    DeleteTask( ctx->deleter_id );
+    ReleaseMutex( ctx->mutex_id );
+    DeleteMutex( ctx->mutex_id );
+    RestoreRunnerASR();
+    RestoreRunnerPriority();
+    _Thread_queue_Destroy( &ctx->thread_queue );
   description: null
 text: ${.:text-template}
 transition-map:
 - enabled-by: true
   post-conditions:
-    Status: Ok
+    Status:
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+      then: NoReturn
+    - else: Ok
+    FatalError:
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+          ThreadDispatch: Disabled
+      then: 'Yes'
+    - else: Nop
     Argument: Set
-    Unblock: 'Yes'
-    RestartExtensions: 'Yes'
+    State:
+    - if:
+        pre-conditions:
+          Context: NestedRequest
+      then: Nop
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+          Terminating: 'Yes'
+          ThreadDispatch: Enabled
+      then: Zombie
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+      then: Ready
+    - if:
+        pre-conditions:
+          Protected: 'No'
+      then: Ready
+    - if:
+        pre-conditions:
+          State:
+          - Blocked
+          - Enqueued
+      then: Blocked
+    - else: Ready
+    Enqueued:
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+      then: 'No'
+    - if:
+        pre-conditions:
+          State: Enqueued
+          Protected: 'Yes'
+      then: 'Yes'
+    - else: 'No'
+    Timer:
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+      then: Inactive
+    - if:
+        pre-conditions:
+          Timer: Active
+          Protected: 'Yes'
+      then: Active
+    - else: Inactive
+    Restarting:
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+          Terminating: 'No'
+          ThreadDispatch: Enabled
+      then: 'No'
+    - else: 'Yes'
+    Terminating:
+    - specified-by: Terminating
+    Protected:
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+          Terminating: 'Yes'
+          ThreadDispatch: Enabled
+      then: 'Yes'
+    - if:
+        pre-conditions:
+          Id: Executing
+          Context: Task
+          ThreadDispatch: Enabled
+      then: 'No'
+    - specified-by: Protected
+    RestartExtensions:
+    - if:
+        pre-conditions:
+          Id: Executing
+          Terminating: 'No'
+          Context: Task
+          ThreadDispatch: Enabled
+      then: 'Yes'
+    - else: Nop
   pre-conditions:
     Id:
-    - Task
-    Argument: all
+    - Executing
+    - Other
     Dormant:
     - 'No'
     Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected: all
+    Context: all
+    State: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
 - enabled-by: true
   post-conditions:
     Status: InvId
-    Argument: Nop
-    Unblock: Nop
+    FatalError: Nop
+    Argument: N/A
+    State: N/A
+    Enqueued: N/A
+    Timer: N/A
+    Restarting: N/A
+    Terminating: N/A
+    Protected: N/A
     RestartExtensions: Nop
   pre-conditions:
     Id:
     - Invalid
-    Argument: all
     Dormant: N/A
     Suspended: N/A
+    Restarting: N/A
+    Terminating: N/A
+    Protected: N/A
+    Context: all
+    State: N/A
+    Timer: N/A
+    RealPriority: N/A
+    ThreadDispatch: all
 - enabled-by: true
   post-conditions:
     Status: IncStat
+    FatalError: Nop
     Argument: Nop
-    Unblock: Nop
+    State:
+    - if:
+        pre-conditions:
+          Suspended: 'Yes'
+      then: DormantSuspended
+    - else: Dormant
+    Enqueued: 'No'
+    Timer: Inactive
+    Restarting: 'No'
+    Terminating: 'No'
+    Protected: 'No'
     RestartExtensions: Nop
   pre-conditions:
     Id:
+    - Other
+    Dormant:
+    - 'Yes'
+    Suspended: all
+    Restarting: N/A
+    Terminating: all
+    Protected: N/A
+    Context: all
+    State: N/A
+    Timer: N/A
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: ExecutingIsNotDormant
+  pre-conditions:
+    Id:
+    - Executing
+    Dormant:
+    - 'Yes'
+    Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected: all
+    Context: all
+    State: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: ExecutingIsNotBlocked
+  pre-conditions:
+    Id:
+    - Executing
+    Dormant:
+    - 'No'
+    Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected: all
+    Context:
     - Task
-    Argument: all
+    State:
+    - Blocked
+    - Enqueued
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: NotBlockedHasInactiveTimer
+  pre-conditions:
+    Id:
+    - Executing
+    - Other
     Dormant:
+    - 'No'
+    Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected: all
+    Context: all
+    State:
+    - Ready
+    Timer:
+    - Active
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: ThreadDispatchDisabled
+  pre-conditions:
+    Id: all
+    Dormant: all
+    Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected: all
+    Context:
+    - Interrupt
+    - NestedRequest
+    State: all
+    Enqueued: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch:
+    - Enabled
+- enabled-by: true
+  post-conditions: NestedRequestNotDormant
+  pre-conditions:
+    Id: all
+    Dormant:
+    - 'Yes'
+    Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected: all
+    Context:
+    - NestedRequest
+    State: all
+    Enqueued: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: NestedRequestNotProtected
+  pre-conditions:
+    Id: all
+    Dormant: all
+    Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected:
     - 'Yes'
+    Context:
+    - NestedRequest
+    State: all
+    Enqueued: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: NestedRequestNeedsLifeChanging
+  pre-conditions:
+    Id: all
+    Dormant: all
+    Suspended: all
+    Restarting:
+    - 'No'
+    Terminating:
+    - 'No'
+    Protected: all
+    Context:
+    - NestedRequest
+    State: all
+    Enqueued: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: NestedRequestNoNestedExit
+  pre-conditions:
+    Id:
+    - Executing
+    Dormant: all
+    Suspended: all
+    Restarting: all
+    Terminating:
+    - 'Yes'
+    Protected: all
+    Context:
+    - NestedRequest
+    State: all
+    Enqueued: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
+- enabled-by: true
+  post-conditions: NestedRequestNeedsTask
+  pre-conditions:
+    Id:
+    - Invalid
+    Dormant: all
     Suspended: all
+    Restarting: all
+    Terminating: all
+    Protected: all
+    Context:
+    - NestedRequest
+    State: all
+    Enqueued: all
+    Timer: all
+    RealPriority: all
+    ThreadDispatch: all
 type: requirement



More information about the vc mailing list