[rtems commit] validation: Test scheduler operations

Sebastian Huber sebh at rtems.org
Thu Mar 24 10:01:58 UTC 2022


Module:    rtems
Branch:    master
Commit:    ff50664c5c08cf8d184b0406ca029b7e913a6e7e
Changeset: http://git.rtems.org/rtems/commit/?id=ff50664c5c08cf8d184b0406ca029b7e913a6e7e

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Thu Dec  9 16:22:28 2021 +0100

validation: Test scheduler operations

The test source code is generated from specification items
by the "./spec2modules.py" script contained in the
git://git.rtems.org/rtems-central.git Git repository.

Please read the "How-To" section in the "Software Requirements Engineering"
chapter of the RTEMS Software Engineering manual to get more information about
the process.
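
For reference, regenerating the sources roughly amounts to the following,
assuming a clone of the rtems-central repository with the RTEMS sources set
up inside it as described in the manual:

  git clone git://git.rtems.org/rtems-central.git
  cd rtems-central
  ./spec2modules.py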

Update #3716.

---

 .../validation/validation-no-clock-0.yml           |    1 +
 .../validation/validation-smp-only-0.yml           |    3 +
 .../validation/tc-sched-smp-edf-set-affinity.c     | 1628 ++++++++++++++++++++
 testsuites/validation/tc-sched-smp-edf.c           |  112 ++
 testsuites/validation/tc-sched-smp.c               | 1263 +++++++++++++++
 testsuites/validation/tc-sched-yield.c             |  845 ++++++++++
 6 files changed, 3852 insertions(+)

diff --git a/spec/build/testsuites/validation/validation-no-clock-0.yml b/spec/build/testsuites/validation/validation-no-clock-0.yml
index ef241f3..96921a8 100644
--- a/spec/build/testsuites/validation/validation-no-clock-0.yml
+++ b/spec/build/testsuites/validation/validation-no-clock-0.yml
@@ -67,6 +67,7 @@ source:
 - testsuites/validation/tc-scheduler-ident-by-processor-set.c
 - testsuites/validation/tc-scheduler-ident.c
 - testsuites/validation/tc-scheduler-remove-processor.c
+- testsuites/validation/tc-sched-yield.c
 - testsuites/validation/tc-score-fatal.c
 - testsuites/validation/tc-sem-create.c
 - testsuites/validation/tc-sem-flush.c
diff --git a/spec/build/testsuites/validation/validation-smp-only-0.yml b/spec/build/testsuites/validation/validation-smp-only-0.yml
index 4bef002..33f3b3a 100644
--- a/spec/build/testsuites/validation/validation-smp-only-0.yml
+++ b/spec/build/testsuites/validation/validation-smp-only-0.yml
@@ -17,6 +17,9 @@ source:
 - testsuites/validation/tc-bsp-interrupt-spurious.c
 - testsuites/validation/tc-intr-smp-only.c
 - testsuites/validation/tc-scheduler-smp-only.c
+- testsuites/validation/tc-sched-smp.c
+- testsuites/validation/tc-sched-smp-edf.c
+- testsuites/validation/tc-sched-smp-edf-set-affinity.c
 - testsuites/validation/tc-score-tq-smp.c
 - testsuites/validation/tc-sem-smp.c
 - testsuites/validation/tc-sem-mrsp-obtain.c
diff --git a/testsuites/validation/tc-sched-smp-edf-set-affinity.c b/testsuites/validation/tc-sched-smp-edf-set-affinity.c
new file mode 100644
index 0000000..4e5fab7
--- /dev/null
+++ b/testsuites/validation/tc-sched-smp-edf-set-affinity.c
@@ -0,0 +1,1628 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreSchedSmpEdfReqSetAffinity
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated.  If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual.  The manual is provided as a part of
+ * a release.  For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/threaddispatch.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreSchedSmpEdfReqSetAffinity \
+ *   spec:/score/sched/smp/edf/req/set-affinity
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @{
+ */
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Before_All,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Before_X,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Before_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_Before;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_After_All,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_After_X,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_After_Y,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_After_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_After;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_High,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_Low,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_Priority;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_State_Ready,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_State_Blocked,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_State_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_State;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_Yes,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_No,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_Yes,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_No,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_High,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_Low,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_All,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_X,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_Yes,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_No,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_High,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_Low,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_All,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_Y,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_Yes,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_No,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_NA
+} ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_Task,
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_TaskIdle,
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_Alpha,
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_AlphaIdle,
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_Beta,
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_BetaIdle,
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_NA
+} ScoreSchedSmpEdfReqSetAffinity_Post_X;
+
+typedef enum {
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_Task,
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_TaskIdle,
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_Alpha,
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_AlphaIdle,
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_Beta,
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_BetaIdle,
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_NA
+} ScoreSchedSmpEdfReqSetAffinity_Post_Y;
+
+typedef struct {
+  uint32_t Skip : 1;
+  uint32_t Pre_Before_NA : 1;
+  uint32_t Pre_After_NA : 1;
+  uint32_t Pre_Priority_NA : 1;
+  uint32_t Pre_State_NA : 1;
+  uint32_t Pre_Sticky_NA : 1;
+  uint32_t Pre_Pinned_NA : 1;
+  uint32_t Pre_AlphaPriority_NA : 1;
+  uint32_t Pre_AlphaAffinity_NA : 1;
+  uint32_t Pre_AlphaIdle_NA : 1;
+  uint32_t Pre_BetaPriority_NA : 1;
+  uint32_t Pre_BetaAffinity_NA : 1;
+  uint32_t Pre_BetaIdle_NA : 1;
+  uint32_t Post_X : 3;
+  uint32_t Post_Y : 3;
+} ScoreSchedSmpEdfReqSetAffinity_Entry;
+
+/**
+ * @brief Test context for spec:/score/sched/smp/edf/req/set-affinity test
+ *   case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the thread queue test context.
+   */
+  TQContext tq_ctx;
+
+  /**
+   * @brief This member specifies the task affinity before changing the
+   *   affinity.
+   */
+  cpu_set_t task_affinity_before;
+
+  /**
+   * @brief This member specifies the task affinity after changing the
+   *   affinity.
+   */
+  cpu_set_t task_affinity_after;
+
+  /**
+   * @brief This member specifies the priority of the task.
+   */
+  rtems_task_priority task_priority;
+
+  /**
+   * @brief If this member is true, then the task state shall be ready.
+   */
+  bool task_ready;
+
+  /**
+   * @brief If this member is true, then the task shall have obtained a sticky
+   *   mutex.
+   */
+  bool task_sticky;
+
+  /**
+   * @brief If this member is true, then the task shall be pinned.
+   */
+  bool task_pinned;
+
+  /**
+   * @brief This member specifies the priority of the alpha task.
+   */
+  rtems_task_priority alpha_priority;
+
+  /**
+   * @brief This member specifies the affinity of the alpha task.
+   */
+  cpu_set_t alpha_affinity;
+
+  /**
+   * @brief If this member is true, then an idle task shall execute on behalf
+   *   of the alpha task.
+   */
+  bool alpha_idle;
+
+  /**
+   * @brief This member specifies the priority of the beta task.
+   */
+  rtems_task_priority beta_priority;
+
+  /**
+   * @brief This member specifies the affinity of the beta task.
+   */
+  cpu_set_t beta_affinity;
+
+  /**
+   * @brief If this member is true, then an idle task shall execute on behalf
+   *   of the beta task.
+   */
+  bool beta_idle;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 12 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreSchedSmpEdfReqSetAffinity_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreSchedSmpEdfReqSetAffinity_Context;
+
+static ScoreSchedSmpEdfReqSetAffinity_Context
+  ScoreSchedSmpEdfReqSetAffinity_Instance;
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_Before[] = {
+  "All",
+  "X",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_After[] = {
+  "All",
+  "X",
+  "Y",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_Priority[] = {
+  "High",
+  "Low",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_State[] = {
+  "Ready",
+  "Blocked",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_Sticky[] = {
+  "Yes",
+  "No",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_Pinned[] = {
+  "Yes",
+  "No",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_AlphaPriority[] = {
+  "High",
+  "Low",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_AlphaAffinity[] = {
+  "All",
+  "X",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_AlphaIdle[] = {
+  "Yes",
+  "No",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_BetaPriority[] = {
+  "High",
+  "Low",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_BetaAffinity[] = {
+  "All",
+  "Y",
+  "NA"
+};
+
+static const char * const ScoreSchedSmpEdfReqSetAffinity_PreDesc_BetaIdle[] = {
+  "Yes",
+  "No",
+  "NA"
+};
+
+static const char * const * const ScoreSchedSmpEdfReqSetAffinity_PreDesc[] = {
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_Before,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_After,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_Priority,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_State,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_Sticky,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_Pinned,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_AlphaPriority,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_AlphaAffinity,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_AlphaIdle,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_BetaPriority,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_BetaAffinity,
+  ScoreSchedSmpEdfReqSetAffinity_PreDesc_BetaIdle,
+  NULL
+};
+
+#define TASK TQ_BLOCKER_C
+
+#define ALPHA TQ_BLOCKER_A
+
+#define BETA TQ_BLOCKER_B
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_Before_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context   *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Before state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Before_All: {
+      /*
+       * While task ``T`` is affine to all processors of its home scheduler
+       * before the new thread to processor affinity is set.
+       */
+      CPU_FILL( &ctx->task_affinity_before );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Before_X: {
+      /*
+       * While task ``T`` is affine to processor ``X`` before the new thread to
+       * processor affinity is set.
+       */
+      CPU_ZERO( &ctx->task_affinity_before );
+      CPU_SET( 0, &ctx->task_affinity_before );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Before_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_After_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context  *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_After state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_After_All: {
+      /*
+       * While task ``T`` is set to be affine to all processors of its home
+       * scheduler.
+       */
+      CPU_FILL( &ctx->task_affinity_after );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_After_X: {
+      /*
+       * While task ``T`` is set to be affine to processor ``X``.
+       */
+      CPU_ZERO( &ctx->task_affinity_after );
+      CPU_SET( 0, &ctx->task_affinity_after );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_After_Y: {
+      /*
+       * While task ``T`` is set to be affine to processor ``Y``.
+       */
+      CPU_ZERO( &ctx->task_affinity_after );
+      CPU_SET( 1, &ctx->task_affinity_after );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_After_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context     *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Priority state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_High: {
+      /*
+       * While task ``T`` has a high priority.
+       */
+      ctx->task_priority = PRIO_HIGH;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_Low: {
+      /*
+       * While task ``T`` has a low priority.
+       */
+      ctx->task_priority = PRIO_NORMAL;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_State_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context  *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_State state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_State_Ready: {
+      /*
+       * While task ``T`` is ready.
+       */
+      ctx->task_ready = true;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_State_Blocked: {
+      /*
+       * While task ``T`` is blocked.
+       */
+      ctx->task_ready = false;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_State_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context   *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_Yes: {
+      /*
+       * While task ``T`` is sticky.
+       */
+      ctx->task_sticky = true;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_No: {
+      /*
+       * While task ``T`` is not sticky.
+       */
+      ctx->task_sticky = false;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context   *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_Yes: {
+      /*
+       * While task ``T`` is pinned to a processor.
+       */
+      ctx->task_pinned = true;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_No: {
+      /*
+       * While task ``T`` is not pinned to a processor.
+       */
+      ctx->task_pinned = false;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context          *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_High: {
+      /*
+       * While task ``A`` has a high priority.
+       */
+      ctx->alpha_priority = PRIO_HIGH;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_Low: {
+      /*
+       * While task ``A`` has a low priority.
+       */
+      ctx->alpha_priority = PRIO_NORMAL;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context          *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_All: {
+      /*
+       * While task ``A`` is affine to all processors of its home scheduler.
+       */
+      CPU_FILL( &ctx->alpha_affinity );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_X: {
+      /*
+       * While task ``A`` is affine to processor ``X``.
+       */
+      CPU_ZERO( &ctx->alpha_affinity );
+      CPU_SET( 0, &ctx->alpha_affinity );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context      *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_Yes: {
+      /*
+       * While task ``A`` is sticky, while task ``A`` is blocked.
+       */
+      ctx->alpha_idle = true;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_No: {
+      /*
+       * While task ``A`` is not sticky.
+       */
+      ctx->alpha_idle = false;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context         *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_High: {
+      /*
+       * While task ``B`` has a high priority.
+       */
+      ctx->beta_priority = PRIO_HIGH;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_Low: {
+      /*
+       * While task ``B`` has a low priority.
+       */
+      ctx->beta_priority = PRIO_NORMAL;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context         *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_All: {
+      /*
+       * While task ``B`` is affine to all processors of its home scheduler.
+       */
+      CPU_FILL( &ctx->beta_affinity );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_Y: {
+      /*
+       * While task ``B`` is affine to processor ``Y``.
+       */
+      CPU_ZERO( &ctx->beta_affinity );
+      CPU_SET( 1, &ctx->beta_affinity );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_Prepare(
+  ScoreSchedSmpEdfReqSetAffinity_Context     *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle state
+)
+{
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_Yes: {
+      /*
+       * While task ``B`` is sticky, while task ``B`` is blocked.
+       */
+      ctx->beta_idle = true;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_No: {
+      /*
+       * While task ``B`` is not sticky.
+       */
+      ctx->beta_idle = false;
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Post_X_Check(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Post_X   state
+)
+{
+  const Per_CPU_Control *cpu;
+  const Thread_Control  *scheduled;
+  const Scheduler_Node  *scheduler_node;
+
+  cpu = _Per_CPU_Get_by_index( 0 );
+  scheduled = cpu->heir;
+
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Post_X_Task: {
+      /*
+       * The task ``T`` shall be scheduled on processor ``X``.
+       */
+      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ TASK ] );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_X_TaskIdle: {
+      /*
+       * An idle task on behalf of task ``T`` shall be scheduled on processor
+       * ``X``.
+       */
+      T_true( scheduled->is_idle );
+      scheduler_node = _Thread_Scheduler_get_home_node(
+        ctx->tq_ctx.worker_tcb[ TASK ]
+      );
+      T_eq_ptr( scheduler_node->user, scheduled );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_X_Alpha: {
+      /*
+       * The task ``A`` shall be scheduled on processor ``X``.
+       */
+      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ ALPHA ] );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_X_AlphaIdle: {
+      /*
+       * An idle task on behalf of task ``A`` shall be scheduled on processor
+       * ``X``.
+       */
+      T_true( scheduled->is_idle );
+      scheduler_node = _Thread_Scheduler_get_home_node(
+        ctx->tq_ctx.worker_tcb[ ALPHA ]
+      );
+      T_eq_ptr( scheduler_node->user, scheduled );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_X_Beta: {
+      /*
+       * The task ``B`` shall be scheduled on processor ``X``.
+       */
+      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ BETA ] );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_X_BetaIdle: {
+      /*
+       * An idle task on behalf of task ``B`` shall be scheduled on processor
+       * ``X``.
+       */
+      T_true( scheduled->is_idle );
+      scheduler_node = _Thread_Scheduler_get_home_node(
+        ctx->tq_ctx.worker_tcb[ BETA ]
+      );
+      T_eq_ptr( scheduler_node->user, scheduled );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_X_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Post_Y_Check(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx,
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y   state
+)
+{
+  const Per_CPU_Control *cpu;
+  const Thread_Control  *scheduled;
+  const Scheduler_Node  *scheduler_node;
+
+  cpu = _Per_CPU_Get_by_index( 1 );
+  scheduled = cpu->heir;
+
+  switch ( state ) {
+    case ScoreSchedSmpEdfReqSetAffinity_Post_Y_Task: {
+      /*
+       * The task ``T`` shall be scheduled on processor ``Y``.
+       */
+      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ TASK ] );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_Y_TaskIdle: {
+      /*
+       * An idle task on behalf of task ``T`` shall be scheduled on processor
+       * ``Y``.
+       */
+      T_true( scheduled->is_idle );
+      scheduler_node = _Thread_Scheduler_get_home_node(
+        ctx->tq_ctx.worker_tcb[ TASK ]
+      );
+      T_eq_ptr( scheduler_node->user, scheduled );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_Y_Alpha: {
+      /*
+       * The task ``A`` shall be scheduled on processor ``Y``.
+       */
+      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ ALPHA ] );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_Y_AlphaIdle: {
+      /*
+       * An idle task on behalf of task ``A`` shall be scheduled on processor
+       * ``Y``.
+       */
+      T_true( scheduled->is_idle );
+      scheduler_node = _Thread_Scheduler_get_home_node(
+        ctx->tq_ctx.worker_tcb[ ALPHA ]
+      );
+      T_eq_ptr( scheduler_node->user, scheduled );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_Y_Beta: {
+      /*
+       * The task ``B`` shall be scheduled on processor ``Y``.
+       */
+      T_eq_ptr( scheduled, ctx->tq_ctx.worker_tcb[ BETA ] );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_Y_BetaIdle: {
+      /*
+       * An idle task on behalf of task ``B`` shall be scheduled on processor
+       * ``Y``.
+       */
+      T_true( scheduled->is_idle );
+      scheduler_node = _Thread_Scheduler_get_home_node(
+        ctx->tq_ctx.worker_tcb[ BETA ]
+      );
+      T_eq_ptr( scheduler_node->user, scheduled );
+      break;
+    }
+
+    case ScoreSchedSmpEdfReqSetAffinity_Post_Y_NA:
+      break;
+  }
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Setup(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx
+)
+{
+  rtems_status_code sc;
+  rtems_id          mutex_a;
+  rtems_id          mutex_b;
+  rtems_id          mutex_c;
+
+  memset( ctx, 0, sizeof( *ctx ) );
+  ctx->tq_ctx.deadlock = TQ_DEADLOCK_STATUS;
+  ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+  ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+  ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+  ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+  ctx->tq_ctx.get_owner = TQGetOwnerClassicSem;
+  ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+  TQInitialize( &ctx->tq_ctx );
+
+  DeleteMutex( ctx->tq_ctx.mutex_id[ TQ_MUTEX_A ] );
+  DeleteMutex( ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ] );
+  DeleteMutex( ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ] );
+
+  mutex_a = 0;
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'M', 'T', 'X', 'A' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+    PRIO_LOW,
+    &mutex_a
+  );
+  T_rsc_success( sc );
+
+  mutex_b = 0;
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'M', 'T', 'X', 'B' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+    PRIO_VERY_LOW,
+    &mutex_b
+  );
+  T_rsc_success( sc );
+
+  mutex_c = 0;
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'M', 'T', 'X', 'C' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+    PRIO_NORMAL,
+    &mutex_c
+  );
+  T_rsc_success( sc );
+
+  ctx->tq_ctx.mutex_id[ TQ_MUTEX_A ] = mutex_a;
+  ctx->tq_ctx.mutex_id[ TQ_MUTEX_B ] = mutex_b;
+  ctx->tq_ctx.mutex_id[ TQ_MUTEX_C ] = mutex_c;
+
+  RemoveProcessor( SCHEDULER_B_ID, 1 );
+  AddProcessor( SCHEDULER_A_ID, 1 );
+
+  TQSetPriority( &ctx->tq_ctx, TASK, PRIO_NORMAL );
+  TQSetPriority( &ctx->tq_ctx, ALPHA, PRIO_LOW );
+  TQSetPriority( &ctx->tq_ctx, BETA, PRIO_VERY_LOW );
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Setup_Wrap( void *arg )
+{
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx;
+
+  ctx = arg;
+  ctx->Map.in_action_loop = false;
+  ScoreSchedSmpEdfReqSetAffinity_Setup( ctx );
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Teardown(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx
+)
+{
+  SetSelfAffinityAll();
+  TQDestroy( &ctx->tq_ctx );
+  RemoveProcessor( SCHEDULER_A_ID, 1 );
+  AddProcessor( SCHEDULER_B_ID, 1 );
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Teardown_Wrap( void *arg )
+{
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx;
+
+  ctx = arg;
+  ctx->Map.in_action_loop = false;
+  ScoreSchedSmpEdfReqSetAffinity_Teardown( ctx );
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Action(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx
+)
+{
+  rtems_event_set events;
+
+  SetSelfPriority( PRIO_ULTRA_HIGH );
+  SetSelfAffinityAll();
+
+  if ( ctx->beta_idle ) {
+    events = TQ_EVENT_MUTEX_B_OBTAIN;
+    TQSendAndWaitForExecutionStop( &ctx->tq_ctx, BETA, events );
+  } else {
+    ctx->tq_ctx.busy_wait[ BETA ] = true;
+    events = TQ_EVENT_BUSY_WAIT;
+    TQSendAndSynchronizeRunner( &ctx->tq_ctx, BETA, events );
+  }
+
+  if ( ctx->alpha_idle ) {
+    events = TQ_EVENT_MUTEX_A_OBTAIN;
+    TQSendAndWaitForExecutionStop( &ctx->tq_ctx, ALPHA, events );
+  } else {
+    ctx->tq_ctx.busy_wait[ ALPHA ] = true;
+    events = TQ_EVENT_BUSY_WAIT;
+    TQSendAndSynchronizeRunner( &ctx->tq_ctx, ALPHA, events );
+  }
+
+  if ( ctx->task_pinned ) {
+    SetSelfAffinityOne( 1 );
+    TQSendAndSynchronizeRunner( &ctx->tq_ctx, TASK, TQ_EVENT_PIN );
+    SetSelfAffinityAll();
+  }
+
+  if ( ctx->task_ready ) {
+    ctx->tq_ctx.busy_wait[ TASK ] = true;
+    events = TQ_EVENT_BUSY_WAIT;
+  } else {
+    events = 0;
+  }
+
+  if ( ctx->task_sticky ) {
+    events |= TQ_EVENT_MUTEX_C_OBTAIN;
+  }
+
+  TQSendAndSynchronizeRunner( &ctx->tq_ctx, TASK, events );
+
+  if ( !ctx->task_ready ) {
+    TQWaitForExecutionStop( &ctx->tq_ctx, TASK );
+  }
+
+  (void) _Thread_Dispatch_disable();
+
+  SetAffinity( ctx->tq_ctx.worker_id[ TASK ], &ctx->task_affinity_before );
+  SetAffinity( ctx->tq_ctx.worker_id[ ALPHA ], &ctx->alpha_affinity );
+  SetAffinity( ctx->tq_ctx.worker_id[ BETA ], &ctx->beta_affinity );
+  SetSelfAffinityOne( 1 );
+  TQSetPriority( &ctx->tq_ctx, TASK, ctx->task_priority );
+  SetSelfPriority( PRIO_ULTRA_LOW );
+  TQSetPriority( &ctx->tq_ctx, ALPHA, ctx->alpha_priority );
+  TQSetPriority( &ctx->tq_ctx, BETA, ctx->beta_priority );
+
+  SetAffinity( ctx->tq_ctx.worker_id[ TASK ], &ctx->task_affinity_after );
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_Cleanup(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx
+)
+{
+  rtems_event_set events;
+
+  SetSelfPriority( PRIO_ULTRA_HIGH );
+  _Thread_Dispatch_enable( _Per_CPU_Get() );
+
+  SetSelfAffinityAll();
+
+  ctx->tq_ctx.busy_wait[ TASK ] = false;
+  ctx->tq_ctx.busy_wait[ ALPHA ] = false;
+  ctx->tq_ctx.busy_wait[ BETA ] = false;
+
+  TQSetPriority( &ctx->tq_ctx, TASK, PRIO_NORMAL );
+  TQSetPriority( &ctx->tq_ctx, ALPHA, PRIO_LOW );
+  TQSetPriority( &ctx->tq_ctx, BETA, PRIO_VERY_LOW );
+
+  if ( ctx->task_sticky ) {
+    events = TQ_EVENT_MUTEX_C_RELEASE;
+  } else {
+    events = 0;
+  }
+
+  if ( ctx->task_pinned ) {
+    events |= TQ_EVENT_UNPIN;
+  }
+
+  if ( events != 0 ) {
+    TQSendAndWaitForExecutionStop( &ctx->tq_ctx, TASK, events );
+  } else {
+    TQWaitForExecutionStop( &ctx->tq_ctx, TASK );
+  }
+
+  SetAffinityAll( ctx->tq_ctx.worker_id[ TASK ] );
+  SetAffinityAll( ctx->tq_ctx.worker_id[ ALPHA ] );
+
+  if ( ctx->alpha_idle ) {
+    events = TQ_EVENT_MUTEX_A_RELEASE;
+  } else {
+    events = 0;
+  }
+
+  if ( events != 0 ) {
+    TQSendAndWaitForExecutionStop( &ctx->tq_ctx, ALPHA, events );
+  } else {
+    TQWaitForExecutionStop( &ctx->tq_ctx, ALPHA );
+  }
+
+  SetAffinityAll( ctx->tq_ctx.worker_id[ BETA ] );
+
+  if ( ctx->beta_idle ) {
+    events = TQ_EVENT_MUTEX_B_RELEASE;
+  } else {
+    events = 0;
+  }
+
+  if ( events != 0 ) {
+    TQSendAndWaitForExecutionStop( &ctx->tq_ctx, BETA, events );
+  } else {
+    TQWaitForExecutionStop( &ctx->tq_ctx, BETA );
+  }
+}
+
+static const ScoreSchedSmpEdfReqSetAffinity_Entry
+ScoreSchedSmpEdfReqSetAffinity_Entries[] = {
+  { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_NA,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_NA },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_AlphaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_BetaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_AlphaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Beta },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Alpha,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_BetaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Alpha,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Beta },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Task,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_BetaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Task,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Beta },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Task,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_AlphaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Task,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Alpha },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_AlphaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Task },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Alpha,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Task },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_BetaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_AlphaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Beta,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_AlphaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_BetaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Alpha },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Beta,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Alpha },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_AlphaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_TaskIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Alpha,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_TaskIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_TaskIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_BetaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_TaskIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Beta },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_TaskIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_AlphaIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_TaskIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Alpha },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_BetaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Task },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Beta,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_Task },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_BetaIdle,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_TaskIdle },
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    ScoreSchedSmpEdfReqSetAffinity_Post_X_Beta,
+    ScoreSchedSmpEdfReqSetAffinity_Post_Y_TaskIdle }
+};
+
+static const uint8_t
+ScoreSchedSmpEdfReqSetAffinity_Map[] = {
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 7, 7, 9, 9, 13, 14, 3, 4,
+  8, 8, 10, 10, 1, 2, 1, 2, 9, 9, 9, 9, 3, 4, 3, 4, 10, 10, 10, 10, 5, 6, 5, 6,
+  7, 7, 9, 9, 5, 6, 5, 6, 8, 8, 10, 10, 5, 6, 5, 6, 9, 9, 9, 9, 5, 6, 5, 6, 10,
+  10, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6,
+  5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8,
+  5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 7, 7, 9, 9, 13,
+  14, 3, 4, 8, 8, 10, 10, 1, 2, 1, 2, 9, 9, 9, 9, 3, 4, 3, 4, 10, 10, 10, 10,
+  5, 6, 5, 6, 7, 7, 9, 9, 5, 6, 5, 6, 8, 8, 10, 10, 5, 6, 5, 6, 9, 9, 9, 9, 5,
+  6, 5, 6, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2,
+  19, 19, 15, 15, 13, 14, 3, 4, 20, 20, 16, 16, 1, 2, 1, 2, 15, 15, 15, 15, 3,
+  4, 3, 4, 16, 16, 16, 16, 17, 18, 17, 18, 19, 19, 15, 15, 17, 18, 17, 18, 20,
+  20, 16, 16, 17, 18, 17, 18, 15, 15, 15, 15, 17, 18, 17, 18, 16, 16, 16, 16,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
+  3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2,
+  1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13,
+  14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12,
+  1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3,
+  4, 11, 12, 1, 2, 7, 7, 7, 7, 13, 14, 3, 4, 8, 8, 8, 8, 1, 2, 1, 2, 1, 2, 1,
+  2, 3, 4, 3, 4, 3, 4, 3, 4, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5,
+  6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 11, 12, 1, 2, 13,
+  14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2,
+  1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
+  3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11, 12,
+  1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2,
+  1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
+  3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2,
+  1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 7,
+  7, 7, 7, 13, 14, 3, 4, 8, 8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6,
+  5, 6, 5, 6, 5, 6, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 5, 6,
+  5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6,
+  8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 7,
+  7, 7, 7, 13, 14, 3, 4, 8, 8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6,
+  5, 6, 5, 6, 5, 6, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 19,
+  19, 19, 19, 13, 14, 3, 4, 20, 20, 20, 20, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4,
+  3, 4, 3, 4, 17, 18, 17, 18, 19, 19, 19, 19, 17, 18, 17, 18, 20, 20, 20, 20,
+  17, 18, 17, 18, 17, 18, 17, 18, 17, 18, 17, 18, 17, 18, 17, 18, 1, 2, 1, 2,
+  1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2,
+  1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4,
+  3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1,
+  2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4,
+  3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 11, 12,
+  1, 2, 7, 7, 7, 7, 13, 14, 3, 4, 8, 8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3,
+  4, 3, 4, 3, 4, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5,
+  6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4,
+  13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11,
+  12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14,
+  3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1,
+  2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3,
+  4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1,
+  2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3,
+  4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3,
+  4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1,
+  2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3,
+  4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 9, 9, 9, 9, 13,
+  14, 3, 4, 10, 10, 10, 10, 1, 2, 1, 2, 9, 9, 9, 9, 3, 4, 3, 4, 10, 10, 10, 10,
+  21, 22, 1, 2, 9, 9, 9, 9, 21, 22, 3, 4, 10, 10, 10, 10, 21, 22, 1, 2, 9, 9,
+  9, 9, 21, 22, 3, 4, 10, 10, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8,
+  8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 7, 7, 7, 7,
+  5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11,
+  12, 1, 2, 9, 9, 9, 9, 13, 14, 3, 4, 10, 10, 10, 10, 1, 2, 1, 2, 9, 9, 9, 9,
+  3, 4, 3, 4, 10, 10, 10, 10, 21, 22, 1, 2, 9, 9, 9, 9, 21, 22, 3, 4, 10, 10,
+  10, 10, 21, 22, 1, 2, 9, 9, 9, 9, 21, 22, 3, 4, 10, 10, 10, 10, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 15, 15, 15, 15, 13, 14, 3, 4, 16, 16,
+  16, 16, 1, 2, 1, 2, 15, 15, 15, 15, 3, 4, 3, 4, 16, 16, 16, 16, 23, 24, 1, 2,
+  15, 15, 15, 15, 23, 24, 3, 4, 16, 16, 16, 16, 23, 24, 1, 2, 15, 15, 15, 15,
+  23, 24, 3, 4, 16, 16, 16, 16, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
+  3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2,
+  1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11,
+  12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3,
+  4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1,
+  2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 11, 12, 1, 2, 7, 7, 7, 7, 13, 14, 3, 4, 8,
+  8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 5, 6, 5, 6, 7, 7, 7,
+  7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6,
+  11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1,
+  2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3,
+  4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2,
+  1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13,
+  14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2,
+  1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
+  3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2,
+  1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 11, 12, 1, 2, 7, 7, 9, 9, 13, 14, 3, 4, 8, 8, 10, 10, 1, 2, 1, 2,
+  9, 9, 9, 9, 3, 4, 3, 4, 10, 10, 10, 10, 5, 6, 5, 6, 7, 7, 9, 9, 5, 6, 5, 6,
+  8, 8, 10, 10, 5, 6, 5, 6, 9, 9, 9, 9, 5, 6, 5, 6, 10, 10, 10, 10, 7, 7, 7, 7,
+  7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6,
+  5, 6, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6,
+  5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 7, 7, 9, 9, 13, 14, 3, 4, 8, 8, 10, 10,
+  1, 2, 1, 2, 9, 9, 9, 9, 3, 4, 3, 4, 10, 10, 10, 10, 5, 6, 5, 6, 7, 7, 9, 9,
+  5, 6, 5, 6, 8, 8, 10, 10, 5, 6, 5, 6, 9, 9, 9, 9, 5, 6, 5, 6, 10, 10, 10, 10,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 19, 19, 15, 15, 13, 14, 3,
+  4, 20, 20, 16, 16, 1, 2, 1, 2, 15, 15, 15, 15, 3, 4, 3, 4, 16, 16, 16, 16,
+  17, 18, 17, 18, 19, 19, 15, 15, 17, 18, 17, 18, 20, 20, 16, 16, 17, 18, 17,
+  18, 15, 15, 15, 15, 17, 18, 17, 18, 16, 16, 16, 16, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2,
+  1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4,
+  3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2,
+  1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1,
+  2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13,
+  14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 11, 12, 1, 2, 7, 7,
+  7, 7, 13, 14, 3, 4, 8, 8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3,
+  4, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5,
+  6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3,
+  4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13,
+  14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12,
+  1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3,
+  4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3,
+  4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1,
+  2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3,
+  4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1,
+  2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 7, 7, 7, 7, 13, 14, 3, 4, 8,
+  8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 5, 6, 5, 6, 7, 7, 7,
+  7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 7,
+  7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5,
+  6, 5, 6, 5, 6, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5,
+  6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 7, 7, 7, 7, 13, 14, 3, 4, 8,
+  8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 5, 6, 5, 6, 7, 7, 7,
+  7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 19, 19, 19, 19, 13, 14, 3, 4,
+  20, 20, 20, 20, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 17, 18, 17,
+  18, 19, 19, 19, 19, 17, 18, 17, 18, 20, 20, 20, 20, 17, 18, 17, 18, 17, 18,
+  17, 18, 17, 18, 17, 18, 17, 18, 17, 18, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4,
+  3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2,
+  1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
+  3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2,
+  1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11,
+  12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 11, 12, 1, 2, 7, 7, 7, 7, 13,
+  14, 3, 4, 8, 8, 8, 8, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 5, 6,
+  5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6,
+  5, 6, 5, 6, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1,
+  2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4,
+  13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3,
+  4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2,
+  1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4,
+  3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2,
+  1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
+  3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 9, 9, 9, 9, 13, 14, 3, 4, 10, 10, 10,
+  10, 1, 2, 1, 2, 9, 9, 9, 9, 3, 4, 3, 4, 10, 10, 10, 10, 21, 22, 1, 2, 9, 9,
+  9, 9, 21, 22, 3, 4, 10, 10, 10, 10, 21, 22, 1, 2, 9, 9, 9, 9, 21, 22, 3, 4,
+  10, 10, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 5, 6, 5, 6,
+  5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8,
+  8, 8, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 9, 9, 9,
+  9, 13, 14, 3, 4, 10, 10, 10, 10, 1, 2, 1, 2, 9, 9, 9, 9, 3, 4, 3, 4, 10, 10,
+  10, 10, 21, 22, 1, 2, 9, 9, 9, 9, 21, 22, 3, 4, 10, 10, 10, 10, 21, 22, 1, 2,
+  9, 9, 9, 9, 21, 22, 3, 4, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  11, 12, 1, 2, 15, 15, 15, 15, 13, 14, 3, 4, 16, 16, 16, 16, 1, 2, 1, 2, 15,
+  15, 15, 15, 3, 4, 3, 4, 16, 16, 16, 16, 23, 24, 1, 2, 15, 15, 15, 15, 23, 24,
+  3, 4, 16, 16, 16, 16, 23, 24, 1, 2, 15, 15, 15, 15, 23, 24, 3, 4, 16, 16, 16,
+  16, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2,
+  1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4,
+  3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2,
+  1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11, 12, 1, 2, 13, 14, 3, 4,
+  13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 11,
+  12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4,
+  3, 4, 11, 12, 1, 2, 7, 7, 7, 7, 13, 14, 3, 4, 8, 8, 8, 8, 1, 2, 1, 2, 1, 2,
+  1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 5, 6, 5, 6, 7, 7, 7, 7, 5, 6, 5, 6, 8, 8, 8, 8,
+  5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 11, 12, 1, 2, 11, 12, 1, 2,
+  13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4,
+  1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2,
+  3, 4, 3, 4, 3, 4, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 1, 2, 11,
+  12, 1, 2, 13, 14, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3,
+  4, 3, 4, 1, 2, 1, 2, 11, 12, 1, 2, 3, 4, 3, 4, 13, 14, 3, 4, 1, 2, 1, 2, 1,
+  2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3,
+  4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3,
+  4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1,
+  2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3,
+  4, 3, 4, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 1,
+  2, 3, 4, 3, 4, 3, 4, 3, 4
+};
+
+static size_t ScoreSchedSmpEdfReqSetAffinity_Scope(
+  void  *arg,
+  char  *buf,
+  size_t n
+)
+{
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx;
+
+  ctx = arg;
+
+  if ( ctx->Map.in_action_loop ) {
+    return T_get_scope(
+      ScoreSchedSmpEdfReqSetAffinity_PreDesc,
+      buf,
+      n,
+      ctx->Map.pcs
+    );
+  }
+
+  return 0;
+}
+
+static T_fixture ScoreSchedSmpEdfReqSetAffinity_Fixture = {
+  .setup = ScoreSchedSmpEdfReqSetAffinity_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreSchedSmpEdfReqSetAffinity_Teardown_Wrap,
+  .scope = ScoreSchedSmpEdfReqSetAffinity_Scope,
+  .initial_context = &ScoreSchedSmpEdfReqSetAffinity_Instance
+};
+
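+/*
+ * Pops the next transition map entry.  The map element selected by the
+ * current index refers to the entry which provides the expected
+ * post-condition states for the corresponding pre-condition combination.
+ */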
+static inline ScoreSchedSmpEdfReqSetAffinity_Entry
+ScoreSchedSmpEdfReqSetAffinity_PopEntry(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx
+)
+{
+  size_t index;
+
+  index = ctx->Map.index;
+  ctx->Map.index = index + 1;
+  return ScoreSchedSmpEdfReqSetAffinity_Entries[
+    ScoreSchedSmpEdfReqSetAffinity_Map[ index ]
+  ];
+}
+
+static void ScoreSchedSmpEdfReqSetAffinity_TestVariant(
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx
+)
+{
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Before_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_After_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_Prepare(
+    ctx,
+    ctx->Map.pcs[ 2 ]
+  );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_State_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_Prepare( ctx, ctx->Map.pcs[ 5 ] );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_Prepare(
+    ctx,
+    ctx->Map.pcs[ 6 ]
+  );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_Prepare(
+    ctx,
+    ctx->Map.pcs[ 7 ]
+  );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_Prepare(
+    ctx,
+    ctx->Map.pcs[ 8 ]
+  );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_Prepare(
+    ctx,
+    ctx->Map.pcs[ 9 ]
+  );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_Prepare(
+    ctx,
+    ctx->Map.pcs[ 10 ]
+  );
+  ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_Prepare(
+    ctx,
+    ctx->Map.pcs[ 11 ]
+  );
+  ScoreSchedSmpEdfReqSetAffinity_Action( ctx );
+  ScoreSchedSmpEdfReqSetAffinity_Post_X_Check( ctx, ctx->Map.entry.Post_X );
+  ScoreSchedSmpEdfReqSetAffinity_Post_Y_Check( ctx, ctx->Map.entry.Post_Y );
+}
+
+/**
+ * @fn void T_case_body_ScoreSchedSmpEdfReqSetAffinity( void )
+ */
+T_TEST_CASE_FIXTURE(
+  ScoreSchedSmpEdfReqSetAffinity,
+  &ScoreSchedSmpEdfReqSetAffinity_Fixture
+)
+{
+  ScoreSchedSmpEdfReqSetAffinity_Context *ctx;
+
+  ctx = T_fixture_context();
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
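+  /*
+   * Iterate over the Cartesian product of all pre-condition states.  Each
+   * combination pops one transition map entry which either marks the variant
+   * as skipped or provides the expected post-condition states.
+   */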
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_Before_All;
+    ctx->Map.pcs[ 0 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_Before_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    for (
+      ctx->Map.pcs[ 1 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_After_All;
+      ctx->Map.pcs[ 1 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_After_NA;
+      ++ctx->Map.pcs[ 1 ]
+    ) {
+      for (
+        ctx->Map.pcs[ 2 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_High;
+        ctx->Map.pcs[ 2 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_Priority_NA;
+        ++ctx->Map.pcs[ 2 ]
+      ) {
+        for (
+          ctx->Map.pcs[ 3 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_State_Ready;
+          ctx->Map.pcs[ 3 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_State_NA;
+          ++ctx->Map.pcs[ 3 ]
+        ) {
+          for (
+            ctx->Map.pcs[ 4 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_Yes;
+            ctx->Map.pcs[ 4 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_Sticky_NA;
+            ++ctx->Map.pcs[ 4 ]
+          ) {
+            for (
+              ctx->Map.pcs[ 5 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_Yes;
+              ctx->Map.pcs[ 5 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_Pinned_NA;
+              ++ctx->Map.pcs[ 5 ]
+            ) {
+              for (
+                ctx->Map.pcs[ 6 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_High;
+                ctx->Map.pcs[ 6 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaPriority_NA;
+                ++ctx->Map.pcs[ 6 ]
+              ) {
+                for (
+                  ctx->Map.pcs[ 7 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_All;
+                  ctx->Map.pcs[ 7 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaAffinity_NA;
+                  ++ctx->Map.pcs[ 7 ]
+                ) {
+                  for (
+                    ctx->Map.pcs[ 8 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_Yes;
+                    ctx->Map.pcs[ 8 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_AlphaIdle_NA;
+                    ++ctx->Map.pcs[ 8 ]
+                  ) {
+                    for (
+                      ctx->Map.pcs[ 9 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_High;
+                      ctx->Map.pcs[ 9 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_BetaPriority_NA;
+                      ++ctx->Map.pcs[ 9 ]
+                    ) {
+                      for (
+                        ctx->Map.pcs[ 10 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_All;
+                        ctx->Map.pcs[ 10 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_BetaAffinity_NA;
+                        ++ctx->Map.pcs[ 10 ]
+                      ) {
+                        for (
+                          ctx->Map.pcs[ 11 ] = ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_Yes;
+                          ctx->Map.pcs[ 11 ] < ScoreSchedSmpEdfReqSetAffinity_Pre_BetaIdle_NA;
+                          ++ctx->Map.pcs[ 11 ]
+                        ) {
+                          ctx->Map.entry =
+                          ScoreSchedSmpEdfReqSetAffinity_PopEntry( ctx );
+
+                          if ( ctx->Map.entry.Skip ) {
+                            continue;
+                          }
+
+                          ScoreSchedSmpEdfReqSetAffinity_TestVariant( ctx );
+                          ScoreSchedSmpEdfReqSetAffinity_Cleanup( ctx );
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sched-smp-edf.c b/testsuites/validation/tc-sched-smp-edf.c
new file mode 100644
index 0000000..94d685b
--- /dev/null
+++ b/testsuites/validation/tc-sched-smp-edf.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreSchedSmpEdfValEdf
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated.  If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual.  The manual is provided as a part of
+ * a release.  For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreSchedSmpEdfValEdf \
+ *   spec:/score/sched/smp/edf/val/edf
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @brief Tests for operations of the EDF SMP scheduler.
+ *
+ * This test case performs the following actions:
+ *
+ * - Validate a set affinity error case with an unsupported subset.
+ *
+ * @{
+ */
+
+/**
+ * @brief Validate a set affinity error case with an unsupported subset.
+ */
+static void ScoreSchedSmpEdfValEdf_Action_0( void )
+{
+  if ( rtems_scheduler_get_processor_maximum() >= 3 ) {
+    rtems_status_code sc;
+    cpu_set_t         affinity;
+
+    CPU_ZERO( &affinity );
+    CPU_SET( 0, &affinity );
+    CPU_SET( 1, &affinity );
+
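+    /*
+     * Move processor 1 from scheduler B to scheduler A and processor 2 from
+     * scheduler C to scheduler B, so that the requested affinity set
+     * containing processors 0 and 1 is a subset which is unsupported by the
+     * EDF SMP scheduler.
+     */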
+    RemoveProcessor( SCHEDULER_B_ID, 1 );
+    RemoveProcessor( SCHEDULER_C_ID, 2 );
+    AddProcessor( SCHEDULER_A_ID, 1 );
+    AddProcessor( SCHEDULER_B_ID, 2 );
+
+    sc = rtems_task_set_affinity( RTEMS_SELF, sizeof( affinity ), &affinity );
+    T_rsc( sc, RTEMS_INVALID_NUMBER );
+
+    RemoveProcessor( SCHEDULER_A_ID, 1 );
+    RemoveProcessor( SCHEDULER_B_ID, 2 );
+    AddProcessor( SCHEDULER_B_ID, 1 );
+    AddProcessor( SCHEDULER_C_ID, 2 );
+  }
+}
+
+/**
+ * @fn void T_case_body_ScoreSchedSmpEdfValEdf( void )
+ */
+T_TEST_CASE( ScoreSchedSmpEdfValEdf )
+{
+  ScoreSchedSmpEdfValEdf_Action_0();
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sched-smp.c b/testsuites/validation/tc-sched-smp.c
new file mode 100644
index 0000000..957a978
--- /dev/null
+++ b/testsuites/validation/tc-sched-smp.c
@@ -0,0 +1,1263 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreSchedSmpValSmp
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated.  If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual.  The manual is provided as a part of
+ * a release.  For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/test-scheduler.h>
+#include <rtems/score/percpu.h>
+#include <rtems/score/threadimpl.h>
+
+#include "tx-support.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreSchedSmpValSmp spec:/score/sched/smp/val/smp
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationSmpOnly0
+ *
+ * @brief Tests SMP-specific scheduler behaviour.
+ *
+ * This test case performs the following actions:
+ *
+ * - Construct a system state in which a sticky thread is blocked while an idle
+ *   thread executes on behalf of the thread.
+ *
+ *   - Block the sticky worker A while it uses an idle thread in the home
+ *     scheduler.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a thread is preempted while it is
+ *   blocked.
+ *
+ *   - Block worker A and preempt it before the withdraw node operations are
+ *     performed for worker A.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a thread is rescheduled while it is not
+ *   scheduled on another scheduler.
+ *
+ *   - Reschedule worker A by the home scheduler while worker A is not
+ *     scheduled on another scheduler.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which an ask for help request is cancelled
+ *   while it is processed on another processor.
+ *
+ *   - Unblock worker A.  It cannot be scheduled on its home scheduler.
+ *     Intercept the ask for help request.  Block worker A.  This will
+ *     cancel the ask for help request.  Remove the request while the other
+ *     processor tries to cancel the request.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a block operation.
+ *
+ *   - Block the runner thread while the owner thread of the highest priority
+ *     ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a block operation.
+ *
+ *   - Block the runner thread while the owner thread of the highest priority
+ *     ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set affinity operation.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set affinity operation
+ *   while a sticky node is involved.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set affinity operation.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set affinity operation while a
+ *   sticky node is involved.
+ *
+ *   - Set the affinity of the runner thread while the owner thread of the
+ *     highest priority ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set priority operation.
+ *
+ *   - Set the priority of the runner thread while the owner thread of the
+ *     highest priority ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a set priority operation
+ *   while a sticky node is involved.
+ *
+ *   - Set the priority of the runner thread while the owner thread of the
+ *     highest priority ready node is already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a set priority operation.
+ *
+ *   - Set the priority of the runner thread while the owner thread of the
+ *     highest priority ready node is blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a yield operation.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is already scheduled during a yield operation while a
+ *   sticky node is involved.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     already scheduled.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a yield operation.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * - Construct a system state in which a scheduler tries to schedule a node
+ *   whose owner thread is blocked during a yield operation while a sticky node
+ *   is involved.
+ *
+ *   - Yield while the owner thread of the highest priority ready node is
+ *     blocked.
+ *
+ *   - Clean up all used resources.
+ *
+ * @{
+ */
+
+typedef enum {
+  WORKER_A,
+  WORKER_B,
+  WORKER_C,
+  WORKER_COUNT
+} WorkerIndex;
+
+/**
+ * @brief Test context for spec:/score/sched/smp/val/smp test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the runner identifier.
+   */
+  rtems_id runner_id;
+
+  /**
+   * @brief This member contains the worker identifiers.
+   */
+  rtems_id worker_id[ WORKER_COUNT ];
+
+  /**
+   * @brief This member contains the mutex identifier.
+   */
+  rtems_id mutex_id;
+
+  /**
+   * @brief This member contains the sticky mutex identifier.
+   */
+  rtems_id sticky_id;
+
+  /**
+   * @brief This member contains the worker busy status.
+   */
+  volatile bool busy[ WORKER_COUNT ];
+
+  /**
+   * @brief If this member is true, then the worker shall be in the busy loop.
+   */
+  volatile bool is_busy[ WORKER_COUNT ];
+
+  /**
+   * @brief This member contains the per-CPU jobs.
+   */
+  Per_CPU_Job job[ 2 ];
+
+  /**
+   * @brief This member contains the per-CPU job contexts.
+   */
+  Per_CPU_Job_context job_context[ 2 ];
+
+  /**
+   * @brief This member contains the call within ISR request.
+   */
+  CallWithinISRRequest request;
+} ScoreSchedSmpValSmp_Context;
+
+static ScoreSchedSmpValSmp_Context
+  ScoreSchedSmpValSmp_Instance;
+
+typedef ScoreSchedSmpValSmp_Context Context;
+
+typedef enum {
+  EVENT_OBTAIN = RTEMS_EVENT_0,
+  EVENT_RELEASE = RTEMS_EVENT_1,
+  EVENT_STICKY_OBTAIN = RTEMS_EVENT_2,
+  EVENT_STICKY_RELEASE = RTEMS_EVENT_3,
+  EVENT_SYNC_RUNNER = RTEMS_EVENT_4,
+  EVENT_BUSY = RTEMS_EVENT_5
+} Event;
+
+static void SendAndSync( Context *ctx, WorkerIndex worker, Event event )
+{
+  SendEvents( ctx->worker_id[ worker ], EVENT_SYNC_RUNNER | event );
+  ReceiveAllEvents( EVENT_SYNC_RUNNER );
+  WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
+static void MakeBusy( Context *ctx, WorkerIndex worker )
+{
+  ctx->is_busy[ worker ] = false;
+  ctx->busy[ worker ] = true;
+  SendEvents( ctx->worker_id[ worker ], EVENT_BUSY );
+}
+
+static void WaitForBusy( Context *ctx, WorkerIndex worker )
+{
+  while ( !ctx->is_busy[ worker ] ) {
+    /* Wait */
+  }
+}
+
+static void StopBusy( Context *ctx, WorkerIndex worker )
+{
+  ctx->busy[ worker ] = false;
+  WaitForExecutionStop( ctx->worker_id[ worker ] );
+}
+
+static void MakeSticky( const Context *ctx )
+{
+  ObtainMutex( ctx->sticky_id );
+}
+
+static void CleanSticky( const Context *ctx )
+{
+  ReleaseMutex( ctx->sticky_id );
+}
+
+static void Block( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  SuspendTask( ctx->runner_id );
+  ResumeTask( ctx->runner_id );
+}
+
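+/*
+ * Just before the specified scheduler operation is performed, remove the
+ * scheduler event handler and stop the busy loop of worker C.
+ */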
+static void OperationStopBusyC(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when,
+  T_scheduler_operation    op
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
+    T_scheduler_set_event_handler( NULL, NULL );
+    StopBusy( ctx, WORKER_C );
+  }
+}
+
+static void BlockStopBusyC(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_BLOCK );
+}
+
+static void SetAffinityStopBusyC(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_SET_AFFINITY );
+}
+
+static void UpdatePriorityStopBusyC(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
+}
+
+static void YieldStopBusyC(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationStopBusyC( arg, event, when, T_SCHEDULER_YIELD );
+}
+
+static void SuspendA( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  SuspendTask( ctx->worker_id[ WORKER_A ] );
+}
+
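+/*
+ * Just before the specified scheduler operation is performed, suspend
+ * worker A through a job on processor 1 and wait until its scheduler state
+ * is blocked.
+ */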
+static void OperationSuspendA(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when,
+  T_scheduler_operation    op
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if ( when == T_SCHEDULER_BEFORE && event->operation == op ) {
+    const rtems_tcb *worker_a;
+
+    T_scheduler_set_event_handler( NULL, NULL );
+    ctx->job_context[ 0 ].handler = SuspendA;
+    _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+
+    worker_a = GetThread( ctx->worker_id[ WORKER_A ] );
+
+    while ( worker_a->Scheduler.state != THREAD_SCHEDULER_BLOCKED ) {
+      RTEMS_COMPILER_MEMORY_BARRIER();
+    }
+  }
+}
+
+static void BlockSuspendA(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_BLOCK );
+}
+
+static void SetAffinitySuspendA(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_SET_AFFINITY );
+}
+
+static void UpdatePrioritySuspendA(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_UPDATE_PRIORITY );
+}
+
+static void YieldSuspendA(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  OperationSuspendA( arg, event, when, T_SCHEDULER_YIELD );
+}
+
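+/*
+ * This job runs on a third processor.  It acquires the per-CPU lock of
+ * processor 0, waits for one other lock contender, submits the job to
+ * suspend worker A on processor 1, and waits for a second contender before
+ * it releases the lock.
+ */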
+static void GuideAskForHelp( void *arg )
+{
+  Context         *ctx;
+  Per_CPU_Control *cpu;
+  ISR_lock_Context lock_context;
+
+  ctx = arg;
+  cpu = _Per_CPU_Get_by_index( 0 );
+
+  _ISR_lock_ISR_disable( &lock_context );
+  _Per_CPU_Acquire( cpu, &lock_context );
+
+  ISRLockWaitForOthers( &cpu->Lock, 1 );
+
+  ctx->job_context[ 0 ].handler = SuspendA;
+  _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+  ISRLockWaitForOthers( &cpu->Lock, 2 );
+
+  _Per_CPU_Release( cpu, &lock_context );
+  _ISR_lock_ISR_enable( &lock_context );
+}
+
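+/*
+ * Intercept the ask for help request of worker A.  With more than two
+ * processors, a job on the third processor guides the request processing.
+ * Otherwise, the request is removed directly from the list of threads in
+ * need for help while the per-CPU lock is held.
+ */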
+static void InterceptAskForHelp( void *arg )
+{
+  Context         *ctx;
+  Per_CPU_Control *cpu_self;
+
+  ctx = arg;
+  cpu_self = _Per_CPU_Get();
+
+  if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+    ctx->job_context[ 1 ].handler = GuideAskForHelp;
+    _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 2 ), &ctx->job[ 1 ] );
+    ISRLockWaitForOwned( &cpu_self->Lock );
+  } else {
+    ISR_lock_Context lock_context;
+    Chain_Node      *node;
+    Thread_Control  *thread;
+
+    _ISR_lock_ISR_disable( &lock_context );
+    _Per_CPU_Acquire( cpu_self, &lock_context );
+    ctx->job_context[ 0 ].handler = SuspendA;
+    _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+    ISRLockWaitForOthers( &cpu_self->Lock, 1 );
+
+    /* See _Thread_Preemption_intervention() */
+    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
+    thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
+    T_assert_eq_ptr( thread, GetThread( ctx->worker_id[ WORKER_A ] ) );
+    thread->Scheduler.ask_for_help_cpu = NULL;
+
+    _Per_CPU_Release( cpu_self, &lock_context );
+    _ISR_lock_ISR_enable( &lock_context );
+  }
+}
+
+static void UnblockAskForHelp(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_UNBLOCK
+  ) {
+    T_scheduler_set_event_handler( NULL, NULL );
+    ctx->request.handler = InterceptAskForHelp;
+    ctx->request.arg = ctx;
+    CallWithinISRSubmit( &ctx->request );
+  }
+}
+
+static void RaiseWorkerPriorityWithIdleRunner( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  SuspendTask( ctx->runner_id );
+  T_scheduler_set_event_handler( UpdatePriorityStopBusyC, ctx );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+  ResumeTask( ctx->runner_id );
+}
+
+static void MakeReady( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  MakeBusy( ctx, WORKER_C );
+}
+
+static void UpdatePriorityMakeReady(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if (
+    when == T_SCHEDULER_BEFORE &&
+    event->operation == T_SCHEDULER_UPDATE_PRIORITY
+  ) {
+    Thread_Control *thread;
+
+    T_scheduler_set_event_handler( NULL, NULL );
+
+    thread = GetThread( ctx->worker_id[ WORKER_A ] );
+    T_eq_int( thread->Scheduler.state, THREAD_SCHEDULER_SCHEDULED );
+
+    ctx->job_context[ 0 ].handler = MakeReady;
+    _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+
+    while ( thread->Scheduler.state != THREAD_SCHEDULER_READY ) {
+      RTEMS_COMPILER_MEMORY_BARRIER();
+    }
+  }
+}
+
+static void ReadyToScheduled( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  SuspendTask( ctx->runner_id );
+
+  T_scheduler_set_event_handler( UpdatePriorityMakeReady, ctx );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+
+  SetPriority( ctx->runner_id, PRIO_VERY_HIGH );
+  ResumeTask( ctx->runner_id );
+}
+
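+/*
+ * Suspend the runner, suspend and resume the sticky worker A so that the
+ * idle thread executing on its behalf is reused, and change the runner
+ * priority before resuming it.
+ */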
+static void BlockAndReuseIdle( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  SuspendTask( ctx->runner_id );
+  SuspendTask( ctx->worker_id[ WORKER_A ] );
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+  SetPriority( ctx->runner_id, PRIO_HIGH );
+  ResumeTask( ctx->runner_id );
+}
+
+static void Preempt( void *arg )
+{
+  Context *ctx;
+
+  ctx = arg;
+  MakeBusy( ctx, WORKER_C );
+}
+
+static void BlockAndPreempt(
+  void                    *arg,
+  const T_scheduler_event *event,
+  T_scheduler_when         when
+)
+{
+  Context *ctx;
+
+  ctx = arg;
+
+  if ( when == T_SCHEDULER_AFTER && event->operation == T_SCHEDULER_BLOCK ) {
+    Thread_Control *thread;
+
+    T_scheduler_set_event_handler( NULL, NULL );
+
+    thread = GetThread( ctx->worker_id[ WORKER_A ] );
+    T_eq_int( thread->Scheduler.state, THREAD_SCHEDULER_BLOCKED );
+
+    ctx->job_context[ 0 ].handler = Preempt;
+    _Per_CPU_Submit_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+    _Per_CPU_Wait_for_job( _Per_CPU_Get_by_index( 1 ), &ctx->job[ 0 ] );
+  }
+}
+
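+/*
+ * Prepare the system state used by the actions in which the owner thread of
+ * the highest priority ready node is already scheduled: worker A holds the
+ * mutex which worker B waits for, and worker A and worker C spin in busy
+ * loops.
+ */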
+static void PrepareOwnerScheduled( Context *ctx )
+{
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+  SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+  MakeBusy( ctx, WORKER_C );
+  MakeBusy( ctx, WORKER_A );
+}
+
+static void CleanupOwnerScheduled( Context *ctx )
+{
+  StopBusy( ctx, WORKER_A );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+  SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
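+/*
+ * Prepare the system state used by the actions in which the owner thread of
+ * the highest priority ready node is blocked: worker A runs on scheduler B,
+ * holds the mutex which worker B waits for, and spins in a busy loop
+ * together with worker C.
+ */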
+static void PrepareOwnerBlocked( Context *ctx )
+{
+  SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_B_ID, PRIO_NORMAL );
+  SendAndSync( ctx, WORKER_A, EVENT_OBTAIN );
+  SendEvents( ctx->worker_id[ WORKER_B ], EVENT_OBTAIN );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+  MakeBusy( ctx, WORKER_C );
+  SetPriority( ctx->worker_id[ WORKER_B ], PRIO_LOW );
+  MakeBusy( ctx, WORKER_A );
+  SetPriority( ctx->worker_id[ WORKER_B ], PRIO_NORMAL );
+}
+
+static void CleanupOwnerBlocked( Context *ctx )
+{
+  StopBusy( ctx, WORKER_C );
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+  StopBusy( ctx, WORKER_A );
+  SendAndSync( ctx, WORKER_A, EVENT_RELEASE );
+  SetPriority( ctx->worker_id[ WORKER_B ], PRIO_HIGH );
+  SendEvents( ctx->worker_id[ WORKER_B ], EVENT_RELEASE );
+  SetScheduler( ctx->worker_id[ WORKER_A ], SCHEDULER_A_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+static void Worker( rtems_task_argument arg, WorkerIndex worker )
+{
+  Context *ctx;
+
+  ctx = (Context *) arg;
+
+  while ( true ) {
+    rtems_event_set events;
+
+    events = ReceiveAnyEvents();
+
+    if ( ( events & EVENT_SYNC_RUNNER ) != 0 ) {
+      SendEvents( ctx->runner_id, EVENT_SYNC_RUNNER );
+    }
+
+    if ( ( events & EVENT_OBTAIN ) != 0 ) {
+      ObtainMutex( ctx->mutex_id );
+    }
+
+    if ( ( events & EVENT_RELEASE ) != 0 ) {
+      ReleaseMutex( ctx->mutex_id );
+    }
+
+    if ( ( events & EVENT_STICKY_OBTAIN ) != 0 ) {
+      ObtainMutex( ctx->sticky_id );
+    }
+
+    if ( ( events & EVENT_STICKY_RELEASE ) != 0 ) {
+      ReleaseMutex( ctx->sticky_id );
+    }
+
+    if ( ( events & EVENT_BUSY ) != 0 ) {
+      ctx->is_busy[ worker ] = true;
+
+      while ( ctx->busy[ worker ] ) {
+        /* Wait */
+      }
+
+      ctx->is_busy[ worker ] = false;
+    }
+  }
+}
+
+static void WorkerA( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_A );
+}
+
+static void WorkerB( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_B );
+}
+
+static void WorkerC( rtems_task_argument arg )
+{
+  Worker( arg, WORKER_C );
+}
+
+static void ScoreSchedSmpValSmp_Setup( ScoreSchedSmpValSmp_Context *ctx )
+{
+  rtems_status_code sc;
+  size_t            i;
+
+  ctx->runner_id = rtems_task_self();
+  ctx->mutex_id = CreateMutex();
+
+  for ( i = 0; i < RTEMS_ARRAY_SIZE( ctx->job ); ++i ) {
+    ctx->job_context[ i ].arg = ctx;
+    ctx->job[ i ].context = &ctx->job_context[ i ];
+  }
+
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'S', 'T', 'K', 'Y' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+    PRIO_NORMAL,
+    &ctx->sticky_id
+  );
+  T_rsc_success( sc );
+
+  SetSelfPriority( PRIO_NORMAL );
+
+  ctx->worker_id[ WORKER_A ] = CreateTask( "WRKA", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_A ], WorkerA, ctx );
+
+  ctx->worker_id[ WORKER_B ] = CreateTask( "WRKB", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_B ], WorkerB, ctx );
+
+  ctx->worker_id[ WORKER_C ] = CreateTask( "WRKC", PRIO_HIGH );
+  StartTask( ctx->worker_id[ WORKER_C ], WorkerC, ctx );
+}
+
+static void ScoreSchedSmpValSmp_Setup_Wrap( void *arg )
+{
+  ScoreSchedSmpValSmp_Context *ctx;
+
+  ctx = arg;
+  ScoreSchedSmpValSmp_Setup( ctx );
+}
+
+static void ScoreSchedSmpValSmp_Teardown( ScoreSchedSmpValSmp_Context *ctx )
+{
+  DeleteTask( ctx->worker_id[ WORKER_A ] );
+  DeleteTask( ctx->worker_id[ WORKER_B ] );
+  DeleteTask( ctx->worker_id[ WORKER_C ] );
+  DeleteMutex( ctx->mutex_id );
+  DeleteMutex( ctx->sticky_id );
+  RestoreRunnerPriority();
+}
+
+static void ScoreSchedSmpValSmp_Teardown_Wrap( void *arg )
+{
+  ScoreSchedSmpValSmp_Context *ctx;
+
+  ctx = arg;
+  ScoreSchedSmpValSmp_Teardown( ctx );
+}
+
+static T_fixture ScoreSchedSmpValSmp_Fixture = {
+  .setup = ScoreSchedSmpValSmp_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreSchedSmpValSmp_Teardown_Wrap,
+  .scope = NULL,
+  .initial_context = &ScoreSchedSmpValSmp_Instance
+};
+
+/**
+ * @brief Construct a system state in which a sticky thread is blocked while an
+ *   idle thread executes on behalf of the thread.
+ */
+static void ScoreSchedSmpValSmp_Action_0( ScoreSchedSmpValSmp_Context *ctx )
+{
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+  SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_STICKY_OBTAIN );
+  MakeBusy( ctx, WORKER_A );
+  WaitForBusy( ctx, WORKER_A );
+
+  /*
+   * Block the sticky worker A while it uses an idle thread in the home
+   * scheduler.
+   */
+  CallWithinISR( BlockAndReuseIdle, ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  StopBusy( ctx, WORKER_A );
+  SendAndSync( ctx, WORKER_A, EVENT_STICKY_RELEASE );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+  SetSelfPriority( PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+  SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+/**
+ * @brief Construct a system state in which a thread is preempted while it is
+ *   blocked.
+ */
+static void ScoreSchedSmpValSmp_Action_1( ScoreSchedSmpValSmp_Context *ctx )
+{
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+  SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+  MakeBusy( ctx, WORKER_A );
+  WaitForBusy( ctx, WORKER_A );
+
+  /*
+   * Block worker A and preempt it before the withdraw node operations are
+   * performed for worker A.
+   */
+  T_scheduler_set_event_handler( BlockAndPreempt, ctx );
+  SuspendTask( ctx->worker_id[ WORKER_A ] );
+
+  /*
+   * Clean up all used resources.
+   */
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+  StopBusy( ctx, WORKER_C );
+  StopBusy( ctx, WORKER_A );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+  SetSelfPriority( PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+  SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+/**
+ * @brief Construct a system state in which a thread is rescheduled while it
+ *   is not scheduled on another scheduler.
+ */
+static void ScoreSchedSmpValSmp_Action_2( ScoreSchedSmpValSmp_Context *ctx )
+{
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+  SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_B_ID, PRIO_HIGH );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_STICKY_OBTAIN );
+  MakeBusy( ctx, WORKER_A );
+  WaitForBusy( ctx, WORKER_A );
+
+  /*
+   * Reschedule worker A by the home scheduler while worker A is not scheduled
+   * on another scheduler.
+   */
+  CallWithinISR( ReadyToScheduled, ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  StopBusy( ctx, WORKER_C );
+  StopBusy( ctx, WORKER_A );
+  SendAndSync( ctx, WORKER_A, EVENT_STICKY_RELEASE );
+  SetPriority( ctx->worker_id[ WORKER_A ], PRIO_HIGH );
+  SetSelfPriority( PRIO_NORMAL );
+  SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+  SendAndSync( ctx, WORKER_B, EVENT_RELEASE );
+  SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+  SetScheduler( ctx->worker_id[ WORKER_C ], SCHEDULER_A_ID, PRIO_HIGH );
+}
+
+/**
+ * @brief Construct a system state in which an ask for help request is
+ *   cancelled while it is processed on another processor.
+ */
+static void ScoreSchedSmpValSmp_Action_3( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Unblock worker A.  It cannot be scheduled on its home scheduler. Intercept
+   * the ask for help request.  Block worker A.  This will cancel the ask
+   * for help request.  Remove the request while the other processor tries to
+   * cancel the request.
+   */
+  SuspendTask( ctx->worker_id[ WORKER_A ] );
+  T_scheduler_set_event_handler( UnblockAskForHelp, ctx );
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+
+  /*
+   * Clean up all used resources.
+   */
+  ResumeTask( ctx->worker_id[ WORKER_A ] );
+  StopBusy( ctx, WORKER_C );
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a block operation.
+ */
+static void ScoreSchedSmpValSmp_Action_4( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Block the runner thread while the owner thread of the highest priority
+   * ready node is already scheduled.
+   */
+  T_scheduler_set_event_handler( BlockStopBusyC, ctx );
+  CallWithinISR( Block, ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a block operation.
+ */
+static void ScoreSchedSmpValSmp_Action_5( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Block the runner thread while the owner thread of the highest priority
+   * ready node is blocked.
+   */
+  T_scheduler_set_event_handler( BlockSuspendA, ctx );
+  CallWithinISR( Block, ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set affinity
+ *   operation.
+ */
+static void ScoreSchedSmpValSmp_Action_6( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is already scheduled.
+   */
+  T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
+  SetSelfAffinityAll();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set affinity
+ *   operation while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_7( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is already scheduled.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( SetAffinityStopBusyC, ctx );
+  SetSelfAffinityAll();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set affinity operation.
+ */
+static void ScoreSchedSmpValSmp_Action_8( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is blocked.
+   */
+  T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
+  SetSelfAffinityAll();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set affinity operation while a
+ *   sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_9( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Set the affinity of the runner thread while the owner thread of the
+   * highest priority ready node is blocked.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( SetAffinitySuspendA, ctx );
+  SetSelfAffinityAll();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set priority
+ *   operation.
+ */
+static void ScoreSchedSmpValSmp_Action_10( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Set the priority of the runner thread while the owner thread of the
+   * highest priority ready node is already scheduled.
+   */
+  SetSelfPriority( PRIO_HIGH );
+  T_scheduler_set_event_handler( UpdatePriorityStopBusyC, ctx );
+  SetSelfPriority( PRIO_NORMAL );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a set priority
+ *   operation while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_11( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Set the priority of the runner thread while the owner thread of the
+   * highest priority ready node is already scheduled.
+   */
+  MakeSticky( ctx );
+  CallWithinISR( RaiseWorkerPriorityWithIdleRunner, ctx );
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a set priority operation.
+ */
+static void ScoreSchedSmpValSmp_Action_12( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Set the priority of the runner thread while the owner thread of the
+   * highest priority ready node is blocked.
+   */
+  SetSelfPriority( PRIO_HIGH );
+  T_scheduler_set_event_handler( UpdatePrioritySuspendA, ctx );
+  SetSelfPriority( PRIO_NORMAL );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a yield operation.
+ */
+static void ScoreSchedSmpValSmp_Action_13( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is already
+   * scheduled.
+   */
+  T_scheduler_set_event_handler( YieldStopBusyC, ctx );
+  Yield();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is already scheduled during a yield operation
+ *   while a sticky node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_14( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerScheduled( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is already
+   * scheduled.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( YieldStopBusyC, ctx );
+  Yield();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerScheduled( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a yield operation.
+ */
+static void ScoreSchedSmpValSmp_Action_15( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is
+   * blocked.
+   */
+  T_scheduler_set_event_handler( YieldSuspendA, ctx );
+  Yield();
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @brief Construct a system state in which a scheduler tries to schedule a
+ *   node whose owner thread is blocked during a yield operation while a sticky
+ *   node is involved.
+ */
+static void ScoreSchedSmpValSmp_Action_16( ScoreSchedSmpValSmp_Context *ctx )
+{
+  PrepareOwnerBlocked( ctx );
+
+  /*
+   * Yield while the owner thread of the highest priority ready node is
+   * blocked.
+   */
+  MakeSticky( ctx );
+  T_scheduler_set_event_handler( YieldSuspendA, ctx );
+  Yield();
+  CleanSticky( ctx );
+
+  /*
+   * Clean up all used resources.
+   */
+  CleanupOwnerBlocked( ctx );
+}
+
+/**
+ * @fn void T_case_body_ScoreSchedSmpValSmp( void )
+ */
+T_TEST_CASE_FIXTURE( ScoreSchedSmpValSmp, &ScoreSchedSmpValSmp_Fixture )
+{
+  ScoreSchedSmpValSmp_Context *ctx;
+
+  ctx = T_fixture_context();
+
+  ScoreSchedSmpValSmp_Action_0( ctx );
+  ScoreSchedSmpValSmp_Action_1( ctx );
+  ScoreSchedSmpValSmp_Action_2( ctx );
+  ScoreSchedSmpValSmp_Action_3( ctx );
+  ScoreSchedSmpValSmp_Action_4( ctx );
+  ScoreSchedSmpValSmp_Action_5( ctx );
+  ScoreSchedSmpValSmp_Action_6( ctx );
+  ScoreSchedSmpValSmp_Action_7( ctx );
+  ScoreSchedSmpValSmp_Action_8( ctx );
+  ScoreSchedSmpValSmp_Action_9( ctx );
+  ScoreSchedSmpValSmp_Action_10( ctx );
+  ScoreSchedSmpValSmp_Action_11( ctx );
+  ScoreSchedSmpValSmp_Action_12( ctx );
+  ScoreSchedSmpValSmp_Action_13( ctx );
+  ScoreSchedSmpValSmp_Action_14( ctx );
+  ScoreSchedSmpValSmp_Action_15( ctx );
+  ScoreSchedSmpValSmp_Action_16( ctx );
+}
+
+/** @} */
diff --git a/testsuites/validation/tc-sched-yield.c b/testsuites/validation/tc-sched-yield.c
new file mode 100644
index 0000000..8aa953f
--- /dev/null
+++ b/testsuites/validation/tc-sched-yield.c
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSTestCaseScoreSchedReqYield
+ */
+
+/*
+ * Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file is part of the RTEMS quality process and was automatically
+ * generated.  If you find something that needs to be fixed or
+ * worded better please post a report or patch to an RTEMS mailing list
+ * or raise a bug report:
+ *
+ * https://www.rtems.org/bugs.html
+ *
+ * For information on updating and regenerating please refer to the How-To
+ * section in the Software Requirements Engineering chapter of the
+ * RTEMS Software Engineering manual.  The manual is provided as a part of
+ * a release.  For development sources please refer to the online
+ * documentation at:
+ *
+ * https://docs.rtems.org
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/test-scheduler.h>
+#include <rtems/score/percpu.h>
+
+#include "tx-support.h"
+#include "tx-thread-queue.h"
+
+#include <rtems/test.h>
+
+/**
+ * @defgroup RTEMSTestCaseScoreSchedReqYield spec:/score/sched/req/yield
+ *
+ * @ingroup RTEMSTestSuiteTestsuitesValidationNoClock0
+ *
+ * @{
+ */
+
+typedef enum {
+  ScoreSchedReqYield_Pre_EligibleScheduler_Home,
+  ScoreSchedReqYield_Pre_EligibleScheduler_Helping,
+  ScoreSchedReqYield_Pre_EligibleScheduler_NA
+} ScoreSchedReqYield_Pre_EligibleScheduler;
+
+typedef enum {
+  ScoreSchedReqYield_Pre_UsedScheduler_Home,
+  ScoreSchedReqYield_Pre_UsedScheduler_Helping,
+  ScoreSchedReqYield_Pre_UsedScheduler_NA
+} ScoreSchedReqYield_Pre_UsedScheduler;
+
+typedef enum {
+  ScoreSchedReqYield_Pre_HomeSchedulerState_Blocked,
+  ScoreSchedReqYield_Pre_HomeSchedulerState_Scheduled,
+  ScoreSchedReqYield_Pre_HomeSchedulerState_Ready,
+  ScoreSchedReqYield_Pre_HomeSchedulerState_NA
+} ScoreSchedReqYield_Pre_HomeSchedulerState;
+
+typedef enum {
+  ScoreSchedReqYield_Pre_Sticky_Yes,
+  ScoreSchedReqYield_Pre_Sticky_No,
+  ScoreSchedReqYield_Pre_Sticky_NA
+} ScoreSchedReqYield_Pre_Sticky;
+
+typedef enum {
+  ScoreSchedReqYield_Pre_Other_Yes,
+  ScoreSchedReqYield_Pre_Other_No,
+  ScoreSchedReqYield_Pre_Other_NA
+} ScoreSchedReqYield_Pre_Other;
+
+typedef enum {
+  ScoreSchedReqYield_Post_HomeSchedulerState_Blocked,
+  ScoreSchedReqYield_Post_HomeSchedulerState_Scheduled,
+  ScoreSchedReqYield_Post_HomeSchedulerState_Ready,
+  ScoreSchedReqYield_Post_HomeSchedulerState_Idle,
+  ScoreSchedReqYield_Post_HomeSchedulerState_NA
+} ScoreSchedReqYield_Post_HomeSchedulerState;
+
+typedef enum {
+  ScoreSchedReqYield_Post_AskForHelp_Yes,
+  ScoreSchedReqYield_Post_AskForHelp_No,
+  ScoreSchedReqYield_Post_AskForHelp_NA
+} ScoreSchedReqYield_Post_AskForHelp;
+
+typedef struct {
+  uint16_t Skip : 1;
+  uint16_t Pre_EligibleScheduler_NA : 1;
+  uint16_t Pre_UsedScheduler_NA : 1;
+  uint16_t Pre_HomeSchedulerState_NA : 1;
+  uint16_t Pre_Sticky_NA : 1;
+  uint16_t Pre_Other_NA : 1;
+  uint16_t Post_HomeSchedulerState : 3;
+  uint16_t Post_AskForHelp : 2;
+} ScoreSchedReqYield_Entry;
+
+/**
+ * @brief Test context for spec:/score/sched/req/yield test case.
+ */
+typedef struct {
+  /**
+   * @brief This member contains the thread queue test context.
+   */
+  TQContext tq_ctx;
+
+  /**
+   * @brief This member contains the identifier of a sticky mutex.
+   */
+  rtems_id sticky_mutex;
+
+  /**
+   * @brief This member contains the processor index after yielding.
+   */
+  uint32_t cpu_after_yield;
+
+  /**
+   * @brief If this member is true, then the runner shall have a helping
+   *   scheduler.
+   */
+  bool has_helping;
+
+  /**
+   * @brief If this member is true, then the runner shall use a helping
+   *   scheduler.
+   */
+  bool use_helping;
+
+  /**
+   * @brief If this member is true, then the runner shall be ready in its home
+   *   scheduler.
+   */
+  bool ready;
+
+  /**
+   * @brief If this member is true, then the runner shall be sticky.
+   */
+  bool sticky;
+
+  /**
+   * @brief If this member is true, then another task in the home
+   *   scheduler of the runner shall be ready with an equal priority.
+   */
+  bool other_ready;
+
+  /**
+   * @brief If this member is true, then processor zero was idle before
+   *   yielding.
+   */
+  bool is_idle_before_yield;
+
+  /**
+   * @brief If this member is true, then processor zero was idle after
+   *   yielding.
+   */
+  bool is_idle_after_yield;
+
+  struct {
+    /**
+     * @brief This member defines the pre-condition states for the next action.
+     */
+    size_t pcs[ 5 ];
+
+    /**
+     * @brief If this member is true, then the test action loop is executed.
+     */
+    bool in_action_loop;
+
+    /**
+     * @brief This member contains the next transition map index.
+     */
+    size_t index;
+
+    /**
+     * @brief This member contains the current transition map entry.
+     */
+    ScoreSchedReqYield_Entry entry;
+
+    /**
+     * @brief If this member is true, then the current transition variant
+     *   should be skipped.
+     */
+    bool skip;
+  } Map;
+} ScoreSchedReqYield_Context;
+
+static ScoreSchedReqYield_Context
+  ScoreSchedReqYield_Instance;
+
+static const char * const ScoreSchedReqYield_PreDesc_EligibleScheduler[] = {
+  "Home",
+  "Helping",
+  "NA"
+};
+
+static const char * const ScoreSchedReqYield_PreDesc_UsedScheduler[] = {
+  "Home",
+  "Helping",
+  "NA"
+};
+
+static const char * const ScoreSchedReqYield_PreDesc_HomeSchedulerState[] = {
+  "Blocked",
+  "Scheduled",
+  "Ready",
+  "NA"
+};
+
+static const char * const ScoreSchedReqYield_PreDesc_Sticky[] = {
+  "Yes",
+  "No",
+  "NA"
+};
+
+static const char * const ScoreSchedReqYield_PreDesc_Other[] = {
+  "Yes",
+  "No",
+  "NA"
+};
+
+static const char * const * const ScoreSchedReqYield_PreDesc[] = {
+  ScoreSchedReqYield_PreDesc_EligibleScheduler,
+  ScoreSchedReqYield_PreDesc_UsedScheduler,
+  ScoreSchedReqYield_PreDesc_HomeSchedulerState,
+  ScoreSchedReqYield_PreDesc_Sticky,
+  ScoreSchedReqYield_PreDesc_Other,
+  NULL
+};
+
+#define COUNTER TQ_BLOCKER_A
+
+#define HELPER TQ_BLOCKER_B
+
+#define MOVER TQ_BLOCKER_C
+
+typedef ScoreSchedReqYield_Context Context;
+
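+/*
+ * Make the mover task busy so that the runner is moved to processor 1, then
+ * stop the mover busy wait and wait until the mover stopped execution.
+ */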
+static void MoveToHelping( Context *ctx )
+{
+  ctx->tq_ctx.busy_wait[ MOVER ] = true;
+  TQSend( &ctx->tq_ctx, MOVER, TQ_EVENT_BUSY_WAIT );
+  TQWaitForEventsReceived( &ctx->tq_ctx, MOVER );
+  T_eq_u32( rtems_scheduler_get_processor(), 1 );
+  ctx->tq_ctx.busy_wait[ MOVER ] = false;
+  TQWaitForExecutionStop( &ctx->tq_ctx, MOVER );
+}
+
+static uint32_t GetCounter( const Context *ctx )
+{
+  return TQGetWorkerCounter( &ctx->tq_ctx, COUNTER );
+}
+
+static void ScoreSchedReqYield_Pre_EligibleScheduler_Prepare(
+  ScoreSchedReqYield_Context              *ctx,
+  ScoreSchedReqYield_Pre_EligibleScheduler state
+)
+{
+  switch ( state ) {
+    case ScoreSchedReqYield_Pre_EligibleScheduler_Home: {
+      /*
+       * While the only eligible scheduler of the thread is the home scheduler.
+       */
+      ctx->has_helping = false;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_EligibleScheduler_Helping: {
+      /*
+       * While the thread has at least one helping scheduler.
+       */
+      ctx->has_helping = true;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_EligibleScheduler_NA:
+      break;
+  }
+}
+
+static void ScoreSchedReqYield_Pre_UsedScheduler_Prepare(
+  ScoreSchedReqYield_Context          *ctx,
+  ScoreSchedReqYield_Pre_UsedScheduler state
+)
+{
+  switch ( state ) {
+    case ScoreSchedReqYield_Pre_UsedScheduler_Home: {
+      /*
+       * While the thread is scheduled on the home scheduler.
+       */
+      ctx->use_helping = false;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_UsedScheduler_Helping: {
+      /*
+       * While the thread is scheduled on a helping scheduler.
+       */
+      ctx->use_helping = true;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_UsedScheduler_NA:
+      break;
+  }
+}
+
+static void ScoreSchedReqYield_Pre_HomeSchedulerState_Prepare(
+  ScoreSchedReqYield_Context               *ctx,
+  ScoreSchedReqYield_Pre_HomeSchedulerState state
+)
+{
+  switch ( state ) {
+    case ScoreSchedReqYield_Pre_HomeSchedulerState_Blocked: {
+      /*
+       * While the thread is blocked in its home scheduler.
+       */
+      ctx->ready = false;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_HomeSchedulerState_Scheduled: {
+      /*
+       * While the thread is scheduled in its home scheduler.
+       */
+      ctx->ready = false;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_HomeSchedulerState_Ready: {
+      /*
+       * While the thread is ready in its home scheduler.
+       */
+      ctx->ready = true;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_HomeSchedulerState_NA:
+      break;
+  }
+}
+
+static void ScoreSchedReqYield_Pre_Sticky_Prepare(
+  ScoreSchedReqYield_Context   *ctx,
+  ScoreSchedReqYield_Pre_Sticky state
+)
+{
+  switch ( state ) {
+    case ScoreSchedReqYield_Pre_Sticky_Yes: {
+      /*
+       * While the thread is sticky.
+       */
+      ctx->sticky = true;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_Sticky_No: {
+      /*
+       * While the thread is not sticky.
+       */
+      ctx->sticky = false;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_Sticky_NA:
+      break;
+  }
+}
+
+static void ScoreSchedReqYield_Pre_Other_Prepare(
+  ScoreSchedReqYield_Context  *ctx,
+  ScoreSchedReqYield_Pre_Other state
+)
+{
+  switch ( state ) {
+    case ScoreSchedReqYield_Pre_Other_Yes: {
+      /*
+       * While at least one ready thread with a priority equal to the priority
+       * of the thread exists in the home scheduler of the thread.
+       */
+      ctx->other_ready = true;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_Other_No: {
+      /*
+       * While no ready thread with a priority equal to the priority of the
+       * thread exists in the home scheduler of the thread.
+       */
+      ctx->other_ready = false;
+      break;
+    }
+
+    case ScoreSchedReqYield_Pre_Other_NA:
+      break;
+  }
+}
+
+static void ScoreSchedReqYield_Post_HomeSchedulerState_Check(
+  ScoreSchedReqYield_Context                *ctx,
+  ScoreSchedReqYield_Post_HomeSchedulerState state
+)
+{
+  switch ( state ) {
+    case ScoreSchedReqYield_Post_HomeSchedulerState_Blocked: {
+      /*
+       * The thread shall be blocked in its home scheduler.
+       */
+      T_true( ctx->is_idle_after_yield );
+      T_eq_u32( ctx->cpu_after_yield, 1 );
+      break;
+    }
+
+    case ScoreSchedReqYield_Post_HomeSchedulerState_Scheduled: {
+      /*
+       * The thread shall be scheduled in its home scheduler.
+       */
+      T_false( ctx->is_idle_before_yield );
+      T_false( ctx->is_idle_after_yield );
+      T_eq_u32( GetCounter( ctx ), 0 );
+      T_eq_u32( ctx->cpu_after_yield, 0 );
+      break;
+    }
+
+    case ScoreSchedReqYield_Post_HomeSchedulerState_Ready: {
+      /*
+       * The thread shall be ready in its home scheduler.
+       */
+      T_eq_u32( GetCounter( ctx ), 1 );
+      break;
+    }
+
+    case ScoreSchedReqYield_Post_HomeSchedulerState_Idle: {
+      /*
+       * An idle thread shall execute on behalf of the thread in its home
+       * scheduler.
+       */
+      T_true( ctx->is_idle_before_yield );
+      T_true( ctx->is_idle_after_yield );
+      T_eq_u32( GetCounter( ctx ), 0 );
+      T_eq_u32( ctx->cpu_after_yield, 1 );
+      break;
+    }
+
+    case ScoreSchedReqYield_Post_HomeSchedulerState_NA:
+      break;
+  }
+}
+
+static void ScoreSchedReqYield_Post_AskForHelp_Check(
+  ScoreSchedReqYield_Context        *ctx,
+  ScoreSchedReqYield_Post_AskForHelp state
+)
+{
+  size_t                   index;
+  const T_scheduler_event *event;
+
+  index = 0;
+
+  switch ( state ) {
+    case ScoreSchedReqYield_Post_AskForHelp_Yes: {
+      /*
+       * The thread shall ask all its eligible schedulers for help.
+       */
+      event = TQGetNextAskForHelp( &ctx->tq_ctx, &index );
+      T_eq_ptr( event->thread, ctx->tq_ctx.runner_tcb );
+
+      event = TQGetNextAskForHelp( &ctx->tq_ctx, &index );
+      T_eq_ptr( event->thread, ctx->tq_ctx.runner_tcb );
+
+      event = TQGetNextAskForHelp( &ctx->tq_ctx, &index );
+      T_eq_ptr( event, &T_scheduler_event_null );
+      break;
+    }
+
+    case ScoreSchedReqYield_Post_AskForHelp_No: {
+      /*
+       * The thread shall not ask for help.
+       */
+      event = TQGetNextAskForHelp( &ctx->tq_ctx, &index );
+      T_eq_ptr( event, &T_scheduler_event_null );
+      break;
+    }
+
+    case ScoreSchedReqYield_Post_AskForHelp_NA:
+      break;
+  }
+}
+
+static void ScoreSchedReqYield_Setup( ScoreSchedReqYield_Context *ctx )
+{
+  rtems_status_code sc;
+
+  memset( ctx, 0, sizeof( *ctx ) );
+  ctx->tq_ctx.enqueue_prepare = TQEnqueuePrepareDefault;
+  ctx->tq_ctx.enqueue_done = TQEnqueueDoneDefault;
+  ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
+  ctx->tq_ctx.surrender = TQSurrenderClassicSem;
+  ctx->tq_ctx.convert_status = TQConvertStatusClassic;
+  TQInitialize( &ctx->tq_ctx );
+
+  sc = rtems_semaphore_create(
+    rtems_build_name( 'M', 'U', 'T', 'X' ),
+    1,
+    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
+      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
+    PRIO_NORMAL,
+    &ctx->sticky_mutex
+  );
+  T_rsc_success( sc );
+
+  TQSetPriority( &ctx->tq_ctx, COUNTER, PRIO_NORMAL );
+
+  #if defined(RTEMS_SMP)
+  TQSetScheduler( &ctx->tq_ctx, HELPER, SCHEDULER_B_ID, PRIO_NORMAL );
+  TQSetPriority( &ctx->tq_ctx, MOVER, PRIO_HIGH );
+  #endif
+}
+
+static void ScoreSchedReqYield_Setup_Wrap( void *arg )
+{
+  ScoreSchedReqYield_Context *ctx;
+
+  ctx = arg;
+  ctx->Map.in_action_loop = false;
+  ScoreSchedReqYield_Setup( ctx );
+}
+
+static void ScoreSchedReqYield_Teardown( ScoreSchedReqYield_Context *ctx )
+{
+  TQDestroy( &ctx->tq_ctx );
+  DeleteMutex( ctx->sticky_mutex );
+}
+
+static void ScoreSchedReqYield_Teardown_Wrap( void *arg )
+{
+  ScoreSchedReqYield_Context *ctx;
+
+  ctx = arg;
+  ctx->Map.in_action_loop = false;
+  ScoreSchedReqYield_Teardown( ctx );
+}
+
+static void ScoreSchedReqYield_Action( ScoreSchedReqYield_Context *ctx )
+{
+  const Per_CPU_Control *cpu;
+  bool                   other_busy;
+
+  if ( ctx->has_helping ) {
+    TQMutexObtain( &ctx->tq_ctx, TQ_MUTEX_A );
+    TQSendAndWaitForExecutionStop(
+      &ctx->tq_ctx,
+      HELPER,
+      TQ_EVENT_MUTEX_A_OBTAIN
+    );
+  }
+
+  if ( ctx->use_helping ) {
+    MoveToHelping( ctx );
+  }
+
+  TQResetCounter( &ctx->tq_ctx );
+
+  if ( ctx->use_helping && ctx->ready ) {
+    ctx->tq_ctx.busy_wait[ COUNTER ] = true;
+    TQSend( &ctx->tq_ctx, COUNTER, TQ_EVENT_COUNT | TQ_EVENT_BUSY_WAIT );
+    other_busy = true;
+  } else {
+    other_busy = false;
+  }
+
+  if ( ctx->sticky ) {
+    ObtainMutex( ctx->sticky_mutex );
+  }
+
+  if ( ctx->other_ready && !other_busy ) {
+    TQSend( &ctx->tq_ctx, COUNTER, TQ_EVENT_COUNT );
+  }
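+  /*
+   * Sample whether an idle thread is the heir of processor zero right before
+   * the yield, for example because the sticky runner executes on its helping
+   * scheduler and an idle thread runs on its behalf at home.
+   */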
+
+  cpu = _Per_CPU_Get_by_index( 0 );
+  ctx->is_idle_before_yield = cpu->heir->is_idle;
+
+  TQSchedulerRecordStart( &ctx->tq_ctx );
+  Yield();
+  TQSchedulerRecordStop( &ctx->tq_ctx );
+
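+  /*
+   * End the busy wait of the COUNTER worker and wait until it is no longer
+   * the heir of processor zero before the heir state after the yield is
+   * sampled.
+   */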
+  #if defined(RTEMS_SMP)
+  ctx->tq_ctx.busy_wait[ COUNTER ] = false;
+
+  while ( cpu->heir == ctx->tq_ctx.worker_tcb[ COUNTER ] ) {
+    RTEMS_COMPILER_MEMORY_BARRIER();
+  }
+  #endif
+
+  ctx->is_idle_after_yield = cpu->heir->is_idle;
+  ctx->cpu_after_yield = rtems_scheduler_get_processor();
+
+  if ( ctx->sticky ) {
+    ReleaseMutex( ctx->sticky_mutex );
+  }
+
+  if ( ctx->has_helping ) {
+    TQMutexRelease( &ctx->tq_ctx, TQ_MUTEX_A );
+    TQSendAndWaitForExecutionStop(
+      &ctx->tq_ctx,
+      HELPER,
+      TQ_EVENT_MUTEX_A_RELEASE
+    );
+  }
+}
+
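+/*
+ * Each transition map entry is initialized with the Skip flag, the N/A flag
+ * of each pre-condition, and the expected post-condition states.  Entries
+ * with the Skip flag set are skipped by the test body, for example all
+ * entries on non-SMP configurations.
+ */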
+static const ScoreSchedReqYield_Entry
+ScoreSchedReqYield_Entries[] = {
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#endif
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Ready,
+    ScoreSchedReqYield_Post_AskForHelp_No },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Scheduled,
+    ScoreSchedReqYield_Post_AskForHelp_No },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Blocked,
+    ScoreSchedReqYield_Post_AskForHelp_No },
+#endif
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Ready,
+    ScoreSchedReqYield_Post_AskForHelp_No },
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Scheduled,
+    ScoreSchedReqYield_Post_AskForHelp_No },
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Ready,
+    ScoreSchedReqYield_Post_AskForHelp_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Ready,
+    ScoreSchedReqYield_Post_AskForHelp_Yes },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA },
+#else
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Scheduled,
+    ScoreSchedReqYield_Post_AskForHelp_No },
+#endif
+#if !defined(RTEMS_SMP)
+  { 1, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_NA,
+    ScoreSchedReqYield_Post_AskForHelp_NA }
+#else
+  { 0, 0, 0, 0, 0, 0, ScoreSchedReqYield_Post_HomeSchedulerState_Idle,
+    ScoreSchedReqYield_Post_AskForHelp_No }
+#endif
+};
+
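+/*
+ * This map selects for each of the 48 pre-condition state combinations
+ * (2 * 2 * 3 * 2 * 2, enumerated in the order of the nested loops of the
+ * test body) the corresponding element of ScoreSchedReqYield_Entries[].
+ */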
+static const uint8_t
+ScoreSchedReqYield_Map[] = {
+  0, 0, 2, 2, 3, 8, 10, 11, 0, 0, 2, 2, 4, 4, 1, 1, 5, 5, 1, 1, 5, 5, 1, 1, 0,
+  0, 6, 6, 12, 8, 13, 14, 0, 0, 6, 6, 4, 4, 9, 9, 3, 15, 7, 7, 3, 3, 7, 7
+};
+
+static size_t ScoreSchedReqYield_Scope( void *arg, char *buf, size_t n )
+{
+  ScoreSchedReqYield_Context *ctx;
+
+  ctx = arg;
+
+  if ( ctx->Map.in_action_loop ) {
+    return T_get_scope( ScoreSchedReqYield_PreDesc, buf, n, ctx->Map.pcs );
+  }
+
+  return 0;
+}
+
+static T_fixture ScoreSchedReqYield_Fixture = {
+  .setup = ScoreSchedReqYield_Setup_Wrap,
+  .stop = NULL,
+  .teardown = ScoreSchedReqYield_Teardown_Wrap,
+  .scope = ScoreSchedReqYield_Scope,
+  .initial_context = &ScoreSchedReqYield_Instance
+};
+
+static inline ScoreSchedReqYield_Entry ScoreSchedReqYield_PopEntry(
+  ScoreSchedReqYield_Context *ctx
+)
+{
+  size_t index;
+
+  index = ctx->Map.index;
+  ctx->Map.index = index + 1;
+  return ScoreSchedReqYield_Entries[
+    ScoreSchedReqYield_Map[ index ]
+  ];
+}
+
+static void ScoreSchedReqYield_TestVariant( ScoreSchedReqYield_Context *ctx )
+{
+  ScoreSchedReqYield_Pre_EligibleScheduler_Prepare( ctx, ctx->Map.pcs[ 0 ] );
+  ScoreSchedReqYield_Pre_UsedScheduler_Prepare( ctx, ctx->Map.pcs[ 1 ] );
+  ScoreSchedReqYield_Pre_HomeSchedulerState_Prepare( ctx, ctx->Map.pcs[ 2 ] );
+  ScoreSchedReqYield_Pre_Sticky_Prepare( ctx, ctx->Map.pcs[ 3 ] );
+  ScoreSchedReqYield_Pre_Other_Prepare( ctx, ctx->Map.pcs[ 4 ] );
+  ScoreSchedReqYield_Action( ctx );
+  ScoreSchedReqYield_Post_HomeSchedulerState_Check(
+    ctx,
+    ctx->Map.entry.Post_HomeSchedulerState
+  );
+  ScoreSchedReqYield_Post_AskForHelp_Check(
+    ctx,
+    ctx->Map.entry.Post_AskForHelp
+  );
+}
+
+/**
+ * @fn void T_case_body_ScoreSchedReqYield( void )
+ */
+T_TEST_CASE_FIXTURE( ScoreSchedReqYield, &ScoreSchedReqYield_Fixture )
+{
+  ScoreSchedReqYield_Context *ctx;
+
+  ctx = T_fixture_context();
+  ctx->Map.in_action_loop = true;
+  ctx->Map.index = 0;
+
+  for (
+    ctx->Map.pcs[ 0 ] = ScoreSchedReqYield_Pre_EligibleScheduler_Home;
+    ctx->Map.pcs[ 0 ] < ScoreSchedReqYield_Pre_EligibleScheduler_NA;
+    ++ctx->Map.pcs[ 0 ]
+  ) {
+    for (
+      ctx->Map.pcs[ 1 ] = ScoreSchedReqYield_Pre_UsedScheduler_Home;
+      ctx->Map.pcs[ 1 ] < ScoreSchedReqYield_Pre_UsedScheduler_NA;
+      ++ctx->Map.pcs[ 1 ]
+    ) {
+      for (
+        ctx->Map.pcs[ 2 ] = ScoreSchedReqYield_Pre_HomeSchedulerState_Blocked;
+        ctx->Map.pcs[ 2 ] < ScoreSchedReqYield_Pre_HomeSchedulerState_NA;
+        ++ctx->Map.pcs[ 2 ]
+      ) {
+        for (
+          ctx->Map.pcs[ 3 ] = ScoreSchedReqYield_Pre_Sticky_Yes;
+          ctx->Map.pcs[ 3 ] < ScoreSchedReqYield_Pre_Sticky_NA;
+          ++ctx->Map.pcs[ 3 ]
+        ) {
+          for (
+            ctx->Map.pcs[ 4 ] = ScoreSchedReqYield_Pre_Other_Yes;
+            ctx->Map.pcs[ 4 ] < ScoreSchedReqYield_Pre_Other_NA;
+            ++ctx->Map.pcs[ 4 ]
+          ) {
+            ctx->Map.entry = ScoreSchedReqYield_PopEntry( ctx );
+
+            if ( ctx->Map.entry.Skip ) {
+              continue;
+            }
+
+            ScoreSchedReqYield_TestVariant( ctx );
+          }
+        }
+      }
+    }
+  }
+}
+
+/** @} */


