[rtems commit] score: Implement forced thread migration

Sebastian Huber sebh at rtems.org
Wed May 7 15:26:20 UTC 2014


Module:    rtems
Branch:    master
Commit:    38b59a6d3052654e356ae16b4a243c362312acce
Changeset: http://git.rtems.org/rtems/commit/?id=38b59a6d3052654e356ae16b4a243c362312acce

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Fri May  2 10:31:09 2014 +0200

score: Implement forced thread migration

The current implementation of task migration in RTEMS has some
implications with respect to the interrupt latency. It is crucial to
preserve the system invariant that a task can execute on at most one
processor in the system at a time. This is accomplished with a boolean
indicator in the task context. The processor architecture specific
low-level task context switch code will mark that a task context is no
longer executing and will wait until the heir context has stopped
execution before it restores the heir context and resumes execution of
the heir task. So there is one point in time in which a processor is
task. So there is one point in time in which a processor is without a
task. This is essential to avoid cyclic dependencies in case multiple
tasks migrate at once. Otherwise some supervising entity is necessary to
prevent livelocks. Such a global supervisor would lead to scalability
problems so this approach is not used. Currently the thread dispatch is
performed with interrupts disabled. So if the heir task is currently
executing on another processor, this prolongs the time of disabled
interrupts, since one processor has to wait for another processor to
make progress.

It is difficult to avoid this issue with the interrupt latency since
interrupts normally store the context of the interrupted task on its
stack. In case a task is marked as not executing we must not use its
task stack to store such an interrupt context. We cannot use the heir
stack before it has stopped execution on another processor. So if we enable
interrupts during this transition we have to provide an alternative task
independent stack for this time frame. This issue needs further
investigation.

---

 c/src/lib/libbsp/sparc/shared/irq_asm.S            |   20 ++
 c/src/lib/libcpu/powerpc/new-exceptions/cpu.c      |    4 +
 c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S  |   20 ++
 cpukit/libmisc/cpuuse/cpuusagereport.c             |    2 +-
 cpukit/rtems/include/rtems/rtems/tasks.h           |    2 -
 cpukit/rtems/src/tasksetscheduler.c                |    5 +-
 cpukit/score/cpu/arm/cpu.c                         |   12 +
 cpukit/score/cpu/arm/cpu_asm.S                     |   20 ++
 cpukit/score/cpu/arm/rtems/score/cpu.h             |   16 ++
 cpukit/score/cpu/i386/cpu.c                        |   18 ++
 cpukit/score/cpu/i386/cpu_asm.S                    |   29 ++-
 cpukit/score/cpu/i386/rtems/score/cpu.h            |   27 ++
 cpukit/score/cpu/no_cpu/rtems/score/cpu.h          |   18 ++
 cpukit/score/cpu/powerpc/cpu.c                     |    4 +
 cpukit/score/cpu/powerpc/rtems/score/cpu.h         |   14 +-
 cpukit/score/cpu/sparc/cpu.c                       |    8 +
 cpukit/score/cpu/sparc/rtems/score/cpu.h           |   13 +
 cpukit/score/include/rtems/score/percpu.h          |   51 ++++-
 cpukit/score/include/rtems/score/scheduler.h       |    2 +-
 cpukit/score/include/rtems/score/schedulerimpl.h   |   24 +--
 cpukit/score/include/rtems/score/schedulersmp.h    |    4 +-
 .../score/include/rtems/score/schedulersmpimpl.h   |   81 ++++--
 cpukit/score/include/rtems/score/statesimpl.h      |    2 +
 cpukit/score/include/rtems/score/thread.h          |   25 +-
 cpukit/score/include/rtems/score/threadimpl.h      |   68 +++++-
 cpukit/score/include/rtems/score/userextimpl.h     |   18 +-
 cpukit/score/src/smp.c                             |    5 +-
 cpukit/score/src/threaddispatch.c                  |   46 +---
 cpukit/score/src/threadhandler.c                   |    4 +-
 cpukit/score/src/threadinitialize.c                |    6 +-
 cpukit/score/src/threadrestart.c                   |    2 +-
 cpukit/score/src/threadstartmultitasking.c         |   14 +-
 doc/user/smp.t                                     |   46 ++++
 testsuites/smptests/Makefile.am                    |    1 +
 testsuites/smptests/configure.ac                   |    1 +
 testsuites/smptests/smpmigration02/Makefile.am     |   19 ++
 testsuites/smptests/smpmigration02/init.c          |  253 ++++++++++++++++++++
 .../smptests/smpmigration02/smpmigration02.doc     |   12 +
 .../smptests/smpmigration02/smpmigration02.scn     |    7 +
 testsuites/smptests/smpscheduler02/init.c          |    4 +-
 .../smpswitchextension01/smpswitchextension01.scn  |   14 +-
 testsuites/sptests/spscheduler01/init.c            |    8 +-
 testsuites/tmtests/tm26/task1.c                    |    2 -
 43 files changed, 799 insertions(+), 152 deletions(-)

diff --git a/c/src/lib/libbsp/sparc/shared/irq_asm.S b/c/src/lib/libbsp/sparc/shared/irq_asm.S
index 3a86ad5..8b15284 100644
--- a/c/src/lib/libbsp/sparc/shared/irq_asm.S
+++ b/c/src/lib/libbsp/sparc/shared/irq_asm.S
@@ -163,6 +163,21 @@ done_flushing:
         nop
         nop
 
+#if defined(RTEMS_SMP)
+        ! Indicate that this context is no longer executing
+        stb     %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+
+        ! Wait for context to stop execution if necessary
+1:
+        ldub    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
+        cmp     %g1, 0
+        bne     1b
+         mov    1, %g1
+
+        ! Indicate that this context is executing
+        stb     %g1, [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+#endif
+
         ld      [%o1 + G5_OFFSET], %g5        ! restore the global registers
         ld      [%o1 + G7_OFFSET], %g7
 
@@ -202,6 +217,11 @@ done_flushing:
 SYM(_CPU_Context_restore):
         save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
         rd      %psr, %o2
+#if defined(RTEMS_SMP)
+	! On SPARC the restore path needs also a valid executing context on SMP
+	! to update the is executing indicator.
+        mov     %i0, %o0
+#endif
         ba      SYM(_CPU_Context_restore_heir)
         mov     %i0, %o1                      ! in the delay slot
 
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
index 73a1d3e..32c0489 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
@@ -130,6 +130,10 @@ void _CPU_Context_Initialize(
   the_ppc_context->msr = msr_value;
   the_ppc_context->lr = (uint32_t) entry_point;
 
+#ifdef RTEMS_SMP
+  the_ppc_context->is_executing = false;
+#endif
+
 #ifdef __ALTIVEC__
   _CPU_Context_initialize_altivec( the_ppc_context );
 #endif
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
index dcd33df..4e74996 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
@@ -326,9 +326,29 @@ PROC (_CPU_Context_switch):
 
 	stw	r2, PPC_CONTEXT_OFFSET_GPR2(r3)
 
+#ifdef RTEMS_SMP
+	/* Indicate that this context is no longer executing */
+	msync
+	li	r5, 0
+	stb	r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r3)
+#endif
+
 	/* Restore context from r4 */
 restore_context:
 
+#ifdef RTEMS_SMP
+	/* Wait for context to stop execution if necessary */
+1:
+	lbz	r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r4)
+	cmpwi	r5, 0
+	bne	1b
+
+	/* Indicate that this context is executing */
+	li	r5, 1
+	stb	r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r4)
+	isync
+#endif
+
 #ifdef __ALTIVEC__
 	mr	r14, r4 
 	.extern	_CPU_Context_switch_altivec
diff --git a/cpukit/libmisc/cpuuse/cpuusagereport.c b/cpukit/libmisc/cpuuse/cpuusagereport.c
index 86b6377..296fa28 100644
--- a/cpukit/libmisc/cpuuse/cpuusagereport.c
+++ b/cpukit/libmisc/cpuuse/cpuusagereport.c
@@ -43,7 +43,7 @@
       }
     #else
       /* FIXME: Locking */
-      if ( the_thread->is_executing ) {
+      if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
         *time_of_context_switch =
           _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
         return true;
diff --git a/cpukit/rtems/include/rtems/rtems/tasks.h b/cpukit/rtems/include/rtems/rtems/tasks.h
index 43e8c8a..a14e865 100644
--- a/cpukit/rtems/include/rtems/rtems/tasks.h
+++ b/cpukit/rtems/include/rtems/rtems/tasks.h
@@ -569,8 +569,6 @@ rtems_status_code rtems_task_get_scheduler(
  *
  * @retval RTEMS_SUCCESSFUL Successful operation.
  * @retval RTEMS_INVALID_ID Invalid task or scheduler identifier.
- * @retval RTEMS_INCORRECT_STATE The task is in the wrong state to perform a
- * scheduler change.
  *
  * @see rtems_scheduler_ident().
  */
diff --git a/cpukit/rtems/src/tasksetscheduler.c b/cpukit/rtems/src/tasksetscheduler.c
index 42c08bb..30c7c6b 100644
--- a/cpukit/rtems/src/tasksetscheduler.c
+++ b/cpukit/rtems/src/tasksetscheduler.c
@@ -30,15 +30,14 @@ rtems_status_code rtems_task_set_scheduler(
   if ( _Scheduler_Get_by_id( scheduler_id, &scheduler ) ) {
     Thread_Control    *the_thread;
     Objects_Locations  location;
-    bool               ok;
 
     the_thread = _Thread_Get( id, &location );
 
     switch ( location ) {
       case OBJECTS_LOCAL:
-        ok = _Scheduler_Set( scheduler, the_thread );
+        _Scheduler_Set( scheduler, the_thread );
         _Objects_Put( &the_thread->Object );
-        sc = ok ? RTEMS_SUCCESSFUL : RTEMS_INCORRECT_STATE;
+        sc = RTEMS_SUCCESSFUL;
         break;
 #if defined(RTEMS_MULTIPROCESSING)
       case OBJECTS_REMOTE:
diff --git a/cpukit/score/cpu/arm/cpu.c b/cpukit/score/cpu/arm/cpu.c
index 24a9249..91109e4 100644
--- a/cpukit/score/cpu/arm/cpu.c
+++ b/cpukit/score/cpu/arm/cpu.c
@@ -50,6 +50,14 @@
   );
 #endif
 
+#ifdef RTEMS_SMP
+  RTEMS_STATIC_ASSERT(
+    offsetof( Context_Control, is_executing )
+      == ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET,
+    ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
+  );
+#endif
+
 RTEMS_STATIC_ASSERT(
   sizeof( CPU_Exception_frame ) == ARM_EXCEPTION_FRAME_SIZE,
   ARM_EXCEPTION_FRAME_SIZE
@@ -93,6 +101,10 @@ void _CPU_Context_Initialize(
   the_context->thread_id = (uint32_t) tls_area;
 #endif
 
+#ifdef RTEMS_SMP
+  the_context->is_executing = false;
+#endif
+
   if ( tls_area != NULL ) {
     _TLS_TCB_at_area_begin_initialize( tls_area );
   }
diff --git a/cpukit/score/cpu/arm/cpu_asm.S b/cpukit/score/cpu/arm/cpu_asm.S
index bae7207..f2c4afe 100644
--- a/cpukit/score/cpu/arm/cpu_asm.S
+++ b/cpukit/score/cpu/arm/cpu_asm.S
@@ -67,12 +67,32 @@ DEFINE_FUNCTION_ARM(_CPU_Context_switch)
 	str	r3, [r0, #ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET]
 #endif
 
+#ifdef RTEMS_SMP
+	/* Indicate that this context is no longer executing */
+	dmb
+	mov	r3, #0
+	strb	r3, [r0, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+#endif
+
 /* Start restoring context */
 _restore:
 #ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
 	clrex
 #endif
 
+#ifdef RTEMS_SMP
+	/* Wait for context to stop execution if necessary */
+1:
+	ldrb	r3, [r1, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+	cmp	r3, #0
+	bne	1b
+
+	/* Indicate that this context is executing */
+	dmb
+	mov	r3, #1
+	strb	r3, [r1, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
+#endif
+
 #ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
 	ldr	r3, [r1, #ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET]
 	mcr	p15, 0, r3, c13, c0, 3
diff --git a/cpukit/score/cpu/arm/rtems/score/cpu.h b/cpukit/score/cpu/arm/rtems/score/cpu.h
index cb9dc7c..dc57a78 100644
--- a/cpukit/score/cpu/arm/rtems/score/cpu.h
+++ b/cpukit/score/cpu/arm/rtems/score/cpu.h
@@ -216,6 +216,14 @@
   #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
 #endif
 
+#ifdef RTEMS_SMP
+  #ifdef ARM_MULTILIB_VFP_D32
+    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
+  #else
+    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
+  #endif
+#endif
+
 #define ARM_EXCEPTION_FRAME_SIZE 76
 
 #define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52
@@ -280,6 +288,9 @@ typedef struct {
   uint64_t register_d14;
   uint64_t register_d15;
 #endif
+#ifdef RTEMS_SMP
+  volatile bool is_executing;
+#endif
 } Context_Control;
 
 typedef struct {
@@ -410,6 +421,11 @@ void _CPU_Context_Initialize(
 #define _CPU_Context_Get_SP( _context ) \
   (_context)->register_sp
 
+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    (_context)->is_executing
+#endif
+
 #define _CPU_Context_Restart_self( _the_context ) \
    _CPU_Context_restore( (_the_context) );
 
diff --git a/cpukit/score/cpu/i386/cpu.c b/cpukit/score/cpu/i386/cpu.c
index ba7501a..38b84e6 100644
--- a/cpukit/score/cpu/i386/cpu.c
+++ b/cpukit/score/cpu/i386/cpu.c
@@ -26,6 +26,24 @@
 #include <rtems/bspIo.h>
 #include <rtems/score/thread.h>
 
+#define I386_ASSERT_OFFSET(field, off) \
+  RTEMS_STATIC_ASSERT( \
+    offsetof(Context_Control, field) \
+      == I386_CONTEXT_CONTROL_ ## off ## _OFFSET, \
+    Context_Control_ ## field \
+  )
+
+I386_ASSERT_OFFSET(eflags, EFLAGS);
+I386_ASSERT_OFFSET(esp, ESP);
+I386_ASSERT_OFFSET(ebp, EBP);
+I386_ASSERT_OFFSET(ebx, EBX);
+I386_ASSERT_OFFSET(esi, ESI);
+I386_ASSERT_OFFSET(edi, EDI);
+
+#ifdef RTEMS_SMP
+  I386_ASSERT_OFFSET(is_executing, IS_EXECUTING);
+#endif
+
 void _CPU_Initialize(void)
 {
 #if CPU_HARDWARE_FP
diff --git a/cpukit/score/cpu/i386/cpu_asm.S b/cpukit/score/cpu/i386/cpu_asm.S
index 73a4c14..cc08312 100644
--- a/cpukit/score/cpu/i386/cpu_asm.S
+++ b/cpukit/score/cpu/i386/cpu_asm.S
@@ -26,13 +26,12 @@
  * Format of i386 Register structure
  */
 
-.set REG_EFLAGS,  0
-.set REG_ESP,     REG_EFLAGS + 4
-.set REG_EBP,     REG_ESP + 4
-.set REG_EBX,     REG_EBP + 4
-.set REG_ESI,     REG_EBX + 4
-.set REG_EDI,     REG_ESI + 4
-.set SIZE_REGS,   REG_EDI + 4
+.set REG_EFLAGS,  I386_CONTEXT_CONTROL_EFLAGS_OFFSET
+.set REG_ESP,     I386_CONTEXT_CONTROL_ESP_OFFSET
+.set REG_EBP,     I386_CONTEXT_CONTROL_EBP_OFFSET
+.set REG_EBX,     I386_CONTEXT_CONTROL_EBX_OFFSET
+.set REG_ESI,     I386_CONTEXT_CONTROL_ESI_OFFSET
+.set REG_EDI,     I386_CONTEXT_CONTROL_EDI_OFFSET
 
         BEGIN_CODE
 
@@ -58,9 +57,25 @@ SYM (_CPU_Context_switch):
         movl      esi,REG_ESI(eax)         /* save source register */
         movl      edi,REG_EDI(eax)         /* save destination register */
 
+#ifdef RTEMS_SMP
+        /* Indicate that this context is no longer executing */
+        movb      $0, I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax)
+#endif
+
         movl      HEIRCONTEXT_ARG(esp),eax /* eax = heir threads context */
 
 restore:
+#ifdef RTEMS_SMP
+        /* Wait for context to stop execution if necessary */
+1:
+        movb      I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax), bl
+        testb     bl, bl
+        jne       1b
+
+        /* Indicate that this context is executing */
+        movb      $1, I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax)
+#endif
+
         pushl     REG_EFLAGS(eax)          /* push eflags */
         popf                               /* restore eflags */
         movl      REG_ESP(eax),esp         /* restore stack pointer */
diff --git a/cpukit/score/cpu/i386/rtems/score/cpu.h b/cpukit/score/cpu/i386/rtems/score/cpu.h
index a9957cb..ba731b0 100644
--- a/cpukit/score/cpu/i386/rtems/score/cpu.h
+++ b/cpukit/score/cpu/i386/rtems/score/cpu.h
@@ -128,6 +128,17 @@ extern "C" {
 
 #define CPU_PER_CPU_CONTROL_SIZE 0
 
+#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
+#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
+#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
+#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
+#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
+#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
+
+#ifdef RTEMS_SMP
+  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
+#endif
+
 /* structures */
 
 #ifndef ASM
@@ -147,11 +158,19 @@ typedef struct {
   uint32_t    ebx;      /* extended bx register                      */
   uint32_t    esi;      /* extended source index register            */
   uint32_t    edi;      /* extended destination index flags register */
+#ifdef RTEMS_SMP
+  volatile bool is_executing;
+#endif
 }   Context_Control;
 
 #define _CPU_Context_Get_SP( _context ) \
   (_context)->esp
 
+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    (_context)->is_executing
+#endif
+
 /*
  *  FP context save area for the i387 numeric coprocessors.
  */
@@ -435,6 +454,13 @@ uint32_t   _CPU_ISR_Get_level( void );
  */
 
 
+#ifdef RTEMS_SMP
+  #define _I386_Context_Initialize_is_executing( _the_context ) \
+    (_the_context)->is_executing = false
+#else
+  #define _I386_Context_Initialize_is_executing( _the_context )
+#endif
+
 #define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                    _isr, _entry_point, _is_fp, _tls_area ) \
   do { \
@@ -449,6 +475,7 @@ uint32_t   _CPU_ISR_Get_level( void );
     *((proc_ptr *)(_stack)) = (_entry_point); \
     (_the_context)->ebp     = (void *) 0; \
     (_the_context)->esp     = (void *) _stack; \
+    _I386_Context_Initialize_is_executing( _the_context ); \
   } while (0)
 
 #define _CPU_Context_Restart_self( _the_context ) \
diff --git a/cpukit/score/cpu/no_cpu/rtems/score/cpu.h b/cpukit/score/cpu/no_cpu/rtems/score/cpu.h
index fbf207a..739a6a8 100644
--- a/cpukit/score/cpu/no_cpu/rtems/score/cpu.h
+++ b/cpukit/score/cpu/no_cpu/rtems/score/cpu.h
@@ -574,6 +574,18 @@ typedef struct {
      * is the stack pointer.
      */
     uint32_t   stack_pointer;
+
+#ifdef RTEMS_SMP
+    /**
+     * @brief On SMP configurations the thread context must contain a boolean
+     * indicator if this context is executing on a processor.
+     *
+     * This field must be updated during a context switch.  The context switch
+     * to the heir must wait until the heir context indicates that it is no
+     * longer executing on a processor.
+     */
+    volatile bool is_executing;
+#endif
 } Context_Control;
 
 /**
@@ -1582,6 +1594,12 @@ register struct Per_CPU_Control *_CPU_Per_CPU_current asm( "rX" );
   {
     __asm__ volatile ( "" : : : "memory" );
   }
+
+  /**
+   * @brief Macro to return the is executing field of the thread context.
+   */
+  #define _CPU_Context_Get_is_executing( _context ) \
+    ( ( _context )->is_executing )
 #endif
 
 #ifdef __cplusplus
diff --git a/cpukit/score/cpu/powerpc/cpu.c b/cpukit/score/cpu/powerpc/cpu.c
index 3c699f9..53b4eaa 100644
--- a/cpukit/score/cpu/powerpc/cpu.c
+++ b/cpukit/score/cpu/powerpc/cpu.c
@@ -53,6 +53,10 @@ PPC_ASSERT_OFFSET(gpr30, GPR30);
 PPC_ASSERT_OFFSET(gpr31, GPR31);
 PPC_ASSERT_OFFSET(gpr2, GPR2);
 
+#ifdef RTEMS_SMP
+  PPC_ASSERT_OFFSET(is_executing, IS_EXECUTING);
+#endif
+
 RTEMS_STATIC_ASSERT(
   sizeof(Context_Control) % PPC_DEFAULT_CACHE_LINE_SIZE == 0,
   ppc_context_size
diff --git a/cpukit/score/cpu/powerpc/rtems/score/cpu.h b/cpukit/score/cpu/powerpc/rtems/score/cpu.h
index 3a51b31..18a6770 100644
--- a/cpukit/score/cpu/powerpc/rtems/score/cpu.h
+++ b/cpukit/score/cpu/powerpc/rtems/score/cpu.h
@@ -302,6 +302,9 @@ typedef struct {
   PPC_GPR_TYPE gpr30;
   PPC_GPR_TYPE gpr31;
   uint32_t gpr2;
+  #ifdef RTEMS_SMP
+    volatile bool is_executing;
+  #endif
   #ifdef __ALTIVEC__
     /*
      * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
@@ -327,7 +330,7 @@ typedef struct {
   ];
 } Context_Control;
 
-static inline ppc_context *ppc_get_context( Context_Control *context )
+static inline ppc_context *ppc_get_context( const Context_Control *context )
 {
   uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
   uintptr_t mask = clsz - 1;
@@ -338,6 +341,11 @@ static inline ppc_context *ppc_get_context( Context_Control *context )
 
 #define _CPU_Context_Get_SP( _context ) \
   ppc_get_context(_context)->gpr1
+
+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    ppc_get_context(_context)->is_executing
+#endif
 #endif /* ASM */
 
 #define PPC_CONTEXT_OFFSET_GPR1 32
@@ -368,6 +376,10 @@ static inline ppc_context *ppc_get_context( Context_Control *context )
 #define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
 #define PPC_CONTEXT_OFFSET_GPR2 PPC_CONTEXT_GPR_OFFSET( 32 )
 
+#ifdef RTEMS_SMP
+  #define PPC_CONTEXT_OFFSET_IS_EXECUTING (PPC_CONTEXT_GPR_OFFSET( 32 ) + 4)
+#endif
+
 #ifndef ASM
 typedef struct {
     /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
diff --git a/cpukit/score/cpu/sparc/cpu.c b/cpukit/score/cpu/sparc/cpu.c
index 6c124db..d05c511 100644
--- a/cpukit/score/cpu/sparc/cpu.c
+++ b/cpukit/score/cpu/sparc/cpu.c
@@ -67,6 +67,10 @@ SPARC_ASSERT_OFFSET(o7, O7);
 SPARC_ASSERT_OFFSET(psr, PSR);
 SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);
 
+#if defined(RTEMS_SMP)
+SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
+#endif
+
 /*
  *  This initializes the set of opcodes placed in each trap
  *  table entry.  The routine which installs a handler is responsible
@@ -326,6 +330,10 @@ void _CPU_Context_Initialize(
    */
     the_context->isr_dispatch_disable = 0;
 
+#if defined(RTEMS_SMP)
+  the_context->is_executing = false;
+#endif
+
   if ( tls_area != NULL ) {
     void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );
 
diff --git a/cpukit/score/cpu/sparc/rtems/score/cpu.h b/cpukit/score/cpu/sparc/rtems/score/cpu.h
index 50da44c..7bcdbd9 100644
--- a/cpukit/score/cpu/sparc/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc/rtems/score/cpu.h
@@ -473,6 +473,10 @@ typedef struct {
    * SPARC CPU models at high interrupt rates.
    */
   uint32_t   isr_dispatch_disable;
+
+#if defined(RTEMS_SMP)
+  volatile bool is_executing;
+#endif
 } Context_Control;
 
 /**
@@ -483,6 +487,11 @@ typedef struct {
 #define _CPU_Context_Get_SP( _context ) \
   (_context)->o6_sp
 
+#ifdef RTEMS_SMP
+  #define _CPU_Context_Get_is_executing( _context ) \
+    (_context)->is_executing
+#endif
+
 #endif /* ASM */
 
 /*
@@ -538,6 +547,10 @@ typedef struct {
 /** This macro defines an offset into the context for use in assembly. */
 #define ISR_DISPATCH_DISABLE_STACK_OFFSET 0x54
 
+#if defined(RTEMS_SMP)
+  #define SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x58
+#endif
+
 /** This defines the size of the context area for use in assembly. */
 #define CONTEXT_CONTROL_SIZE 0x68
 
diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h
index 7f063ea..e232674 100644
--- a/cpukit/score/include/rtems/score/percpu.h
+++ b/cpukit/score/include/rtems/score/percpu.h
@@ -56,6 +56,8 @@ extern "C" {
 typedef struct Thread_Control_struct Thread_Control;
 #endif
 
+struct Scheduler_Context;
+
 /**
  *  @defgroup PerCPU RTEMS Per CPU Information
  *
@@ -268,13 +270,46 @@ typedef struct Per_CPU_Control {
    */
   volatile uint32_t thread_dispatch_disable_level;
 
-  /** This is set to true when this CPU needs to run the dispatcher. */
+  /**
+   * @brief This is set to true when this processor needs to run the
+   * dispatcher.
+   *
+   * It is volatile since interrupts may alter this flag.
+   *
+   * This field is not protected by a lock.  There are two writers after
+   * multitasking start.  The scheduler owning this processor sets this
+   * indicator to true, after it updated the heir field.  This processor sets
+   * this indicator to false, before it reads the heir.  This field is used in
+   * combination with the heir field.
+   *
+   * @see _Thread_Get_heir_and_make_it_executing().
+   */
   volatile bool dispatch_necessary;
 
-  /** This is the thread executing on this CPU. */
+  /**
+   * @brief This is the thread executing on this processor.
+   *
+   * This field is not protected by a lock.  The only writer is this processor.
+   *
+   * On SMP configurations a thread may be registered as executing on more than
+   * one processor in case a thread migration is in progress.  On SMP
+   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
+   * a thread context is executing on a processor.
+   */
   Thread_Control *executing;
 
-  /** This is the heir thread for this this CPU. */
+  /**
+   * @brief This is the heir thread for this processor.
+   *
+   * This field is not protected by a lock.  The only writer after multitasking
+   * start is the scheduler owning this processor.  This processor will set the
+   * dispatch necessary indicator to false, before it reads the heir.  This
+   * field is used in combination with the dispatch necessary indicator.
+   *
+   * A thread can be a heir on at most one processor in the system.
+   *
+   * @see _Thread_Get_heir_and_make_it_executing().
+   */
   Thread_Control *heir;
 
   /** This is the time of the last context switch on this CPU. */
@@ -282,11 +317,12 @@ typedef struct Per_CPU_Control {
 
   #if defined( RTEMS_SMP )
     /**
-     * @brief This lock protects the dispatch_necessary, executing, heir and
-     * message fields.
+     * @brief This lock protects some parts of the low-level thread dispatching.
      *
      * We must use a ticket lock here since we cannot transport a local context
      * through the context switch.
+     *
+     * @see _Thread_Dispatch().
      */
     SMP_ticket_lock_Control Lock;
 
@@ -310,6 +346,11 @@ typedef struct Per_CPU_Control {
     Atomic_Ulong message;
 
     /**
+     * @brief The scheduler context of the scheduler owning this processor.
+     */
+    const struct Scheduler_Context *scheduler_context;
+
+    /**
      * @brief Indicates the current state of the CPU.
      *
      * This field is protected by the _Per_CPU_State_lock lock.
diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
index 9002ef8..2a1c433 100644
--- a/cpukit/score/include/rtems/score/scheduler.h
+++ b/cpukit/score/include/rtems/score/scheduler.h
@@ -148,7 +148,7 @@ typedef struct {
  * The scheduler context of a particular scheduler implementation must place
  * this structure at the begin of its context structure.
  */
-typedef struct {
+typedef struct Scheduler_Context {
 #if defined(RTEMS_SMP)
   /**
    * @brief Count of processors owned by this scheduler instance.
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 6fad4e2..cb73d5e 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -390,29 +390,25 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
 #endif
 }
 
-RTEMS_INLINE_ROUTINE bool _Scheduler_Set(
+RTEMS_INLINE_ROUTINE void _Scheduler_Set(
   const Scheduler_Control *scheduler,
   Thread_Control          *the_thread
 )
 {
-  bool ok;
-
-  if ( _States_Is_dormant( the_thread->current_state ) ) {
 #if defined(RTEMS_SMP)
+  const Scheduler_Control *current_scheduler = _Scheduler_Get( the_thread );
+
+  if ( current_scheduler != scheduler ) {
+    _Thread_Set_state( the_thread, STATES_MIGRATING );
     _Scheduler_Free( _Scheduler_Get( the_thread ), the_thread );
     the_thread->scheduler = scheduler;
     _Scheduler_Allocate( scheduler, the_thread );
     _Scheduler_Update( scheduler, the_thread );
+    _Thread_Clear_state( the_thread, STATES_MIGRATING );
+  }
 #else
-    (void) scheduler;
+  (void) scheduler;
 #endif
-
-    ok = true;
-  } else {
-    ok = false;
-  }
-
-  return ok;
 }
 
 RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
@@ -448,9 +444,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
     ok = ok && !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
   }
 
-  if ( ok ) {
-    ok = _Scheduler_Set( scheduler, the_thread );
-  }
+  _Scheduler_Set( scheduler, the_thread );
 
   return ok;
 }
diff --git a/cpukit/score/include/rtems/score/schedulersmp.h b/cpukit/score/include/rtems/score/schedulersmp.h
index 8f5a390..778a1fb 100644
--- a/cpukit/score/include/rtems/score/schedulersmp.h
+++ b/cpukit/score/include/rtems/score/schedulersmp.h
@@ -24,9 +24,7 @@
 #define _RTEMS_SCORE_SCHEDULERSMP_H
 
 #include <rtems/score/chain.h>
-#include <rtems/score/percpu.h>
-#include <rtems/score/prioritybitmap.h>
-#include <rtems/score/thread.h>
+#include <rtems/score/scheduler.h>
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index c3e11e6..69222c2 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -24,9 +24,9 @@
 #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
 
 #include <rtems/score/schedulersmp.h>
-#include <rtems/score/schedulersimpleimpl.h>
+#include <rtems/score/assert.h>
 #include <rtems/score/chainimpl.h>
-#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulersimpleimpl.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -64,47 +64,74 @@ static inline void _Scheduler_SMP_Initialize(
   _Chain_Initialize_empty( &self->Scheduled );
 }
 
+static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
+  const Scheduler_SMP_Context *self,
+  const Per_CPU_Control *cpu
+)
+{
+  return cpu->scheduler_context == &self->Base;
+}
+
+static inline void _Scheduler_SMP_Update_heir(
+  Per_CPU_Control *cpu_self,
+  Per_CPU_Control *cpu_for_heir,
+  Thread_Control *heir
+)
+{
+  cpu_for_heir->heir = heir;
+
+  /*
+   * It is critical that we first update the heir and then the dispatch
+   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
+   * update.
+   */
+  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
+
+  /*
+   * Only update the dispatch necessary indicator if not already set to
+   * avoid superfluous inter-processor interrupts.
+   */
+  if ( !cpu_for_heir->dispatch_necessary ) {
+    cpu_for_heir->dispatch_necessary = true;
+
+    if ( cpu_for_heir != cpu_self ) {
+      _Per_CPU_Send_interrupt( cpu_for_heir );
+    }
+  }
+}
+
 static inline void _Scheduler_SMP_Allocate_processor(
+  Scheduler_SMP_Context *self,
   Thread_Control *scheduled,
   Thread_Control *victim
 )
 {
   Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
   Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
+  Per_CPU_Control *cpu_self = _Per_CPU_Get();
   Thread_Control *heir;
 
   scheduled->is_scheduled = true;
   victim->is_scheduled = false;
 
-  _Per_CPU_Acquire( cpu_of_scheduled );
+  _Assert( _ISR_Get_level() != 0 );
 
-  if ( scheduled->is_executing ) {
-    heir = cpu_of_scheduled->heir;
-    cpu_of_scheduled->heir = scheduled;
+  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
+    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
+      heir = cpu_of_scheduled->heir;
+      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
+    } else {
+      /* We have to force a migration to our processor set */
+      _Assert( scheduled->debug_real_cpu->heir != scheduled );
+      heir = scheduled;
+    }
   } else {
     heir = scheduled;
   }
 
-  _Per_CPU_Release( cpu_of_scheduled );
-
   if ( heir != victim ) {
-    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
-
     _Thread_Set_CPU( heir, cpu_of_victim );
-
-    cpu_of_victim->heir = heir;
-
-    /*
-     * It is critical that we first update the heir and then the dispatch
-     * necessary so that _Thread_Dispatch() cannot miss an update.
-     */
-    _Atomic_Fence( ATOMIC_ORDER_RELEASE );
-
-    cpu_of_victim->dispatch_necessary = true;
-
-    if ( cpu_of_victim != cpu_of_executing ) {
-      _Per_CPU_Send_interrupt( cpu_of_victim );
-    }
+    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
   }
 }
 
@@ -148,7 +175,7 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
       highest_ready != NULL
         && !( *order )( &thread->Object.Node, &highest_ready->Object.Node )
     ) {
-      _Scheduler_SMP_Allocate_processor( highest_ready, thread );
+      _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
 
       ( *insert_ready )( self, thread );
       ( *move_from_ready_to_scheduled )( self, highest_ready );
@@ -168,7 +195,7 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
       lowest_scheduled != NULL
         && ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node )
     ) {
-      _Scheduler_SMP_Allocate_processor( thread, lowest_scheduled );
+      _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
 
       ( *insert_scheduled )( self, thread );
       ( *move_from_scheduled_to_ready )( self, lowest_scheduled );
@@ -187,7 +214,7 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
 {
   Thread_Control *highest_ready = ( *get_highest_ready )( self );
 
-  _Scheduler_SMP_Allocate_processor( highest_ready, victim );
+  _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );
 
   ( *move_from_ready_to_scheduled )( self, highest_ready );
 }
diff --git a/cpukit/score/include/rtems/score/statesimpl.h b/cpukit/score/include/rtems/score/statesimpl.h
index 842d108..0dbf0db 100644
--- a/cpukit/score/include/rtems/score/statesimpl.h
+++ b/cpukit/score/include/rtems/score/statesimpl.h
@@ -82,6 +82,8 @@ extern "C" {
 #define STATES_WAITING_FOR_TERMINATION         0x100000
 /** This macro corresponds to a task being a zombie. */
 #define STATES_ZOMBIE                          0x200000
+/** This macro corresponds to a task migration to another scheduler. */
+#define STATES_MIGRATING                       0x400000
 
 /** This macro corresponds to a task which is in an interruptible
  *  blocking state.
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 90df3a7..248ae96 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -504,20 +504,6 @@ struct Thread_Control_struct {
   bool                                  is_in_the_air;
 
   /**
-   * @brief This field is true if the thread is executing.
-   *
-   * A thread is executing if it executes on a processor.  An executing thread
-   * executes on exactly one processor.  There are exactly processor count
-   * executing threads in the system.  An executing thread may have a heir
-   * thread and thread dispatching is necessary.  On SMP a thread dispatch on a
-   * remote processor needs help from an inter-processor interrupt, thus it
-   * will take some time to complete the state change.  A lot of things can
-   * happen in the meantime.  This field is volatile since it is polled in
-   * _Thread_Kill_zombies().
-   */
-  volatile bool                         is_executing;
-
-  /**
    * @brief The scheduler of this thread.
    */
   const struct Scheduler_Control       *scheduler;
@@ -548,7 +534,18 @@ struct Thread_Control_struct {
   void                                 *scheduler_info;
 
 #ifdef RTEMS_SMP
+  /**
+   * @brief The processor assigned by the scheduler.
+   */
   Per_CPU_Control                      *cpu;
+
+#ifdef RTEMS_DEBUG
+  /**
+   * @brief The processor on which this thread executed the last time or is
+   * executing.
+   */
+  Per_CPU_Control                      *debug_real_cpu;
+#endif
 #endif
 
   /** This field contains information about the starting state of
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 4efc85d..2be5cc5 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -454,6 +454,22 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
   return ( the_thread == _Thread_Executing );
 }
 
+#if defined(RTEMS_SMP)
+/**
+ * @brief Returns @c true in case the thread currently executes on some
+ * processor in the system, otherwise @c false.
+ *
+ * Do not confuse this with _Thread_Is_executing() which checks only the
+ * current processor.
+ */
+RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
+  const Thread_Control *the_thread
+)
+{
+  return _CPU_Context_Get_is_executing( &the_thread->Registers );
+}
+#endif
+
 /**
  * This function returns true if the_thread is the heir
  * thread, and false otherwise.
@@ -491,7 +507,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
 
   _Giant_Release();
 
-  _Per_CPU_ISR_disable_and_acquire( _Per_CPU_Get(), level );
+  _ISR_Disable_without_giant( level );
   ( void ) level;
 #endif
 
@@ -590,7 +606,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Request_dispatch_if_executing(
 )
 {
 #if defined(RTEMS_SMP)
-  if ( thread->is_executing ) {
+  if ( _Thread_Is_executing_on_a_processor( thread ) ) {
     const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
     Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );
 
@@ -611,7 +627,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
     _Thread_Dispatch_necessary = true;
   } else {
 #if defined(RTEMS_SMP)
-    if ( thread->is_executing ) {
+    if ( _Thread_Is_executing_on_a_processor( thread ) ) {
       const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
       Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );
 
@@ -624,6 +640,39 @@ RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
   }
 }
 
+/**
+ * @brief Gets the heir of the processor and makes it executing.
+ *
+ * The thread dispatch necessary indicator is cleared as a side-effect.
+ *
+ * @return The heir thread.
+ *
+ * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
+ * _Scheduler_SMP_Update_heir().
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
+  Per_CPU_Control *cpu_self
+)
+{
+  Thread_Control *heir;
+
+  cpu_self->dispatch_necessary = false;
+
+#if defined( RTEMS_SMP )
+  /*
+   * It is critical that we first update the dispatch necessary indicator and
+   * then read the heir so that we don't miss an update by
+   * _Scheduler_SMP_Update_heir().
+   */
+  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
+#endif
+
+  heir = cpu_self->heir;
+  cpu_self->executing = heir;
+
+  return heir;
+}
+
 RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
   Thread_Control *executing,
   Timestamp_Control *time_of_last_context_switch
@@ -736,6 +785,19 @@ RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
   return ( life_state & THREAD_LIFE_RESTARTING_TERMINTING ) != 0;
 }
 
+RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
+  Thread_Control  *the_thread,
+  Per_CPU_Control *cpu
+)
+{
+#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
+  the_thread->debug_real_cpu = cpu;
+#else
+  (void) the_thread;
+  (void) cpu;
+#endif
+}
+
 #if !defined(__DYNAMIC_REENT__)
 /**
  * This routine returns the C library re-enterant pointer.
diff --git a/cpukit/score/include/rtems/score/userextimpl.h b/cpukit/score/include/rtems/score/userextimpl.h
index 04808e1..19055f9 100644
--- a/cpukit/score/include/rtems/score/userextimpl.h
+++ b/cpukit/score/include/rtems/score/userextimpl.h
@@ -216,13 +216,21 @@ static inline void _User_extensions_Thread_switch(
   const Chain_Node    *tail = _Chain_Immutable_tail( chain );
   const Chain_Node    *node = _Chain_Immutable_first( chain );
 
-  while ( node != tail ) {
-    const User_extensions_Switch_control *extension =
-      (const User_extensions_Switch_control *) node;
+  if ( node != tail ) {
+    Per_CPU_Control *cpu_self = _Per_CPU_Get();
 
-    (*extension->thread_switch)( executing, heir );
+    _Per_CPU_Acquire( cpu_self );
 
-    node = _Chain_Immutable_next( node );
+    while ( node != tail ) {
+      const User_extensions_Switch_control *extension =
+        (const User_extensions_Switch_control *) node;
+
+      (*extension->thread_switch)( executing, heir );
+
+      node = _Chain_Immutable_next( node );
+    }
+
+    _Per_CPU_Release( cpu_self );
   }
 }
 
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index 519b152..2f86c6e 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -58,7 +58,10 @@ static void _SMP_Start_processors( uint32_t cpu_count )
     cpu->started = started;
 
     if ( started ) {
-      ++assignment->scheduler->context->processor_count;
+      Scheduler_Context *scheduler_context = assignment->scheduler->context;
+
+      ++scheduler_context->processor_count;
+      cpu->scheduler_context = scheduler_context;
     }
   }
 }
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index f1c6cfd..982bbc4 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -64,10 +64,14 @@ void _Thread_Dispatch( void )
 {
   Per_CPU_Control  *cpu_self;
   Thread_Control   *executing;
-  Thread_Control   *heir;
   ISR_Level         level;
 
 #if defined( RTEMS_SMP )
+  /*
+   * On SMP the complete context switch must be atomic with respect to one
+   * processor.  See also _Thread_Handler() since _Context_switch() may branch
+   * to this function.
+   */
   _ISR_Disable_without_giant( level );
 #endif
 
@@ -76,45 +80,21 @@ void _Thread_Dispatch( void )
   _Profiling_Thread_dispatch_disable( cpu_self, 0 );
   cpu_self->thread_dispatch_disable_level = 1;
 
-#if defined( RTEMS_SMP )
-  _ISR_Enable_without_giant( level );
-#endif
-
   /*
    *  Now determine if we need to perform a dispatch on the current CPU.
    */
   executing = cpu_self->executing;
-  _Per_CPU_ISR_disable_and_acquire( cpu_self, level );
-#if defined( RTEMS_SMP )
-  /*
-   * On SMP the complete context switch must be atomic with respect to one
-   * processor.  The scheduler must obtain the per-CPU lock to check if a
-   * thread is executing and to update the heir.  This ensures that a thread
-   * cannot execute on more than one processor at a time.  See also
-   * _Thread_Handler() since _Context_switch() may branch to this function.
-   */
-  if ( cpu_self->dispatch_necessary ) {
-#else
-  while ( cpu_self->dispatch_necessary ) {
-#endif
-    cpu_self->dispatch_necessary = false;
 
-#if defined( RTEMS_SMP )
-    /*
-     * It is critical that we first update the dispatch necessary and then the
-     * read the heir so that we don't miss an update by
-     * _Scheduler_SMP_Allocate_processor().
-     */
-    _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
+#if !defined( RTEMS_SMP )
+  _ISR_Disable( level );
 #endif
 
-    heir = cpu_self->heir;
-    cpu_self->executing = heir;
-
 #if defined( RTEMS_SMP )
-    executing->is_executing = false;
-    heir->is_executing = true;
+  if ( cpu_self->dispatch_necessary ) {
+#else
+  while ( cpu_self->dispatch_necessary ) {
 #endif
+    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
 
     /*
      *  When the heir and executing are the same, then we are being
@@ -207,6 +187,8 @@ void _Thread_Dispatch( void )
      */
     cpu_self = _Per_CPU_Get();
 
+    _Thread_Debug_set_real_processor( executing, cpu_self );
+
 #if !defined( RTEMS_SMP )
     _ISR_Disable( level );
 #endif
@@ -217,7 +199,7 @@ post_switch:
   cpu_self->thread_dispatch_disable_level = 0;
   _Profiling_Thread_dispatch_enable( cpu_self, 0 );
 
-  _Per_CPU_Release_and_ISR_enable( cpu_self, level );
+  _ISR_Enable_without_giant( level );
 
   _Thread_Run_post_switch_actions( executing );
 }
diff --git a/cpukit/score/src/threadhandler.c b/cpukit/score/src/threadhandler.c
index 229e74f..5f6623f 100644
--- a/cpukit/score/src/threadhandler.c
+++ b/cpukit/score/src/threadhandler.c
@@ -153,11 +153,11 @@ void _Thread_Handler( void )
       _Assert( cpu_self->thread_dispatch_disable_level == 1 );
       _Assert( _ISR_Get_level() != 0 );
 
+      _Thread_Debug_set_real_processor( executing, cpu_self );
+
       cpu_self->thread_dispatch_disable_level = 0;
       _Profiling_Thread_dispatch_enable( cpu_self, 0 );
 
-      _Per_CPU_Release( cpu_self );
-
       level = executing->Start.isr_level;
       _ISR_Set_level( level);
 
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index fb3d6c8..1a03b0d 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -52,6 +52,7 @@ bool _Thread_Initialize(
   bool                     extension_status;
   size_t                   i;
   bool                     scheduler_allocated = false;
+  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );
 
 #if defined( RTEMS_SMP )
   if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
@@ -182,12 +183,13 @@ bool _Thread_Initialize(
 #if defined(RTEMS_SMP)
   the_thread->is_scheduled            = false;
   the_thread->is_in_the_air           = false;
-  the_thread->is_executing            = false;
   the_thread->scheduler               = scheduler;
 #endif
 
+  _Thread_Debug_set_real_processor( the_thread, cpu );
+
   /* Initialize the CPU for the non-SMP schedulers */
-  _Thread_Set_CPU( the_thread, _Per_CPU_Get_by_index( 0 ) );
+  _Thread_Set_CPU( the_thread, cpu );
 
   the_thread->current_state           = STATES_DORMANT;
   the_thread->Wait.queue              = NULL;
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 422ee33..9cf2a85 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -107,7 +107,7 @@ static void _Thread_Wait_for_execution_stop( Thread_Control *the_thread )
    * in case the thread termination sequence is interrupted by a slow interrupt
    * service on a remote processor.
    */
-  while (the_thread->is_executing) {
+  while ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
     /* Wait */
   }
 #else
diff --git a/cpukit/score/src/threadstartmultitasking.c b/cpukit/score/src/threadstartmultitasking.c
index 78a438f..c1c8725 100644
--- a/cpukit/score/src/threadstartmultitasking.c
+++ b/cpukit/score/src/threadstartmultitasking.c
@@ -30,22 +30,12 @@ void _Thread_Start_multitasking( void )
 
   /*
    * Threads begin execution in the _Thread_Handler() function.   This
-   * function will set the thread dispatch disable level to zero and calls
-   * _Per_CPU_Release().
+   * function will set the thread dispatch disable level to zero.
    */
-  _Per_CPU_Acquire( cpu_self );
   cpu_self->thread_dispatch_disable_level = 1;
 #endif
 
-  heir = cpu_self->heir;
-
-#if defined(RTEMS_SMP)
-  cpu_self->executing->is_executing = false;
-  heir->is_executing = true;
-#endif
-
-  cpu_self->dispatch_necessary = false;
-  cpu_self->executing = heir;
+  heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
 
    /*
     * Get the init task(s) running.
diff --git a/doc/user/smp.t b/doc/user/smp.t
index 0ad21f5..0751aba 100644
--- a/doc/user/smp.t
+++ b/doc/user/smp.t
@@ -211,6 +211,52 @@ affinity. Although the behavior is scheduler specific, if the scheduler
 does not support affinity, it is likely to ignore all attempts to set
 affinity.
 
+@subsection Task Migration
+
+@cindex task migration
+@cindex thread migration
+
+With more than one processor in the system tasks can migrate from one processor
+to another.  There are three reasons why tasks migrate in RTEMS.
+
+@itemize @bullet
+@item The scheduler changes explicitly via @code{rtems_task_set_scheduler()} or
+similar directives.
+@item The task resumes execution after a blocking operation.  On a
+priority-based scheduler it will evict the lowest priority task currently
+assigned to a processor in the processor set managed by the scheduler instance.
+@item The task moves temporarily to another scheduler instance due to locking
+protocols like @cite{Migratory Priority Inheritance} or the
+@cite{Multiprocessor Resource Sharing Protocol}.
+@end itemize
+
+Task migration should be avoided so that the working set of a task can stay on
+the most local cache level.
+
+The current implementation of task migration in RTEMS has some implications
+with respect to the interrupt latency.  It is crucial to preserve the system
+invariant that a task can execute on at most one processor in the system at a
+time.  This is accomplished with a boolean indicator in the task context.  The
+processor architecture specific low-level task context switch code will mark
+that a task context is no longer executing and waits until the heir context
+has stopped execution before it restores the heir context and resumes
+execution of the heir task.  So there is one point in time in which a
+processor is without a task.  This is essential to avoid cyclic dependencies
+in case multiple tasks migrate at once.  Otherwise some supervising entity is
+necessary to prevent livelocks.  Such a global supervisor would lead to
+scalability problems so this approach is not used.  Currently the thread
+dispatch is performed with interrupts disabled.  So in case the heir task is
+currently executing on another processor then this prolongs the time of
+disabled interrupts since one processor has to wait for another processor to
+make progress.
+
+It is difficult to avoid this issue with the interrupt latency since interrupts
+normally store the context of the interrupted task on its stack.  In case a
+task is marked as not executing we must not use its task stack to store such an
+interrupt context.  We cannot use the heir stack before it stopped execution on
+another processor.  So if we enable interrupts during this transition we have
+to provide an alternative task independent stack for this time frame.  This
+issue needs further investigation.
+
 @subsection Critical Section Techniques and SMP
 
 As discussed earlier, SMP systems have opportunities for true parallelism
diff --git a/testsuites/smptests/Makefile.am b/testsuites/smptests/Makefile.am
index d82503a..36fb156 100644
--- a/testsuites/smptests/Makefile.am
+++ b/testsuites/smptests/Makefile.am
@@ -22,6 +22,7 @@ SUBDIRS += smpipi01
 SUBDIRS += smpload01
 SUBDIRS += smplock01
 SUBDIRS += smpmigration01
+SUBDIRS += smpmigration02
 SUBDIRS += smpscheduler01
 SUBDIRS += smpscheduler02
 SUBDIRS += smpsignal01
diff --git a/testsuites/smptests/configure.ac b/testsuites/smptests/configure.ac
index 27f7f54..0b9b4c6 100644
--- a/testsuites/smptests/configure.ac
+++ b/testsuites/smptests/configure.ac
@@ -77,6 +77,7 @@ smpipi01/Makefile
 smpload01/Makefile
 smplock01/Makefile
 smpmigration01/Makefile
+smpmigration02/Makefile
 smppsxaffinity01/Makefile
 smppsxaffinity02/Makefile
 smppsxsignal01/Makefile
diff --git a/testsuites/smptests/smpmigration02/Makefile.am b/testsuites/smptests/smpmigration02/Makefile.am
new file mode 100644
index 0000000..8dcd8ad
--- /dev/null
+++ b/testsuites/smptests/smpmigration02/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smpmigration02
+smpmigration02_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpmigration02.scn smpmigration02.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smpmigration02_OBJECTS)
+LINK_LIBS = $(smpmigration02_LDLIBS)
+
+smpmigration02$(EXEEXT): $(smpmigration02_OBJECTS) $(smpmigration02_DEPENDENCIES)
+	@rm -f smpmigration02$(EXEEXT)
+	$(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpmigration02/init.c b/testsuites/smptests/smpmigration02/init.c
new file mode 100644
index 0000000..7cf4651
--- /dev/null
+++ b/testsuites/smptests/smpmigration02/init.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Dornierstr. 4
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/libcsupport.h>
+
+#include "tmacros.h"
+
+const char rtems_test_name[] = "SMPMIGRATION 2";
+
+#define CPU_COUNT 32
+
+#define TASK_COUNT (CPU_COUNT + 1)
+
+#define PRIO_LOW 3
+
+#define PRIO_HIGH 2
+
+typedef struct {
+  uint32_t value;
+  uint32_t cache_line_separation[31];
+} test_counter;
+
+typedef struct {
+  test_counter counters[TASK_COUNT];
+  rtems_id scheduler_ids[CPU_COUNT];
+  rtems_id task_ids[TASK_COUNT];
+} test_context;
+
+static test_context test_instance;
+
+static void task(rtems_task_argument arg)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+  uint32_t cpu_count = rtems_get_processor_count();
+  uint32_t cpu_index = rtems_get_current_processor();
+
+  while (true) {
+    cpu_index = (cpu_index + 1) % cpu_count;
+
+    sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->scheduler_ids[cpu_index]);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    ++ctx->counters[arg].value;
+
+    rtems_test_assert(cpu_index == rtems_get_current_processor());
+  }
+}
+
+static void test(void)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+  uint32_t cpu_count = rtems_get_processor_count();
+  uint32_t cpu_index;
+  uint32_t task_count = cpu_count + 1;
+  uint32_t task_index;
+
+  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
+    sc = rtems_scheduler_ident(cpu_index, &ctx->scheduler_ids[cpu_index]);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+  }
+
+  for (task_index = 0; task_index < task_count; ++task_index) {
+    rtems_id task_id;
+
+    sc = rtems_task_create(
+      rtems_build_name('T', 'A', 'S', 'K'),
+      task_index > 0 ? PRIO_LOW : PRIO_HIGH,
+      RTEMS_MINIMUM_STACK_SIZE,
+      RTEMS_DEFAULT_MODES,
+      RTEMS_DEFAULT_ATTRIBUTES,
+      &task_id
+    );
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    sc = rtems_task_set_scheduler(task_id, ctx->scheduler_ids[task_index % cpu_count]);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    sc = rtems_task_start(task_id, task, task_index);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    ctx->task_ids[task_index] = task_id;
+  }
+
+  sc = rtems_task_wake_after(30 * rtems_clock_get_ticks_per_second());
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  for (task_index = 0; task_index < task_count; ++task_index) {
+    printf(
+      "task %" PRIu32 " counter: %" PRIu32 "\n",
+      task_index,
+      ctx->counters[task_index].value
+    );
+
+    sc = rtems_task_delete(ctx->task_ids[task_index]);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+  }
+}
+
+static void Init(rtems_task_argument arg)
+{
+  rtems_resource_snapshot snapshot;
+
+  TEST_BEGIN();
+
+  rtems_resource_snapshot_take(&snapshot);
+
+  test();
+
+  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
+
+  TEST_END();
+  rtems_test_exit(0);
+}
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_SMP_APPLICATION
+
+#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
+
+#define CONFIGURE_SCHEDULER_SIMPLE_SMP
+
+#include <rtems/scheduler.h>
+
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(17);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(18);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(19);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(20);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(21);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(22);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(23);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(24);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(25);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(26);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(27);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(28);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(29);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(30);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(31);
+
+#define CONFIGURE_SCHEDULER_CONTROLS \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(17, 17), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(18, 18), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(19, 19), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(20, 20), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(21, 21), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(22, 22), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(23, 23), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(24, 24), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(25, 25), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(26, 26), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(27, 27), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(28, 28), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(29, 29), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(30, 30), \
+  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(31, 31)
+
+#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
+  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(17, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(18, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(19, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(20, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(21, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(22, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(23, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(24, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(25, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(26, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(27, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(28, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(29, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(30, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+  RTEMS_SCHEDULER_ASSIGN(31, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
+
+#define CONFIGURE_MAXIMUM_TASKS (1 + TASK_COUNT)
+
+#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smpmigration02/smpmigration02.doc b/testsuites/smptests/smpmigration02/smpmigration02.doc
new file mode 100644
index 0000000..bfae205
--- /dev/null
+++ b/testsuites/smptests/smpmigration02/smpmigration02.doc
@@ -0,0 +1,12 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpmigration02
+
+directives:
+
+  - _Scheduler_SMP_Allocate_processor()
+  - _CPU_Context_switch()
+
+concepts:
+
+  - Ensure that forced thread migration works.
diff --git a/testsuites/smptests/smpmigration02/smpmigration02.scn b/testsuites/smptests/smpmigration02/smpmigration02.scn
new file mode 100644
index 0000000..c436e19
--- /dev/null
+++ b/testsuites/smptests/smpmigration02/smpmigration02.scn
@@ -0,0 +1,7 @@
+*** BEGIN OF TEST SMPMIGRATION 2 ***
+task 0 counter: 1137459
+task 1 counter: 1136714
+task 2 counter: 1136713
+task 3 counter: 1136712
+task 4 counter: 1136711
+*** END OF TEST SMPMIGRATION 2 ***
diff --git a/testsuites/smptests/smpscheduler02/init.c b/testsuites/smptests/smpscheduler02/init.c
index 1e6b6d5..5bfff0e 100644
--- a/testsuites/smptests/smpscheduler02/init.c
+++ b/testsuites/smptests/smpscheduler02/init.c
@@ -158,8 +158,8 @@ static void test(void)
   sc = rtems_task_start(task_id, task, 0);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
-  sc = rtems_task_set_scheduler(task_id, scheduler_a_id);
-  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
+  sc = rtems_task_set_scheduler(task_id, scheduler_b_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
   sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
diff --git a/testsuites/smptests/smpswitchextension01/smpswitchextension01.scn b/testsuites/smptests/smpswitchextension01/smpswitchextension01.scn
index 9024fdb..64b9b6e 100644
--- a/testsuites/smptests/smpswitchextension01/smpswitchextension01.scn
+++ b/testsuites/smptests/smpswitchextension01/smpswitchextension01.scn
@@ -1,13 +1,13 @@
-*** TEST SMPSWITCHEXTENSION 1 ***
+*** BEGIN OF TEST SMPSWITCHEXTENSION 1 ***
 toggler 0
-        toggles 2146479
+        toggles 1555183
 toggler 1
-        toggles 2146477
+        toggles 1555182
 extension 0
-        context switches 2146478
+        context switches 1555185
 extension 1
-        context switches 2146481
+        context switches 1244705
 extension 2
-        context switches 2146482
-extension switches 718121
+        context switches 1554688
+extension switches 311649
 *** END OF TEST SMPSWITCHEXTENSION 1 ***
diff --git a/testsuites/sptests/spscheduler01/init.c b/testsuites/sptests/spscheduler01/init.c
index 30ea4ce..bcb656d 100644
--- a/testsuites/sptests/spscheduler01/init.c
+++ b/testsuites/sptests/spscheduler01/init.c
@@ -81,10 +81,10 @@ static void test_task_get_set_affinity(void)
   rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));
 
   sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
-  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
   sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
-  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
   sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -163,7 +163,7 @@ static void test_task_get_set_scheduler(void)
   rtems_test_assert(sc == RTEMS_INVALID_ID);
 
   sc = rtems_task_set_scheduler(self_id, scheduler_id);
-  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
   sc = rtems_task_create(
     rtems_build_name('T', 'A', 'S', 'K'),
@@ -188,7 +188,7 @@ static void test_task_get_set_scheduler(void)
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
   sc = rtems_task_set_scheduler(task_id, scheduler_id);
-  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 
   sc = rtems_task_delete(task_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
diff --git a/testsuites/tmtests/tm26/task1.c b/testsuites/tmtests/tm26/task1.c
index 6b2572e..9685940 100644
--- a/testsuites/tmtests/tm26/task1.c
+++ b/testsuites/tmtests/tm26/task1.c
@@ -146,8 +146,6 @@ static void thread_disable_dispatch( void )
 
   self_cpu = _Per_CPU_Get();
   self_cpu->thread_dispatch_disable_level = 1;
-
-  _Per_CPU_Acquire( self_cpu );
 #else
   _Thread_Disable_dispatch();
 #endif




More information about the vc mailing list