[PATCH 12/30] score: Protect thread scheduler state changes
Sebastian Huber
sebastian.huber at embedded-brains.de
Mon Oct 31 08:51:43 UTC 2016
Update #2556.
---
cpukit/score/include/rtems/score/schedulerimpl.h | 31 +++++++++---
.../score/include/rtems/score/schedulersmpimpl.h | 57 +++++++++++++---------
2 files changed, 58 insertions(+), 30 deletions(-)
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 3108641..7c74765 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -21,6 +21,7 @@
#define _RTEMS_SCORE_SCHEDULERIMPL_H
#include <rtems/score/scheduler.h>
+#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
@@ -867,6 +868,11 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
_Scheduler_Thread_state_valid_state_changes
[ the_thread->Scheduler.state ][ new_state ]
);
+ _Assert(
+ _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
+ || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
+ || !_System_state_Is_up( _System_state_Get() )
+ );
the_thread->Scheduler.state = new_state;
}
@@ -983,18 +989,23 @@ _Scheduler_Try_to_schedule_node(
Scheduler_Get_idle_thread get_idle_thread
)
{
- Scheduler_Try_to_schedule_action action;
- Thread_Control *owner;
- Thread_Control *user;
+ ISR_lock_Context lock_context;
+ Scheduler_Try_to_schedule_action action;
+ Thread_Control *owner;
+ Thread_Control *user;
action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
+ user = _Scheduler_Node_get_user( node );
+
+ _Thread_Scheduler_acquire_critical( user, &lock_context );
if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
+ _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
+ _Thread_Scheduler_release_critical( user, &lock_context );
return action;
}
owner = _Scheduler_Node_get_owner( node );
- user = _Scheduler_Node_get_user( node );
if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
@@ -1026,6 +1037,11 @@ _Scheduler_Try_to_schedule_node(
}
}
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
+ }
+
+ _Thread_Scheduler_release_critical( user, &lock_context );
return action;
}
@@ -1097,10 +1113,13 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
Scheduler_Get_idle_thread get_idle_thread
)
{
- Thread_Control *old_user;
- Thread_Control *new_user;
+ ISR_lock_Context lock_context;
+ Thread_Control *old_user;
+ Thread_Control *new_user;
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
_Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
_Assert( thread == _Scheduler_Node_get_user( node ) );
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index eced51b..e5423ff 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -526,22 +526,44 @@ static inline void _Scheduler_SMP_Allocate_processor_exact(
static inline void _Scheduler_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled,
- Scheduler_Node *victim,
+ Thread_Control *victim_thread,
Scheduler_SMP_Allocate_processor allocate_processor
)
{
Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
- Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
_Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
- _Scheduler_Thread_change_state(
- scheduled_thread,
- THREAD_SCHEDULER_SCHEDULED
- );
( *allocate_processor )( context, scheduled_thread, victim_thread );
}
+static inline Thread_Control *_Scheduler_SMP_Preempt(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled,
+ Scheduler_Node *victim,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Thread_Control *victim_thread;
+ ISR_lock_Context lock_context;
+
+ victim_thread = _Scheduler_Node_get_user( victim );
+ _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
+
+ _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
+ _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
+ _Thread_Scheduler_release_critical( victim_thread, &lock_context );
+
+ _Scheduler_SMP_Allocate_processor(
+ context,
+ scheduled,
+ victim_thread,
+ allocate_processor
+ );
+
+ return victim_thread;
+}
+
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
Scheduler_Context *context,
Scheduler_Node *filter,
@@ -581,20 +603,10 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
);
if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
- Thread_Control *lowest_scheduled_user =
- _Scheduler_Node_get_user( lowest_scheduled );
+ Thread_Control *lowest_scheduled_user;
Thread_Control *idle;
- _Scheduler_SMP_Node_change_state(
- lowest_scheduled,
- SCHEDULER_SMP_NODE_READY
- );
- _Scheduler_Thread_change_state(
- lowest_scheduled_user,
- THREAD_SCHEDULER_READY
- );
-
- _Scheduler_SMP_Allocate_processor(
+ lowest_scheduled_user = _Scheduler_SMP_Preempt(
context,
node,
lowest_scheduled,
@@ -748,13 +760,10 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
);
if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
- Thread_Control *user = _Scheduler_Node_get_user( node );
+ Thread_Control *user;
Thread_Control *idle;
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
-
- _Scheduler_SMP_Allocate_processor(
+ user = _Scheduler_SMP_Preempt(
context,
highest_ready,
node,
@@ -836,7 +845,7 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
_Scheduler_SMP_Allocate_processor(
context,
highest_ready,
- victim,
+ _Scheduler_Node_get_user( victim ),
allocate_processor
);
--
1.8.4.5
More information about the devel mailing list