[PATCH 20/27] score: Move _Scheduler_Block_node()
Sebastian Huber
sebastian.huber at embedded-brains.de
Mon Nov 15 17:12:52 UTC 2021
Move _Scheduler_Block_node() into _Scheduler_SMP_Block(). This simplifies the
code and makes it easier to review.
Update #4531.
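
For reviewers who want the shape of the change without reading the full diff,
here is a standalone sketch of the refactoring. It uses stand-in types, a dummy
CPU pointer, and printf placeholders instead of the real RTEMS structures and
scheduler operations, so it only illustrates the control-flow change: the
helper's return-the-CPU-or-NULL protocol goes away and becomes a plain early
return once the helper body lives in its single caller.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
  int sticky_level;
} Node;

/*
 * Before: a generic helper decided whether blocking continues and signalled
 * it to the caller by returning the thread's CPU or NULL.
 */
static int *helper_block( Node *node, int *thread_cpu )
{
  --node->sticky_level;
  assert( node->sticky_level >= 0 );

  if ( node->sticky_level > 0 ) {
    return NULL;        /* node stays in place, e.g. covered by an idle thread */
  }

  return thread_cpu;    /* caller must extract the node and reschedule */
}

static void block_before( Node *node, int *cpu )
{
  int *thread_cpu = helper_block( node, cpu );

  if ( thread_cpu != NULL ) {
    printf( "extract node, schedule highest ready on CPU %d\n", *thread_cpu );
  }
}

/*
 * After: the helper body is inlined into its only SMP caller, so the
 * sticky-level special case is a plain early return and the NULL-pointer
 * protocol between the two functions disappears.
 */
static void block_after( Node *node, int *cpu )
{
  --node->sticky_level;
  assert( node->sticky_level >= 0 );

  if ( node->sticky_level > 0 ) {
    return;             /* node stays in place, e.g. covered by an idle thread */
  }

  printf( "extract node, schedule highest ready on CPU %d\n", *cpu );
}

int main( void )
{
  Node node = { .sticky_level = 1 };
  int  cpu = 0;

  block_before( &node, &cpu );

  node.sticky_level = 1;
  block_after( &node, &cpu );

  return 0;
}
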
---
cpukit/include/rtems/score/schedulerimpl.h | 56 ---------------
cpukit/include/rtems/score/schedulersmpimpl.h | 71 ++++++++++++-------
2 files changed, 46 insertions(+), 81 deletions(-)
diff --git a/cpukit/include/rtems/score/schedulerimpl.h b/cpukit/include/rtems/score/schedulerimpl.h
index 12b6806402..0081b1904b 100644
--- a/cpukit/include/rtems/score/schedulerimpl.h
+++ b/cpukit/include/rtems/score/schedulerimpl.h
@@ -926,62 +926,6 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread_if_necessary
return idle;
}
-/**
- * @brief Blocks this scheduler node.
- *
- * @param[in, out] thread The thread which wants to get blocked referencing this
- * node. This is not necessarily the user of this node in case the node
- * participates in the scheduler helping protocol.
- *
- * @param[in, out] node is the node which wants to get blocked.
- *
- * @param get_idle_node is the get idle node handler.
- *
- * @param arg is the get idle node handler argument.
- *
- * @retval thread_cpu The processor of the thread. Indicates to continue with
- * the blocking operation.
- * @retval NULL Otherwise.
- */
-RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
- Thread_Control *thread,
- Scheduler_Node *node,
- bool is_scheduled,
- Scheduler_Get_idle_node get_idle_node,
- void *arg
-)
-{
- int sticky_level;
- ISR_lock_Context lock_context;
- Per_CPU_Control *thread_cpu;
-
- sticky_level = node->sticky_level;
- --sticky_level;
- node->sticky_level = sticky_level;
- _Assert( sticky_level >= 0 );
-
- _Thread_Scheduler_acquire_critical( thread, &lock_context );
- thread_cpu = _Thread_Get_CPU( thread );
- _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
- _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
- _Thread_Scheduler_release_critical( thread, &lock_context );
-
- if ( sticky_level > 0 ) {
- if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
- Thread_Control *idle;
-
- idle = _Scheduler_Use_idle_thread( node, get_idle_node, arg );
- _Thread_Set_CPU( idle, thread_cpu );
- _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
- }
-
- return NULL;
- }
-
- _Assert( thread == _Scheduler_Node_get_user( node ) );
- return thread_cpu;
-}
-
/**
* @brief Discards the idle thread used by the scheduler node.
*
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
index 2ebcd98373..8aa41e81e4 100644
--- a/cpukit/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -1250,38 +1250,59 @@ static inline void _Scheduler_SMP_Block(
Scheduler_Release_idle_node release_idle_node
)
{
+ int sticky_level;
+ ISR_lock_Context lock_context;
Scheduler_SMP_Node_state node_state;
Per_CPU_Control *thread_cpu;
- node_state = _Scheduler_SMP_Node_state( node );
+ sticky_level = node->sticky_level;
+ --sticky_level;
+ node->sticky_level = sticky_level;
+ _Assert( sticky_level >= 0 );
- thread_cpu = _Scheduler_Block_node(
- thread,
- node,
- node_state == SCHEDULER_SMP_NODE_SCHEDULED,
- get_idle_node,
- context
- );
+ _Thread_Scheduler_acquire_critical( thread, &lock_context );
+ thread_cpu = _Thread_Get_CPU( thread );
+ _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
+ _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
+ _Thread_Scheduler_release_critical( thread, &lock_context );
- if ( thread_cpu != NULL ) {
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+ node_state = _Scheduler_SMP_Node_state( node );
- if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
- ( *extract_from_scheduled )( context, node );
- _Scheduler_SMP_Schedule_highest_ready(
- context,
- node,
- thread_cpu,
- extract_from_ready,
- get_highest_ready,
- move_from_ready_to_scheduled,
- allocate_processor,
- get_idle_node,
- release_idle_node
- );
- } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
- ( *extract_from_ready )( context, node );
+ if ( RTEMS_PREDICT_FALSE( sticky_level > 0 ) ) {
+ if (
+ node_state == SCHEDULER_SMP_NODE_SCHEDULED &&
+ _Scheduler_Node_get_idle( node ) == NULL
+ ) {
+ Thread_Control *idle;
+
+ idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
+ _Thread_Set_CPU( idle, thread_cpu );
+ _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
}
+
+ return;
+ }
+
+ _Assert( _Scheduler_Node_get_user( node ) == thread );
+ _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ ( *extract_from_scheduled )( context, node );
+ _Scheduler_SMP_Schedule_highest_ready(
+ context,
+ node,
+ thread_cpu,
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor,
+ get_idle_node,
+ release_idle_node
+ );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ ( *extract_from_ready )( context, node );
}
}
--
2.26.2