[PATCH 19/30] score: First part of new MrsP implementation
Sebastian Huber
sebastian.huber@embedded-brains.de
Mon Oct 31 08:51:50 UTC 2016
Update #2556.
---
 cpukit/libmisc/monitor/mon-sema.c                |   2 +-
 cpukit/sapi/src/interrtext.c                     |   3 +-
 cpukit/score/include/rtems/score/interr.h        |   3 +-
 cpukit/score/include/rtems/score/mrsp.h          |  80 +-----
 cpukit/score/include/rtems/score/mrspimpl.h      | 284 ++++++---------------
 cpukit/score/include/rtems/score/schedulerimpl.h |  56 ++++
 cpukit/score/include/rtems/score/status.h        |   2 -
 cpukit/score/include/rtems/score/threadimpl.h    |  11 +
 cpukit/score/include/rtems/score/threadqimpl.h   |  62 +++++
 cpukit/score/src/threadchangepriority.c          |  17 ++
 cpukit/score/src/threadqenqueue.c                | 151 +++++++++--
 testsuites/smptests/Makefile.am                  |   1 +
 testsuites/smptests/configure.ac                 |   1 +
 testsuites/smptests/smpfatal03/Makefile.am       |  19 ++
 testsuites/smptests/smpfatal03/init.c            | 108 ++++++++
 testsuites/smptests/smpfatal03/smpfatal03.doc    |  12 +
 testsuites/smptests/smpfatal03/smpfatal03.scn    |   2 +
 testsuites/smptests/smpmrsp01/init.c             | 118 +++++----
 testsuites/sptests/spinternalerror02/init.c      |   2 +-
 .../spinternalerror02/spinternalerror02.scn      |   1 +
 20 files changed, 569 insertions(+), 366 deletions(-)
create mode 100644 testsuites/smptests/smpfatal03/Makefile.am
create mode 100644 testsuites/smptests/smpfatal03/init.c
create mode 100644 testsuites/smptests/smpfatal03/smpfatal03.doc
create mode 100644 testsuites/smptests/smpfatal03/smpfatal03.scn
diff --git a/cpukit/libmisc/monitor/mon-sema.c b/cpukit/libmisc/monitor/mon-sema.c
index 7334611..3dc7172 100644
--- a/cpukit/libmisc/monitor/mon-sema.c
+++ b/cpukit/libmisc/monitor/mon-sema.c
@@ -84,7 +84,7 @@ rtems_monitor_sema_canonical(
#if defined(RTEMS_SMP)
case SEMAPHORE_VARIANT_MRSP:
canonical_sema->cur_count =
- rtems_sema->Core_control.MRSP.Resource.owner == NULL;
+ _MRSP_Get_owner( &rtems_sema->Core_control.MRSP ) == NULL;
canonical_sema->max_count = 1;
break;
#endif
diff --git a/cpukit/sapi/src/interrtext.c b/cpukit/sapi/src/interrtext.c
index 8408061..6653022 100644
--- a/cpukit/sapi/src/interrtext.c
+++ b/cpukit/sapi/src/interrtext.c
@@ -55,7 +55,8 @@ static const char *const internal_error_text[] = {
"INTERNAL_ERROR_RESOURCE_IN_USE",
"INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL",
"INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL",
- "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK"
+ "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK",
+ "INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE"
};
const char *rtems_internal_error_text( rtems_fatal_code error )
diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h
index 845dc6f..dff6101 100644
--- a/cpukit/score/include/rtems/score/interr.h
+++ b/cpukit/score/include/rtems/score/interr.h
@@ -164,7 +164,8 @@ typedef enum {
INTERNAL_ERROR_RESOURCE_IN_USE,
INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL,
INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL,
- INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
+ INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
} Internal_errors_Core_list;
typedef CPU_Uint32ptr Internal_errors_t;
diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h
index 5af3a06..85cbff7 100644
--- a/cpukit/score/include/rtems/score/mrsp.h
+++ b/cpukit/score/include/rtems/score/mrsp.h
@@ -19,9 +19,6 @@
#if defined(RTEMS_SMP)
-#include <rtems/score/chain.h>
-#include <rtems/score/scheduler.h>
-#include <rtems/score/thread.h>
#include <rtems/score/threadq.h>
#ifdef __cplusplus
@@ -51,87 +48,16 @@ extern "C" {
* @{
*/
-typedef struct MRSP_Control MRSP_Control;
-
-/**
- * @brief MrsP rival.
- *
- * The rivals are used by threads waiting for resource ownership. They are
- * registered in the MrsP control block.
- */
-typedef struct {
- /**
- * @brief The node for registration in the MrsP rival chain.
- *
- * The chain operations are protected by the MrsP control lock.
- *
- * @see MRSP_Control::Rivals.
- */
- Chain_Node Node;
-
- /**
- * @brief The corresponding MrsP control block.
- */
- MRSP_Control *resource;
-
- /**
- * @brief Identification of the rival thread.
- */
- Thread_Control *thread;
-
- /**
- * @brief The ceiling priority used by the rival thread.
- */
- Priority_Node Ceiling_priority;
-
- /**
- * @brief The initial help state of the thread at the begin of the resource
- * obtain sequence.
- *
- * Used to restore this state after a timeout.
- */
- Scheduler_Help_state initial_help_state;
-
- /**
- * @brief The rival status.
- *
- * Initially the status is set to MRSP_WAIT_FOR_OWNERSHIP. The rival will
- * busy wait until a status change happens. This can be STATUS_SUCCESSFUL or
- * STATUS_TIMEOUT. State changes are protected by the MrsP control lock.
- */
- volatile int status;
-
- /**
- * @brief Watchdog for timeouts.
- */
- Watchdog_Control Watchdog;
-} MRSP_Rival;
-
/**
* @brief MrsP control block.
*/
-struct MRSP_Control {
+typedef struct {
/**
- * @brief Lock to protect the resource dependency tree.
- *
- * This is a thread queue since this simplifies the Classic semaphore
- * implementation. Only the lock part of the thread queue is used.
+ * @brief The thread queue to manage ownership and waiting threads.
*/
Thread_queue_Control Wait_queue;
/**
- * @brief Basic resource control.
- */
- Resource_Control Resource;
-
- /**
- * @brief A chain of MrsP rivals waiting for resource ownership.
- *
- * @see MRSP_Rival::Node.
- */
- Chain_Control Rivals;
-
- /**
* @brief The ceiling priority used by the owner thread.
*/
Priority_Node Ceiling_priority;
@@ -140,7 +66,7 @@ struct MRSP_Control {
* @brief One ceiling priority per scheduler instance.
*/
Priority_Control *ceiling_priorities;
-};
+} MRSP_Control;
/** @} */
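For illustration, a minimal sketch (not part of the patch) of how the slimmed-down control block is queried. The helper names are hypothetical; the accessors are the ones added to mrspimpl.h below:

  #include <rtems/score/mrspimpl.h>

  /* Ownership now lives in the embedded thread queue instead of a
   * Resource_Control, so an idle check is a plain owner comparison. */
  static bool mrsp_is_idle( MRSP_Control *mrsp )
  {
    return _MRSP_Get_owner( mrsp ) == NULL;
  }

  /* One ceiling priority is maintained per scheduler instance. */
  static Priority_Control mrsp_ceiling(
    const MRSP_Control      *mrsp,
    const Scheduler_Control *scheduler
  )
  {
    return _MRSP_Get_priority( mrsp, scheduler );
  }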
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index 92cc566..4b4e8c3 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -20,9 +20,6 @@
#if defined(RTEMS_SMP)
#include <rtems/score/assert.h>
-#include <rtems/score/chainimpl.h>
-#include <rtems/score/resourceimpl.h>
-#include <rtems/score/schedulerimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/watchdogimpl.h>
@@ -38,28 +35,7 @@ extern "C" {
* @{
*/
-/**
- * @brief Internal state used for MRSP_Rival::status to indicate that this
- * rival waits for resource ownership.
- */
-#define MRSP_WAIT_FOR_OWNERSHIP STATUS_MINUS_ONE
-
-/*
- * FIXME: Operations with the resource dependency tree are protected by the
- * global scheduler lock. Since the scheduler lock should be scheduler
- * instance specific in the future this will only work temporarily. A more
- * sophisticated locking strategy is necessary.
- */
-
-RTEMS_INLINE_ROUTINE void _MRSP_Giant_acquire( ISR_lock_Context *lock_context )
-{
- /* FIXME: MrsP protocol implementation will be reworked soon */
-}
-
-RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context )
-{
- /* FIXME: MrsP protocol implementation will be reworked soon */
-}
+#define MRSP_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit
RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
MRSP_Control *mrsp,
@@ -77,6 +53,19 @@ RTEMS_INLINE_ROUTINE void _MRSP_Release(
_Thread_queue_Release( &mrsp->Wait_queue, queue_context );
}
+RTEMS_INLINE_ROUTINE Thread_Control *_MRSP_Get_owner( MRSP_Control *mrsp )
+{
+ return mrsp->Wait_queue.Queue.owner;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Set_owner(
+ MRSP_Control *mrsp,
+ Thread_Control *owner
+)
+{
+ mrsp->Wait_queue.Queue.owner = owner;
+}
+
RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_priority(
const MRSP_Control *mrsp,
const Scheduler_Control *scheduler
@@ -149,23 +138,23 @@ RTEMS_INLINE_ROUTINE void _MRSP_Remove_priority(
RTEMS_INLINE_ROUTINE void _MRSP_Replace_priority(
MRSP_Control *mrsp,
Thread_Control *thread,
- MRSP_Rival *rival
+ Priority_Node *ceiling_priority
)
{
ISR_lock_Context lock_context;
- _Thread_Wait_acquire_default_critical( thread, &lock_context );
+ _Thread_Wait_acquire_default( thread, &lock_context );
_Thread_Priority_replace(
thread,
- &rival->Ceiling_priority,
+ ceiling_priority,
&mrsp->Ceiling_priority
);
- _Thread_Wait_release_default_critical( thread, &lock_context );
+ _Thread_Wait_release_default( thread, &lock_context );
}
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
MRSP_Control *mrsp,
- Thread_Control *new_owner,
+ Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
@@ -174,7 +163,7 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
status = _MRSP_Raise_priority(
mrsp,
- new_owner,
+ executing,
&mrsp->Ceiling_priority,
queue_context
);
@@ -184,17 +173,12 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Claim_ownership(
return status;
}
- _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
- _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
- _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
-
+ _MRSP_Set_owner( mrsp, executing );
cpu_self = _Thread_Dispatch_disable_critical(
&queue_context->Lock_context.Lock_context
);
_MRSP_Release( mrsp, queue_context );
-
- _Thread_Priority_update( queue_context );
-
+ _Thread_Priority_and_sticky_update( executing, 1 );
_Thread_Dispatch_enable( cpu_self );
return STATUS_SUCCESSFUL;
}
@@ -234,74 +218,23 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
}
}
- _Resource_Initialize( &mrsp->Resource );
- _Chain_Initialize_empty( &mrsp->Rivals );
_Thread_queue_Initialize( &mrsp->Wait_queue );
-
return STATUS_SUCCESSFUL;
}
-RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
-{
- MRSP_Rival *rival;
- MRSP_Control *mrsp;
- Thread_Control *thread;
- Thread_queue_Context queue_context;
-
- rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
- mrsp = rival->resource;
- thread = rival->thread;
-
- _Thread_queue_Context_initialize( &queue_context );
- _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
- _MRSP_Acquire_critical( mrsp, &queue_context );
-
- if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
- ISR_lock_Context giant_lock_context;
-
- _MRSP_Remove_priority( thread, &rival->Ceiling_priority, &queue_context );
-
- _MRSP_Giant_acquire( &giant_lock_context );
-
- _Chain_Extract_unprotected( &rival->Node );
- _Resource_Node_extract( &thread->Resource_node );
- _Resource_Node_set_dependency( &thread->Resource_node, NULL );
- _Scheduler_Thread_change_help_state( thread, rival->initial_help_state );
- _Scheduler_Thread_change_resource_root( thread, thread );
-
- _MRSP_Giant_release( &giant_lock_context );
-
- rival->status = STATUS_TIMEOUT;
-
- _MRSP_Release( mrsp, &queue_context );
-
- _Thread_Priority_update( &queue_context );
- } else {
- _MRSP_Release( mrsp, &queue_context );
- }
-}
-
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
MRSP_Control *mrsp,
- Resource_Node *owner,
Thread_Control *executing,
Thread_queue_Context *queue_context
)
{
- Status_Control status;
- MRSP_Rival rival;
- Thread_Life_state life_state;
- Per_CPU_Control *cpu_self;
- ISR_lock_Context giant_lock_context;
- ISR_Level level;
- Watchdog_Interval timeout;
-
- _Assert( queue_context->timeout_discipline == WATCHDOG_RELATIVE );
+ Status_Control status;
+ Priority_Node ceiling_priority;
status = _MRSP_Raise_priority(
mrsp,
executing,
- &rival.Ceiling_priority,
+ &ceiling_priority,
queue_context
);
@@ -310,64 +243,38 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
return status;
}
- rival.thread = executing;
- rival.resource = mrsp;
- _Chain_Initialize_node( &rival.Node );
-
- _MRSP_Giant_acquire( &giant_lock_context );
-
- rival.initial_help_state =
- _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL );
- rival.status = MRSP_WAIT_FOR_OWNERSHIP;
-
- _Chain_Initialize_node( &rival.Node );
- _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node );
- _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node );
- _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
- _Scheduler_Thread_change_resource_root(
- executing,
- THREAD_RESOURCE_NODE_TO_THREAD( _Resource_Node_get_root( owner ) )
+ _Thread_queue_Context_set_deadlock_callout(
+ queue_context,
+ _Thread_queue_Deadlock_status
);
-
- _MRSP_Giant_release( &giant_lock_context );
-
- cpu_self = _Thread_Dispatch_disable_critical(
- &queue_context->Lock_context.Lock_context
+ status = _Thread_queue_Enqueue_sticky(
+ &mrsp->Wait_queue.Queue,
+ MRSP_TQ_OPERATIONS,
+ executing,
+ queue_context
);
- _MRSP_Release( mrsp, queue_context );
-
- _Thread_Priority_update( queue_context );
- timeout = (Watchdog_Interval) queue_context->timeout;
-
- if ( timeout > 0 ) {
- _Watchdog_Preinitialize( &rival.Watchdog, cpu_self );
- _Watchdog_Initialize( &rival.Watchdog, _MRSP_Timeout );
- _ISR_Local_disable( level );
- _Watchdog_Per_CPU_insert_relative( &rival.Watchdog, cpu_self, timeout );
- _ISR_Local_enable( level );
- }
-
- life_state = _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
- _Thread_Dispatch_enable( cpu_self );
-
- _Assert( _Debug_Is_thread_dispatching_allowed() );
-
- /* Wait for state change */
- do {
- status = rival.status;
- } while ( status == MRSP_WAIT_FOR_OWNERSHIP );
+ if ( status == STATUS_SUCCESSFUL ) {
+ _MRSP_Replace_priority( mrsp, executing, &ceiling_priority );
+ } else {
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ int sticky_level_change;
- _Thread_Set_life_protection( life_state );
+ if ( status != STATUS_DEADLOCK ) {
+ sticky_level_change = -1;
+ } else {
+ sticky_level_change = 0;
+ }
- if ( timeout > 0 ) {
- _ISR_Local_disable( level );
- _Watchdog_Per_CPU_remove(
- &rival.Watchdog,
- cpu_self,
- &cpu_self->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ]
+ _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
+ _MRSP_Remove_priority( executing, &ceiling_priority, &queue_context );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context.Lock_context.Lock_context
);
- _ISR_Local_enable( level );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+ _Thread_Priority_and_sticky_update( executing, sticky_level_change );
+ _Thread_Dispatch_enable( cpu_self );
}
return status;
@@ -381,22 +288,21 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
)
{
Status_Control status;
- Resource_Node *owner;
+ Thread_Control *owner;
_MRSP_Acquire_critical( mrsp, queue_context );
- owner = _Resource_Get_owner( &mrsp->Resource );
+ owner = _MRSP_Get_owner( mrsp );
if ( owner == NULL ) {
status = _MRSP_Claim_ownership( mrsp, executing, queue_context );
- } else if (
- wait
- && _Resource_Node_get_root( owner ) != &executing->Resource_node
- ) {
- status = _MRSP_Wait_for_ownership( mrsp, owner, executing, queue_context );
+ } else if ( owner == executing ) {
+ _MRSP_Release( mrsp, queue_context );
+ status = STATUS_UNAVAILABLE;
+ } else if ( wait ) {
+ status = _MRSP_Wait_for_ownership( mrsp, executing, queue_context );
} else {
_MRSP_Release( mrsp, queue_context );
- /* Not available, nested access or deadlock */
status = STATUS_UNAVAILABLE;
}
@@ -409,77 +315,45 @@ RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
Thread_queue_Context *queue_context
)
{
- ISR_lock_Context giant_lock_context;
- Per_CPU_Control *cpu_self;
+ Thread_queue_Heads *heads;
- if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
+ if ( _MRSP_Get_owner( mrsp ) != executing ) {
_ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
return STATUS_NOT_OWNER;
}
- if (
- !_Resource_Is_most_recently_obtained(
- &mrsp->Resource,
- &executing->Resource_node
- )
- ) {
- _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
- return STATUS_RELEASE_ORDER_VIOLATION;
- }
-
_MRSP_Acquire_critical( mrsp, queue_context );
- _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
- _MRSP_Giant_acquire( &giant_lock_context );
-
- _Resource_Extract( &mrsp->Resource );
- if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
- _Resource_Set_owner( &mrsp->Resource, NULL );
- } else {
- MRSP_Rival *rival;
- Thread_Control *new_owner;
-
- rival = (MRSP_Rival *) _Chain_Get_first_unprotected( &mrsp->Rivals );
-
- /*
- * This must be inside the critical section since the status prevents a
- * potential double extraction in _MRSP_Timeout().
- */
- rival->status = STATUS_SUCCESSFUL;
-
- new_owner = rival->thread;
+ _MRSP_Set_owner( mrsp, NULL );
+ _MRSP_Remove_priority( executing, &mrsp->Ceiling_priority, queue_context );
- _MRSP_Replace_priority( mrsp, new_owner, rival );
+ heads = mrsp->Wait_queue.Queue.heads;
- _Resource_Node_extract( &new_owner->Resource_node );
- _Resource_Node_set_dependency( &new_owner->Resource_node, NULL );
- _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
- _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
- _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
- _Scheduler_Thread_change_resource_root( new_owner, new_owner );
- }
+ if ( heads == NULL ) {
+ Per_CPU_Control *cpu_self;
- if ( !_Resource_Node_owns_resources( &executing->Resource_node ) ) {
- _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_YOURSELF );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _MRSP_Release( mrsp, queue_context );
+ _Thread_Priority_and_sticky_update( executing, -1 );
+ _Thread_Dispatch_enable( cpu_self );
+ return STATUS_SUCCESSFUL;
}
- _MRSP_Giant_release( &giant_lock_context );
-
- cpu_self = _Thread_Dispatch_disable_critical(
- &queue_context->Lock_context.Lock_context
+ _Thread_queue_Surrender_sticky(
+ &mrsp->Wait_queue.Queue,
+ heads,
+ executing,
+ queue_context,
+ MRSP_TQ_OPERATIONS
);
- _MRSP_Release( mrsp, queue_context );
-
- _Thread_Priority_update( queue_context );
-
- _Thread_Dispatch_enable( cpu_self );
-
return STATUS_SUCCESSFUL;
}
RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
{
- if ( _Resource_Get_owner( &mrsp->Resource ) != NULL ) {
+ if ( _MRSP_Get_owner( mrsp ) != NULL ) {
return STATUS_RESOURCE_IN_USE;
}
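For reference, a hedged application-level sketch of the seize semantics implemented above (the semaphore ID is hypothetical): an idle semaphore is claimed immediately, a nested obtain by the owner is rejected, and any other thread enqueues sticky and busy waits.

  rtems_status_code sc;

  sc = rtems_semaphore_obtain( mrsp_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  /* Owner was NULL: _MRSP_Claim_ownership() raised the caller to the
   * ceiling priority of its scheduler instance. */
  rtems_test_assert( sc == RTEMS_SUCCESSFUL );

  sc = rtems_semaphore_obtain( mrsp_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  /* Owner == executing: STATUS_UNAVAILABLE, expected to map to
   * RTEMS_UNSATISFIED at the Classic API level. */
  rtems_test_assert( sc == RTEMS_UNSATISFIED );

  sc = rtems_semaphore_release( mrsp_id );
  rtems_test_assert( sc == RTEMS_SUCCESSFUL );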
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 25b961f..92b08e5 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -553,6 +553,62 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread
#endif
}
+#if defined(RTEMS_SMP)
+/**
+ * @brief Changes the sticky level of the home scheduler node and propagates a
+ * priority change of a thread to the scheduler.
+ *
+ * @param[in] the_thread The thread changing its priority or sticky level.
+ * @param[in] sticky_level_change The sticky level change applied to the home
+ *   scheduler node of the thread.
+ *
+ * @see _Scheduler_Update_priority().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
+ Thread_Control *the_thread,
+ int sticky_level_change
+)
+{
+ Chain_Node *node;
+ const Chain_Node *tail;
+ Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ _Thread_Scheduler_process_requests( the_thread );
+
+ node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+ node = _Chain_Next( node );
+
+ while ( node != tail ) {
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ( *scheduler->Operations.update_priority )(
+ scheduler,
+ the_thread,
+ scheduler_node
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+
+ node = _Chain_Next( node );
+ }
+}
+#endif
+
/**
* @brief Maps a thread priority from the user domain to the scheduler domain.
*
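All callers in this patch follow the same pattern: disable thread dispatching, drop the object lock, perform the combined update, then re-enable dispatching (which may carry out a thread dispatch). A condensed sketch taken from the MrsP surrender path above:

  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _MRSP_Release( mrsp, queue_context );                /* drop object lock */
  _Thread_Priority_and_sticky_update( executing, -1 ); /* leave sticky level */
  _Thread_Dispatch_enable( cpu_self );                 /* may dispatch here */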
diff --git a/cpukit/score/include/rtems/score/status.h b/cpukit/score/include/rtems/score/status.h
index 453bf11..6b6f3c5 100644
--- a/cpukit/score/include/rtems/score/status.h
+++ b/cpukit/score/include/rtems/score/status.h
@@ -113,8 +113,6 @@ typedef enum {
STATUS_BUILD( STATUS_CLASSIC_NOT_OWNER_OF_RESOURCE, EPERM ),
STATUS_OBJECT_WAS_DELETED =
STATUS_BUILD( STATUS_CLASSIC_OBJECT_WAS_DELETED, EINVAL ),
- STATUS_RELEASE_ORDER_VIOLATION =
- STATUS_BUILD( STATUS_CLASSIC_INCORRECT_STATE, EPERM ),
STATUS_RESOURCE_IN_USE =
STATUS_BUILD( STATUS_CLASSIC_RESOURCE_IN_USE, EBUSY ),
STATUS_SUCCESSFUL =
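With STATUS_RELEASE_ORDER_VIOLATION gone (the corresponding smpmrsp01 test case is deleted below), releasing MrsP semaphores in non-LIFO order is expected to succeed rather than fail with RTEMS_INCORRECT_STATE. A hedged sketch with hypothetical IDs:

  sc = rtems_semaphore_obtain( id_a, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  rtems_test_assert( sc == RTEMS_SUCCESSFUL );
  sc = rtems_semaphore_obtain( id_b, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  rtems_test_assert( sc == RTEMS_SUCCESSFUL );

  /* Previously RTEMS_INCORRECT_STATE; now assumed RTEMS_SUCCESSFUL */
  sc = rtems_semaphore_release( id_a );
  sc = rtems_semaphore_release( id_b );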
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 0d8074e..79239db 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -1705,6 +1705,17 @@ RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
#endif
}
+RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
+#else
+ return the_thread->Wait.flags;
+#endif
+}
+
/**
* @brief Tries to change the thread wait flags with release semantics in case
* of success.
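The acquire variant pairs with the busy wait added in _Thread_queue_Enqueue_sticky() below: once the load observes a state other than THREAD_QUEUE_INTEND_TO_BLOCK, later reads such as the wait return code cannot be reordered before it. A minimal sketch of that consumer side (the_thread assumed):

  Thread_Wait_flags flags;
  Status_Control    status;

  do {
    flags = _Thread_Wait_flags_get_acquire( the_thread );
  } while ( flags == THREAD_QUEUE_INTEND_TO_BLOCK );

  /* Safe to read data published by the surrendering thread */
  status = _Thread_Wait_get_status( the_thread );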
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index e24beec..f74436d 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -24,6 +24,7 @@
#include <rtems/score/priorityimpl.h>
#include <rtems/score/scheduler.h>
#include <rtems/score/smp.h>
+#include <rtems/score/status.h>
#include <rtems/score/thread.h>
#if defined(RTEMS_DEBUG)
@@ -553,6 +554,37 @@ void _Thread_queue_Enqueue_critical(
Thread_queue_Context *queue_context
);
+#if defined(RTEMS_SMP)
+/**
+ * @brief Enqueues the thread on the thread queue and busy waits for dequeue.
+ *
+ * Optionally starts the thread timer in case the timeout discipline is not
+ * WATCHDOG_NO_TIMEOUT. Timeout discipline and value are in the queue_context.
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock and register it as the new thread lock.
+ *
+ * The thread priorities of the owner and the thread are updated with respect to the
+ * scheduler. The sticky level of the thread is incremented. A thread
+ * dispatch is performed if necessary.
+ *
+ * Afterwards, the thread busy waits on the thread wait flags until a timeout
+ * occurs or the thread queue is surrendered to this thread. So, it sticks to
+ * the processor instead of blocking with respect to the scheduler.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] operations The thread queue operations.
+ * @param[in] the_thread The thread to enqueue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ */
+Status_Control _Thread_queue_Enqueue_sticky(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+);
+#endif
+
/**
* @brief Acquires the thread queue lock and calls
* _Thread_queue_Enqueue_critical().
@@ -733,6 +765,36 @@ void _Thread_queue_Surrender(
const Thread_queue_Operations *operations
);
+#if defined(RTEMS_SMP)
+/**
+ * @brief Surrenders the thread queue previously owned by the thread to the
+ * first enqueued thread.
+ *
+ * The owner of the thread queue must be set to NULL by the caller.
+ *
+ * The caller must be the owner of the thread queue lock. This function will
+ * release the thread queue lock.
+ *
+ * The thread priorities of the previous owner and the new owner are updated. The
+ * sticky level of the previous owner is decremented. A thread dispatch is
+ * performed if necessary.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] heads The thread queue heads. It must not be NULL.
+ * @param[in] previous_owner The previous owner thread surrendering the thread
+ * queue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ * @param[in] operations The thread queue operations.
+ */
+void _Thread_queue_Surrender_sticky(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
+);
+#endif
+
RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_empty(
const Thread_queue_Queue *queue
)
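A condensed sketch of the intended caller protocol for the new sticky pair, mirroring the MrsP paths above (mutex is a hypothetical object embedding a Thread_queue_Control; both calls require the queue lock to be held):

  /* Acquire side: enqueue and busy wait; returns the dequeue status */
  status = _Thread_queue_Enqueue_sticky(
    &mutex->Wait_queue.Queue,
    &_Thread_queue_Operations_priority_inherit,
    executing,
    queue_context
  );

  /* Release side: clear the owner first, then hand over if waiters exist */
  mutex->Wait_queue.Queue.owner = NULL;
  heads = mutex->Wait_queue.Queue.heads;
  if ( heads != NULL ) {
    _Thread_queue_Surrender_sticky(
      &mutex->Wait_queue.Queue,
      heads,
      executing,
      queue_context,
      &_Thread_queue_Operations_priority_inherit
    );
  }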
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index e107bcc..4fd4c02 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -353,3 +353,20 @@ void _Thread_Priority_update( Thread_queue_Context *queue_context )
_Thread_State_release( the_thread, &lock_context );
}
}
+
+#if defined(RTEMS_SMP)
+void _Thread_Priority_and_sticky_update(
+ Thread_Control *the_thread,
+ int sticky_level_change
+)
+{
+ ISR_lock_Context lock_context;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ _Scheduler_Priority_and_sticky_update(
+ the_thread,
+ sticky_level_change
+ );
+ _Thread_State_release( the_thread, &lock_context );
+}
+#endif
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 362ce8f..335ee0f 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -370,6 +370,37 @@ void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
);
}
+static void _Thread_queue_Timeout(
+ Thread_Control *the_thread,
+ Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+)
+{
+ switch ( queue_context->timeout_discipline ) {
+ case WATCHDOG_RELATIVE:
+ /* A relative timeout of 0 is a special case indefinite (no) timeout */
+ if ( queue_context->timeout != 0 ) {
+ _Thread_Timer_insert_relative(
+ the_thread,
+ cpu_self,
+ _Thread_Timeout,
+ (Watchdog_Interval) queue_context->timeout
+ );
+ }
+ break;
+ case WATCHDOG_ABSOLUTE:
+ _Thread_Timer_insert_absolute(
+ the_thread,
+ cpu_self,
+ _Thread_Timeout,
+ queue_context->timeout
+ );
+ break;
+ default:
+ break;
+ }
+}
+
void _Thread_queue_Enqueue_critical(
Thread_queue_Queue *queue,
const Thread_queue_Operations *operations,
@@ -430,29 +461,7 @@ void _Thread_queue_Enqueue_critical(
/*
* If the thread wants to timeout, then schedule its timer.
*/
- switch ( queue_context->timeout_discipline ) {
- case WATCHDOG_RELATIVE:
- /* A relative timeout of 0 is a special case indefinite (no) timeout */
- if ( queue_context->timeout != 0 ) {
- _Thread_Timer_insert_relative(
- the_thread,
- cpu_self,
- _Thread_Timeout,
- (Watchdog_Interval) queue_context->timeout
- );
- }
- break;
- case WATCHDOG_ABSOLUTE:
- _Thread_Timer_insert_absolute(
- the_thread,
- cpu_self,
- _Thread_Timeout,
- queue_context->timeout
- );
- break;
- default:
- break;
- }
+ _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
/*
* At this point thread dispatching is disabled, however, we already released
@@ -476,6 +485,65 @@ void _Thread_queue_Enqueue_critical(
_Thread_Dispatch_enable( cpu_self );
}
+#if defined(RTEMS_SMP)
+Status_Control _Thread_queue_Enqueue_sticky(
+ Thread_queue_Queue *queue,
+ const Thread_queue_Operations *operations,
+ Thread_Control *the_thread,
+ Thread_queue_Context *queue_context
+)
+{
+ Per_CPU_Control *cpu_self;
+
+ _Thread_Wait_claim( the_thread, queue );
+
+ if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
+ _Thread_queue_Path_release_critical( queue_context );
+ _Thread_Wait_restore_default( the_thread );
+ _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
+ _Thread_Wait_tranquilize( the_thread );
+ ( *queue_context->deadlock_callout )( the_thread );
+ return _Thread_Wait_get_status( the_thread );
+ }
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ _Thread_Wait_claim_finalize( the_thread, operations );
+ ( *operations->enqueue )( queue, the_thread, queue_context );
+
+ _Thread_queue_Path_release_critical( queue_context );
+
+ the_thread->Wait.return_code = STATUS_SUCCESSFUL;
+ _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
+
+ if ( cpu_self->thread_dispatch_disable_level != 1 ) {
+ _Terminate(
+ INTERNAL_ERROR_CORE,
+ false,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
+ );
+ }
+
+ _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
+ _Thread_Priority_update( queue_context );
+ _Thread_Priority_and_sticky_update( the_thread, 1 );
+ _Thread_Dispatch_enable( cpu_self );
+
+ while (
+ _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
+ ) {
+ /* Wait */
+ }
+
+ _Thread_Wait_tranquilize( the_thread );
+ _Thread_Timer_remove( the_thread );
+ return _Thread_Wait_get_status( the_thread );
+}
+#endif
+
#if defined(RTEMS_MULTIPROCESSING)
static bool _Thread_queue_MP_set_callout(
Thread_Control *the_thread,
@@ -666,6 +734,43 @@ void _Thread_queue_Surrender(
_Thread_Dispatch_enable( cpu_self );
}
+#if defined(RTEMS_SMP)
+void _Thread_queue_Surrender_sticky(
+ Thread_queue_Queue *queue,
+ Thread_queue_Heads *heads,
+ Thread_Control *previous_owner,
+ Thread_queue_Context *queue_context,
+ const Thread_queue_Operations *operations
+)
+{
+ Thread_Control *new_owner;
+ Per_CPU_Control *cpu_self;
+
+ _Assert( heads != NULL );
+
+ _Thread_queue_Context_clear_priority_updates( queue_context );
+ new_owner = ( *operations->surrender )(
+ queue,
+ heads,
+ previous_owner,
+ queue_context
+ );
+ queue->owner = new_owner;
+ _Thread_queue_Make_ready_again( new_owner );
+
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_queue_Queue_release(
+ queue,
+ &queue_context->Lock_context.Lock_context
+ );
+ _Thread_Priority_and_sticky_update( previous_owner, -1 );
+ _Thread_Priority_and_sticky_update( new_owner, 0 );
+ _Thread_Dispatch_enable( cpu_self );
+}
+#endif
+
Thread_Control *_Thread_queue_Do_dequeue(
Thread_queue_Control *the_thread_queue,
const Thread_queue_Operations *operations
diff --git a/testsuites/smptests/Makefile.am b/testsuites/smptests/Makefile.am
index 02d1dfc..63398e3 100644
--- a/testsuites/smptests/Makefile.am
+++ b/testsuites/smptests/Makefile.am
@@ -17,6 +17,7 @@ SUBDIRS += smpcapture02
SUBDIRS += smpclock01
SUBDIRS += smpfatal01
SUBDIRS += smpfatal02
+SUBDIRS += smpfatal03
SUBDIRS += smpfatal04
SUBDIRS += smpfatal05
SUBDIRS += smpfatal08
diff --git a/testsuites/smptests/configure.ac b/testsuites/smptests/configure.ac
index 8c8476f..6c632ce 100644
--- a/testsuites/smptests/configure.ac
+++ b/testsuites/smptests/configure.ac
@@ -75,6 +75,7 @@ smpcapture02/Makefile
smpclock01/Makefile
smpfatal01/Makefile
smpfatal02/Makefile
+smpfatal03/Makefile
smpfatal04/Makefile
smpfatal05/Makefile
smpfatal08/Makefile
diff --git a/testsuites/smptests/smpfatal03/Makefile.am b/testsuites/smptests/smpfatal03/Makefile.am
new file mode 100644
index 0000000..4ec2862
--- /dev/null
+++ b/testsuites/smptests/smpfatal03/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smpfatal03
+smpfatal03_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpfatal03.scn smpfatal03.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smpfatal03_OBJECTS)
+LINK_LIBS = $(smpfatal03_LDLIBS)
+
+smpfatal03$(EXEEXT): $(smpfatal03_OBJECTS) $(smpfatal03_DEPENDENCIES)
+ @rm -f smpfatal03$(EXEEXT)
+ $(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpfatal03/init.c b/testsuites/smptests/smpfatal03/init.c
new file mode 100644
index 0000000..0eb15aa
--- /dev/null
+++ b/testsuites/smptests/smpfatal03/init.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define TESTS_USE_PRINTK
+#include "tmacros.h"
+
+const char rtems_test_name[] = "SMPFATAL 3";
+
+static void task(rtems_task_argument arg)
+{
+ rtems_status_code sc;
+ rtems_id *sem_id;
+
+ sem_id = (rtems_id *) arg;
+
+ sc = rtems_semaphore_obtain(*sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_test_assert(0);
+}
+
+static void Init(rtems_task_argument arg)
+{
+ rtems_status_code sc;
+ rtems_id task_id;
+ rtems_id sem_id;
+
+ TEST_BEGIN();
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &sem_id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_create(
+ rtems_build_name('T', 'A', 'S', 'K'),
+ 1,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &task_id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(task_id, task, (rtems_task_argument) &sem_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ _Thread_Dispatch_disable();
+ rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(0);
+}
+
+static void fatal_extension(
+ rtems_fatal_source source,
+ bool is_internal,
+ rtems_fatal_code code
+)
+{
+ if (
+ source == INTERNAL_ERROR_CORE
+ && !is_internal
+ && code == INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
+ ) {
+ TEST_END();
+ }
+}
+
+#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_INITIAL_EXTENSIONS \
+ { .fatal = fatal_extension }, \
+ RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_MAXIMUM_TASKS 2
+#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES 1
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smpfatal03/smpfatal03.doc b/testsuites/smptests/smpfatal03/smpfatal03.doc
new file mode 100644
index 0000000..6d7e829
--- /dev/null
+++ b/testsuites/smptests/smpfatal03/smpfatal03.doc
@@ -0,0 +1,12 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpfatal03
+
+directives:
+
+ - _Thread_queue_Enqueue_sticky()
+
+concepts:
+
+ - Trigger the INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE fatal
+ error.
diff --git a/testsuites/smptests/smpfatal03/smpfatal03.scn b/testsuites/smptests/smpfatal03/smpfatal03.scn
new file mode 100644
index 0000000..5ec996f
--- /dev/null
+++ b/testsuites/smptests/smpfatal03/smpfatal03.scn
@@ -0,0 +1,2 @@
+*** BEGIN OF TEST SMPFATAL 3 ***
+*** END OF TEST SMPFATAL 3 ***
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
index efc997a..de5f31c 100644
--- a/testsuites/smptests/smpmrsp01/init.c
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -214,6 +214,37 @@ static void print_switch_events(test_context *ctx)
}
}
+static void create_timer(test_context *ctx)
+{
+ rtems_status_code sc;
+
+ sc = rtems_timer_create(
+ rtems_build_name('T', 'I', 'M', 'R'),
+ &ctx->timer_id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void delete_timer(test_context *ctx)
+{
+ rtems_status_code sc;
+
+ sc = rtems_timer_delete(ctx->timer_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void fire_timer(
+ test_context *ctx,
+ rtems_interval interval,
+ rtems_timer_service_routine_entry routine
+)
+{
+ rtems_status_code sc;
+
+ sc = rtems_timer_fire_after(ctx->timer_id, interval, routine, ctx);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
static void create_mrsp_sema(
test_context *ctx,
rtems_id *id,
@@ -744,37 +775,11 @@ static void test_mrsp_nested_obtain_error(test_context *ctx)
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
-static void test_mrsp_unlock_order_error(test_context *ctx)
+static void deadlock_timer(rtems_id timer_id, void *arg)
{
- rtems_status_code sc;
- rtems_id id_a;
- rtems_id id_b;
-
- puts("test MrsP unlock order error");
-
- create_mrsp_sema(ctx, &id_a, 1);
- create_mrsp_sema(ctx, &id_b, 1);
-
- sc = rtems_semaphore_obtain(id_a, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
- sc = rtems_semaphore_obtain(id_b, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
- sc = rtems_semaphore_release(id_a);
- rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
-
- sc = rtems_semaphore_release(id_b);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
- sc = rtems_semaphore_release(id_a);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
- sc = rtems_semaphore_delete(id_a);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ test_context *ctx = arg;
- sc = rtems_semaphore_delete(id_b);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ change_prio(ctx->main_task_id, 1);
}
static void deadlock_worker(rtems_task_argument arg)
@@ -785,6 +790,8 @@ static void deadlock_worker(rtems_task_argument arg)
sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ fire_timer(ctx, 2, deadlock_timer);
+
sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -810,6 +817,7 @@ static void test_mrsp_deadlock_error(test_context *ctx)
change_prio(RTEMS_SELF, prio);
+ create_timer(ctx);
create_mrsp_sema(ctx, &ctx->mrsp_ids[0], prio);
create_mrsp_sema(ctx, &ctx->mrsp_ids[1], prio);
@@ -832,8 +840,26 @@ static void test_mrsp_deadlock_error(test_context *ctx)
sc = rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ prio = 1;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[1],
+ ctx->scheduler_ids[0],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 2);
+
sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
- rtems_test_assert(sc == RTEMS_UNSATISFIED);
+ rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
+
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[1],
+ ctx->scheduler_ids[0],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -849,6 +875,8 @@ static void test_mrsp_deadlock_error(test_context *ctx)
sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ delete_timer(ctx);
}
static void test_mrsp_multiple_obtain(test_context *ctx)
@@ -1006,8 +1034,7 @@ static void unblock_ready_owner(test_context *ctx)
assert_prio(RTEMS_SELF, 3);
- sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ fire_timer(ctx, 2, unblock_ready_timer);
sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -1103,13 +1130,7 @@ static void various_block_unblock(test_context *ctx)
* user.
*/
- sc = rtems_timer_fire_after(
- ctx->timer_id,
- 2,
- unblock_owner_before_rival_timer,
- ctx
- );
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ fire_timer(ctx, 2, unblock_owner_before_rival_timer);
/* This will take the processor away from us, the timer will help later */
sc = rtems_task_resume(ctx->high_task_id[1]);
@@ -1123,13 +1144,7 @@ static void various_block_unblock(test_context *ctx)
sc = rtems_task_resume(ctx->high_task_id[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_timer_fire_after(
- ctx->timer_id,
- 2,
- unblock_owner_after_rival_timer,
- ctx
- );
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ fire_timer(ctx, 2, unblock_owner_after_rival_timer);
/* This will take the processor away from us, the timer will help later */
sc = rtems_task_resume(ctx->high_task_id[1]);
@@ -1229,11 +1244,7 @@ static void test_mrsp_various_block_and_unblock(test_context *ctx)
sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
- sc = rtems_timer_create(
- rtems_build_name('T', 'I', 'M', 'R'),
- &ctx->timer_id
- );
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ create_timer(ctx);
/* In case these tasks run, then we have a MrsP protocol violation */
start_low_task(ctx, 0);
@@ -1246,9 +1257,7 @@ static void test_mrsp_various_block_and_unblock(test_context *ctx)
rtems_test_assert(!ctx->low_run[1]);
print_switch_events(ctx);
-
- sc = rtems_timer_delete(ctx->timer_id);
- rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ delete_timer(ctx);
sc = rtems_task_delete(ctx->high_task_id[0]);
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
@@ -1749,7 +1758,6 @@ static void Init(rtems_task_argument arg)
test_mrsp_flush_error(ctx);
test_mrsp_initially_locked_error();
test_mrsp_nested_obtain_error(ctx);
- test_mrsp_unlock_order_error(ctx);
test_mrsp_deadlock_error(ctx);
test_mrsp_multiple_obtain(ctx);
diff --git a/testsuites/sptests/spinternalerror02/init.c b/testsuites/sptests/spinternalerror02/init.c
index eac90a0..3de5ef9 100644
--- a/testsuites/sptests/spinternalerror02/init.c
+++ b/testsuites/sptests/spinternalerror02/init.c
@@ -36,7 +36,7 @@ static void test_internal_error_text(void)
} while ( text != text_last );
rtems_test_assert(
- error - 3 == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
+ error - 3 == INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
);
}
diff --git a/testsuites/sptests/spinternalerror02/spinternalerror02.scn b/testsuites/sptests/spinternalerror02/spinternalerror02.scn
index ff04560..2be58f4 100644
--- a/testsuites/sptests/spinternalerror02/spinternalerror02.scn
+++ b/testsuites/sptests/spinternalerror02/spinternalerror02.scn
@@ -28,6 +28,7 @@ INTERNAL_ERROR_RESOURCE_IN_USE
INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL
INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
+INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
?
?
INTERNAL_ERROR_CORE
--
1.8.4.5