[PATCH 02/12] score: _CORE_mutex_Check_dispatch_for_seize()
Sebastian Huber
sebastian.huber at embedded-brains.de
Fri May 27 13:50:31 UTC 2016
Move the safety check performed by
_CORE_mutex_Check_dispatch_for_seize() out of the performance-critical
path and generalize it.  Blocking on a thread queue with an unexpected
thread dispatch disable level is illegal in all system states.

Add the expected thread dispatch disable level (which may be 1 or 2,
depending on the operation) to Thread_queue_Context and check it in
_Thread_queue_Enqueue_critical().
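
For illustration, the resulting caller-side pattern looks roughly like
the sketch below (adapted from the documentation example updated in
threadqimpl.h further down; the Mutex type, its owner field and
MUTEX_TQ_OPERATIONS stand in for whatever the caller provides, so this
is not a standalone compilable unit):

void _Mutex_Obtain( Mutex *mutex )
{
  Thread_queue_Context queue_context;
  Thread_Control      *executing;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Acquire( &mutex->Queue, &queue_context.Lock_context );

  executing = _Thread_Executing;

  if ( mutex->owner == NULL ) {
    /* Uncontended: take ownership and release the queue lock. */
    mutex->owner = executing;
    _Thread_queue_Release( &mutex->Queue, &queue_context.Lock_context );
  } else {
    /*
     * Declare the thread dispatch disable level expected inside the
     * enqueue operation.  If the actual level differs,
     * _Thread_queue_Enqueue_critical() terminates the system with
     * INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE.
     */
    _Thread_queue_Context_set_expected_level( &queue_context, 1 );
    _Thread_queue_Enqueue_critical(
      &mutex->Queue.Queue,
      MUTEX_TQ_OPERATIONS,
      executing,
      STATES_WAITING_FOR_MUTEX,
      WATCHDOG_NO_TIMEOUT,
      &queue_context
    );
  }
}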
---
cpukit/posix/include/rtems/posix/psignalimpl.h | 24 ++++-
cpukit/posix/src/condwaitsupp.c | 3 +-
cpukit/posix/src/killinfo.c | 9 +-
cpukit/posix/src/psignalclearsignals.c | 7 +-
cpukit/posix/src/psignalsetprocesssignals.c | 7 +-
cpukit/posix/src/psignalunblockthread.c | 15 +--
cpukit/posix/src/pthreadjoin.c | 26 ++---
cpukit/posix/src/sigaction.c | 7 +-
cpukit/posix/src/sigtimedwait.c | 26 ++---
cpukit/sapi/src/interrtext.c | 2 +-
cpukit/score/include/rtems/score/coremuteximpl.h | 33 +-----
cpukit/score/include/rtems/score/coresemimpl.h | 3 +-
cpukit/score/include/rtems/score/interr.h | 2 +-
cpukit/score/include/rtems/score/threadimpl.h | 8 +-
cpukit/score/include/rtems/score/threadq.h | 9 ++
cpukit/score/include/rtems/score/threadqimpl.h | 46 +++++++--
cpukit/score/src/condition.c | 88 ++++++++--------
cpukit/score/src/corebarrierwait.c | 3 +-
cpukit/score/src/coremsgseize.c | 3 +-
cpukit/score/src/coremsgsubmit.c | 3 +-
cpukit/score/src/coremutexseize.c | 22 ++--
cpukit/score/src/corerwlockobtainread.c | 3 +-
cpukit/score/src/corerwlockobtainwrite.c | 3 +-
cpukit/score/src/futex.c | 36 ++++---
cpukit/score/src/mutex.c | 125 ++++++++++++-----------
cpukit/score/src/semaphore.c | 37 ++++---
cpukit/score/src/threadqenqueue.c | 17 ++-
cpukit/score/src/threadrestart.c | 18 ++--
testsuites/sptests/spfatal03/testcase.h | 6 +-
29 files changed, 339 insertions(+), 252 deletions(-)
diff --git a/cpukit/posix/include/rtems/posix/psignalimpl.h b/cpukit/posix/include/rtems/posix/psignalimpl.h
index 3b76fc3..62c1a85 100644
--- a/cpukit/posix/include/rtems/posix/psignalimpl.h
+++ b/cpukit/posix/include/rtems/posix/psignalimpl.h
@@ -73,11 +73,25 @@ extern Chain_Control _POSIX_signals_Siginfo[ SIG_ARRAY_MAX ];
* Internal routines
*/
-#define _POSIX_signals_Acquire( lock_context ) \
- _Thread_queue_Acquire( &_POSIX_signals_Wait_queue, lock_context )
-
-#define _POSIX_signals_Release( lock_context ) \
- _Thread_queue_Release( &_POSIX_signals_Wait_queue, lock_context )
+RTEMS_INLINE_ROUTINE void _POSIX_signals_Acquire(
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Acquire(
+ &_POSIX_signals_Wait_queue,
+ &queue_context->Lock_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _POSIX_signals_Release(
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Release(
+ &_POSIX_signals_Wait_queue,
+ &queue_context->Lock_context
+ );
+}
/**
* @brief Unlock POSIX signals thread.
diff --git a/cpukit/posix/src/condwaitsupp.c b/cpukit/posix/src/condwaitsupp.c
index 9270c36..7dff27f 100644
--- a/cpukit/posix/src/condwaitsupp.c
+++ b/cpukit/posix/src/condwaitsupp.c
@@ -86,13 +86,14 @@ int _POSIX_Condition_variables_Wait_support(
}
if ( !already_timedout ) {
+ _Thread_queue_Context_set_expected_level( &queue_context, 2 );
_Thread_queue_Enqueue_critical(
&the_cond->Wait_queue.Queue,
POSIX_CONDITION_VARIABLES_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_CONDITION_VARIABLE,
timeout,
- &queue_context.Lock_context
+ &queue_context
);
} else {
_POSIX_Condition_variables_Release( the_cond, &queue_context );
diff --git a/cpukit/posix/src/killinfo.c b/cpukit/posix/src/killinfo.c
index b16b408..33754af 100644
--- a/cpukit/posix/src/killinfo.c
+++ b/cpukit/posix/src/killinfo.c
@@ -75,7 +75,7 @@ int _POSIX_signals_Send(
siginfo_t *siginfo;
POSIX_signals_Siginfo_node *psiginfo;
Thread_queue_Heads *heads;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Per_CPU_Control *cpu_self;
/*
@@ -334,14 +334,15 @@ post_process_signal:
*/
_POSIX_signals_Set_process_signals( mask );
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( _POSIX_signals_Vectors[ sig ].sa_flags == SA_SIGINFO ) {
psiginfo = (POSIX_signals_Siginfo_node *)
_Chain_Get_unprotected( &_POSIX_signals_Inactive_siginfo );
if ( !psiginfo ) {
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
_Thread_Dispatch_enable( cpu_self );
rtems_set_errno_and_return_minus_one( EAGAIN );
}
@@ -354,7 +355,7 @@ post_process_signal:
);
}
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
DEBUG_STEP("\n");
_Thread_Dispatch_enable( cpu_self );
return 0;
diff --git a/cpukit/posix/src/psignalclearsignals.c b/cpukit/posix/src/psignalclearsignals.c
index c785255..39ea41c 100644
--- a/cpukit/posix/src/psignalclearsignals.c
+++ b/cpukit/posix/src/psignalclearsignals.c
@@ -47,7 +47,7 @@ bool _POSIX_signals_Clear_signals(
{
sigset_t mask;
sigset_t signals_unblocked;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
bool do_callout;
POSIX_signals_Siginfo_node *psiginfo;
@@ -68,7 +68,8 @@ bool _POSIX_signals_Clear_signals(
/* XXX are we sure they can be cleared the same way? */
if ( do_signals_acquire_release ) {
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
}
if ( is_global ) {
@@ -102,7 +103,7 @@ bool _POSIX_signals_Clear_signals(
}
if ( do_signals_acquire_release ) {
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
}
return do_callout;
diff --git a/cpukit/posix/src/psignalsetprocesssignals.c b/cpukit/posix/src/psignalsetprocesssignals.c
index 8a25864..b755c2d 100644
--- a/cpukit/posix/src/psignalsetprocesssignals.c
+++ b/cpukit/posix/src/psignalsetprocesssignals.c
@@ -36,9 +36,10 @@ void _POSIX_signals_Set_process_signals(
sigset_t mask
)
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
_POSIX_signals_Pending |= mask;
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
}
diff --git a/cpukit/posix/src/psignalunblockthread.c b/cpukit/posix/src/psignalunblockthread.c
index b4475b2..d75e454 100644
--- a/cpukit/posix/src/psignalunblockthread.c
+++ b/cpukit/posix/src/psignalunblockthread.c
@@ -96,9 +96,9 @@ static void _POSIX_signals_Action_handler(
ISR_lock_Context *lock_context
)
{
- POSIX_API_Control *api;
- int signo;
- uint32_t hold_errno;
+ POSIX_API_Control *api;
+ int signo;
+ uint32_t hold_errno;
(void) action;
_Thread_State_release( executing, lock_context );
@@ -135,13 +135,16 @@ static void _POSIX_signals_Action_handler(
* processed at all. No point in doing this loop otherwise.
*/
while (1) {
- _POSIX_signals_Acquire( lock_context );
+ Thread_queue_Context queue_context;
+
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( !(api->signals_unblocked &
(api->signals_pending | _POSIX_signals_Pending)) ) {
- _POSIX_signals_Release( lock_context );
+ _POSIX_signals_Release( &queue_context );
break;
}
- _POSIX_signals_Release( lock_context );
+ _POSIX_signals_Release( &queue_context );
for ( signo = SIGRTMIN ; signo <= SIGRTMAX ; signo++ ) {
_POSIX_signals_Check_signal( api, signo, false );
diff --git a/cpukit/posix/src/pthreadjoin.c b/cpukit/posix/src/pthreadjoin.c
index f4a0676..86b8051 100644
--- a/cpukit/posix/src/pthreadjoin.c
+++ b/cpukit/posix/src/pthreadjoin.c
@@ -32,13 +32,15 @@
static int _POSIX_Threads_Join( pthread_t thread, void **value_ptr )
{
- Thread_Control *the_thread;
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- void *value;
+ Thread_Control *the_thread;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ void *value;
- the_thread = _Thread_Get( thread, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
+ the_thread = _Thread_Get( thread, &queue_context.Lock_context );
if ( the_thread == NULL ) {
return ESRCH;
@@ -48,29 +50,29 @@ static int _POSIX_Threads_Join( pthread_t thread, void **value_ptr )
executing = _Per_CPU_Get_executing( cpu_self );
if ( executing == the_thread ) {
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EDEADLK;
}
- _Thread_State_acquire_critical( the_thread, &lock_context );
+ _Thread_State_acquire_critical( the_thread, &queue_context.Lock_context );
if ( !_Thread_Is_joinable( the_thread ) ) {
- _Thread_State_release( the_thread, &lock_context );
+ _Thread_State_release( the_thread, &queue_context.Lock_context );
return EINVAL;
}
if ( _States_Is_waiting_for_join_at_exit( the_thread->current_state ) ) {
value = the_thread->Life.exit_value;
_Thread_Clear_state_locked( the_thread, STATES_WAITING_FOR_JOIN_AT_EXIT );
- _Thread_Dispatch_disable_with_CPU( cpu_self, &lock_context );
- _Thread_State_release( the_thread, &lock_context );
+ _Thread_Dispatch_disable_with_CPU( cpu_self, &queue_context.Lock_context );
+ _Thread_State_release( the_thread, &queue_context.Lock_context );
_Thread_Dispatch_enable( cpu_self );
} else {
_Thread_Join(
the_thread,
STATES_INTERRUPTIBLE_BY_SIGNAL | STATES_WAITING_FOR_JOIN,
executing,
- &lock_context
+ &queue_context
);
if ( _POSIX_Get_error_after_wait( executing ) != 0 ) {
diff --git a/cpukit/posix/src/sigaction.c b/cpukit/posix/src/sigaction.c
index 177dcd1..26df98d 100644
--- a/cpukit/posix/src/sigaction.c
+++ b/cpukit/posix/src/sigaction.c
@@ -33,7 +33,7 @@ int sigaction(
struct sigaction *__restrict oact
)
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
if ( !sig )
rtems_set_errno_and_return_minus_one( EINVAL );
@@ -51,7 +51,8 @@ int sigaction(
if ( sig == SIGKILL )
rtems_set_errno_and_return_minus_one( EINVAL );
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( oact )
*oact = _POSIX_signals_Vectors[ sig ];
@@ -76,7 +77,7 @@ int sigaction(
}
}
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
return 0;
}
diff --git a/cpukit/posix/src/sigtimedwait.c b/cpukit/posix/src/sigtimedwait.c
index ddc2884..7855bb0 100644
--- a/cpukit/posix/src/sigtimedwait.c
+++ b/cpukit/posix/src/sigtimedwait.c
@@ -69,14 +69,14 @@ int sigtimedwait(
const struct timespec *__restrict timeout
)
{
- Thread_Control *executing;
- POSIX_API_Control *api;
- Watchdog_Interval interval;
- siginfo_t signal_information;
- siginfo_t *the_info;
- int signo;
- ISR_lock_Context lock_context;
- int error;
+ Thread_Control *executing;
+ POSIX_API_Control *api;
+ Watchdog_Interval interval;
+ siginfo_t signal_information;
+ siginfo_t *the_info;
+ int signo;
+ Thread_queue_Context queue_context;
+ int error;
/*
* Error check parameters before disabling interrupts.
@@ -115,7 +115,8 @@ int sigtimedwait(
/* API signals pending? */
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( *set & api->signals_pending ) {
/* XXX real info later */
the_info->si_signo = _POSIX_signals_Get_lowest( api->signals_pending );
@@ -127,7 +128,7 @@ int sigtimedwait(
false,
false
);
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
the_info->si_code = SI_USER;
the_info->si_value.sival_int = 0;
@@ -139,7 +140,7 @@ int sigtimedwait(
if ( *set & _POSIX_signals_Pending ) {
signo = _POSIX_signals_Get_lowest( _POSIX_signals_Pending );
_POSIX_signals_Clear_signals( api, signo, the_info, true, false, false );
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
the_info->si_signo = signo;
the_info->si_code = SI_USER;
@@ -151,13 +152,14 @@ int sigtimedwait(
executing->Wait.option = *set;
executing->Wait.return_argument = the_info;
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&_POSIX_signals_Wait_queue.Queue,
POSIX_SIGNALS_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SIGNAL | STATES_INTERRUPTIBLE_BY_SIGNAL,
interval,
- &lock_context
+ &queue_context
);
/*
diff --git a/cpukit/sapi/src/interrtext.c b/cpukit/sapi/src/interrtext.c
index 3a0681d..3ae7315 100644
--- a/cpukit/sapi/src/interrtext.c
+++ b/cpukit/sapi/src/interrtext.c
@@ -45,7 +45,7 @@ static const char *const internal_error_text[] = {
"INTERNAL_ERROR_BAD_ATTRIBUTES",
"INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY",
"OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL",
- "INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE",
+ "INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE",
"INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0",
"OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP",
"INTERNAL_ERROR_GXX_KEY_ADD_FAILED",
diff --git a/cpukit/score/include/rtems/score/coremuteximpl.h b/cpukit/score/include/rtems/score/coremuteximpl.h
index f5faf95..e29d4b7 100644
--- a/cpukit/score/include/rtems/score/coremuteximpl.h
+++ b/cpukit/score/include/rtems/score/coremuteximpl.h
@@ -21,7 +21,6 @@
#include <rtems/score/coremutex.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/status.h>
-#include <rtems/score/sysstate.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
@@ -95,27 +94,13 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Release(
* @param[in] lock_context is the interrupt level
*/
Status_Control _CORE_mutex_Seize_interrupt_blocking(
- CORE_mutex_Control *the_mutex,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ CORE_mutex_Control *the_mutex,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
);
/**
- * @brief Verifies that a mutex blocking seize is performed safely.
- *
- * This macro is to verify that a mutex blocking seize is
- * performed from a safe system state. For example, one
- * cannot block inside an isr.
- *
- * @retval this method returns true if dispatch is in an unsafe state.
- */
-#define _CORE_mutex_Check_dispatch_for_seize(_wait) \
- (!_Thread_Dispatch_is_enabled() \
- && (_wait) \
- && (_System_state_Get() >= SYSTEM_STATE_UP))
-
-/**
* @brief Is mutex locked.
*
* This routine returns true if the mutex specified is locked and false
@@ -301,14 +286,6 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_mutex_Seize(
{
Status_Control status;
- if ( _CORE_mutex_Check_dispatch_for_seize( wait ) ) {
- _Terminate(
- INTERNAL_ERROR_CORE,
- false,
- INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE
- );
- }
-
_CORE_mutex_Acquire_critical( the_mutex, queue_context );
status = _CORE_mutex_Seize_interrupt_trylock(
@@ -330,7 +307,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_mutex_Seize(
the_mutex,
executing,
timeout,
- &queue_context->Lock_context
+ queue_context
);
}
diff --git a/cpukit/score/include/rtems/score/coresemimpl.h b/cpukit/score/include/rtems/score/coresemimpl.h
index ac90f20..a55089e 100644
--- a/cpukit/score/include/rtems/score/coresemimpl.h
+++ b/cpukit/score/include/rtems/score/coresemimpl.h
@@ -204,13 +204,14 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Seize(
return STATUS_UNSATISFIED;
}
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_semaphore->Wait_queue.Queue,
the_semaphore->operations,
executing,
STATES_WAITING_FOR_SEMAPHORE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h
index ea468e0..ca48db2 100644
--- a/cpukit/score/include/rtems/score/interr.h
+++ b/cpukit/score/include/rtems/score/interr.h
@@ -154,7 +154,7 @@ typedef enum {
INTERNAL_ERROR_BAD_ATTRIBUTES,
INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY,
OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL,
- INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE,
INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0,
OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP,
INTERNAL_ERROR_GXX_KEY_ADD_FAILED,
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 1081108..d102212 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -232,10 +232,10 @@ void _Thread_Exit(
);
void _Thread_Join(
- Thread_Control *the_thread,
- States_Control waiting_for_join,
- Thread_Control *executing,
- ISR_lock_Context *lock_context
+ Thread_Control *the_thread,
+ States_Control waiting_for_join,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
);
void _Thread_Cancel(
diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index 2859c79..4c2f8e2 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -70,6 +70,15 @@ typedef struct {
ISR_lock_Context Lock_context;
/**
+ * @brief The expected thread dispatch disable level for
+ * _Thread_queue_Enqueue_critical().
+ *
+ * In case the actual thread dispatch disable level is not equal to the
+ * expected level, then a fatal error occurs.
+ */
+ uint32_t expected_thread_dispatch_disable_level;
+
+ /**
* @brief Callout to unblock the thread in case it is actually a thread
* proxy.
*
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 7772959..b528c30 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -62,14 +62,35 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
Thread_queue_Context *queue_context
)
{
-#if defined(RTEMS_MULTIPROCESSING) && defined(RTEMS_DEBUG)
+#if defined(RTEMS_DEBUG)
+ queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef;
+#if defined(RTEMS_MULTIPROCESSING)
queue_context->mp_callout = NULL;
+#endif
#else
(void) queue_context;
#endif
}
/**
+ * @brief Sets the expected thread dispatch disable level in the thread queue
+ * context.
+ *
+ * @param queue_context The thread queue context.
+ * @param expected_level The expected thread dispatch disable level.
+ *
+ * @see _Thread_queue_Enqueue_critical().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_expected_level(
+ Thread_queue_Context *queue_context,
+ uint32_t expected_level
+)
+{
+ queue_context->expected_thread_dispatch_disable_level = expected_level;
+}
+
+/**
* @brief Sets the MP callout in the thread queue context.
*
* @param queue_context The thread queue context.
@@ -308,17 +329,19 @@ Thread_Control *_Thread_queue_Do_dequeue(
*
* void _Mutex_Obtain( Mutex *mutex )
* {
- * ISR_lock_Context lock_context;
- * Thread_Control *executing;
+ * Thread_queue_Context queue_context;
+ * Thread_Control *executing;
*
- * _Thread_queue_Acquire( &mutex->Queue, &lock_context );
+ * _Thread_queue_Context_initialize( &queue_context );
+ * _Thread_queue_Acquire( &mutex->Queue, &queue_context.Lock_context );
*
* executing = _Thread_Executing;
*
* if ( mutex->owner == NULL ) {
* mutex->owner = executing;
- * _Thread_queue_Release( &mutex->Queue, &lock_context );
+ * _Thread_queue_Release( &mutex->Queue, &queue_context.Lock_context );
* } else {
+ * _Thread_queue_Context_set_expected_level( &queue_context, 1 );
* _Thread_queue_Enqueue_critical(
* &mutex->Queue.Queue,
* MUTEX_TQ_OPERATIONS,
@@ -326,7 +349,7 @@ Thread_Control *_Thread_queue_Do_dequeue(
* STATES_WAITING_FOR_MUTEX,
* WATCHDOG_NO_TIMEOUT,
* 0,
- * &lock_context
+ * &queue_context
* );
* }
* }
@@ -338,7 +361,7 @@ Thread_Control *_Thread_queue_Do_dequeue(
* @param[in] state The new state of the thread.
* @param[in] timeout Interval to wait. Use WATCHDOG_NO_TIMEOUT to block
* potentially forever.
- * @param[in] lock_context The lock context of the lock acquire.
+ * @param[in] queue_context The thread queue context of the lock acquire.
*/
void _Thread_queue_Enqueue_critical(
Thread_queue_Queue *queue,
@@ -346,7 +369,7 @@ void _Thread_queue_Enqueue_critical(
Thread_Control *the_thread,
States_Control state,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
);
/**
@@ -361,16 +384,17 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue(
Watchdog_Interval timeout
)
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
- _Thread_queue_Acquire( the_thread_queue, &lock_context );
+ _Thread_queue_Acquire( the_thread_queue, &queue_context.Lock_context );
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_thread_queue->Queue,
operations,
the_thread,
state,
timeout,
- &lock_context
+ &queue_context
);
}
diff --git a/cpukit/score/src/condition.c b/cpukit/score/src/condition.c
index fae150a..e7d9805 100644
--- a/cpukit/score/src/condition.c
+++ b/cpukit/score/src/condition.c
@@ -53,8 +53,8 @@ static Condition_Control *_Condition_Get(
}
static Thread_Control *_Condition_Queue_acquire_critical(
- Condition_Control *condition,
- ISR_lock_Context *lock_context
+ Condition_Control *condition,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
@@ -63,24 +63,27 @@ static Thread_Control *_Condition_Queue_acquire_critical(
_Thread_queue_Queue_acquire_critical(
&condition->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Condition_Queue_release(
- Condition_Control *condition,
- ISR_lock_Context *lock_context
+ Condition_Control *condition,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &condition->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &condition->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
static Per_CPU_Control *_Condition_Do_wait(
struct _Condition_Control *_condition,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Condition_Control *condition;
@@ -88,16 +91,17 @@ static Per_CPU_Control *_Condition_Do_wait(
Per_CPU_Control *cpu_self;
condition = _Condition_Get( _condition );
- executing = _Condition_Queue_acquire_critical( condition, lock_context );
- cpu_self = _Thread_Dispatch_disable_critical( lock_context );
+ executing = _Condition_Queue_acquire_critical( condition, queue_context );
+ cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+ _Thread_queue_Context_set_expected_level( queue_context, 2 );
_Thread_queue_Enqueue_critical(
&condition->Queue.Queue,
CONDITION_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_CONDITION,
timeout,
- lock_context
+ queue_context
);
return cpu_self;
@@ -108,11 +112,12 @@ void _Condition_Wait(
struct _Mutex_Control *_mutex
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
- _ISR_lock_ISR_disable( &lock_context );
- cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
+ cpu_self = _Condition_Do_wait( _condition, 0, &queue_context );
_Mutex_Release( _mutex );
_Thread_Dispatch_enable( cpu_self );
@@ -125,27 +130,28 @@ int _Condition_Wait_timed(
const struct timespec *abstime
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- int eno;
- Watchdog_Interval ticks;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ int eno;
+ Watchdog_Interval ticks;
- _ISR_lock_ISR_disable( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return ETIMEDOUT;
default:
break;
}
- cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+ cpu_self = _Condition_Do_wait( _condition, ticks, &queue_context );
_Mutex_Release( _mutex );
executing = cpu_self->executing;
@@ -161,12 +167,13 @@ void _Condition_Wait_recursive(
struct _Mutex_recursive_Control *_mutex
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- unsigned int nest_level;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ unsigned int nest_level;
- _ISR_lock_ISR_disable( &lock_context );
- cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
+ cpu_self = _Condition_Do_wait( _condition, 0, &queue_context );
nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
@@ -182,28 +189,29 @@ int _Condition_Wait_recursive_timed(
const struct timespec *abstime
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- int eno;
- unsigned int nest_level;
- Watchdog_Interval ticks;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ int eno;
+ unsigned int nest_level;
+ Watchdog_Interval ticks;
- _ISR_lock_ISR_disable( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return ETIMEDOUT;
default:
break;
}
- cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+ cpu_self = _Condition_Do_wait( _condition, ticks, &queue_context );
nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
@@ -249,14 +257,14 @@ static void _Condition_Wake( struct _Condition_Control *_condition, int count )
condition = _Condition_Get( _condition );
_Thread_queue_Context_initialize( &context.Base );
_ISR_lock_ISR_disable( &context.Base.Lock_context );
- _Condition_Queue_acquire_critical( condition, &context.Base.Lock_context );
+ _Condition_Queue_acquire_critical( condition, &context.Base );
/*
* In common uses cases of condition variables there are normally no threads
* on the queue, so check this condition early.
*/
if ( __predict_true( _Thread_queue_Is_empty( &condition->Queue.Queue ) ) ) {
- _Condition_Queue_release( condition, &context.Base.Lock_context );
+ _Condition_Queue_release( condition, &context.Base );
return;
}
diff --git a/cpukit/score/src/corebarrierwait.c b/cpukit/score/src/corebarrierwait.c
index a1c862d..d939acf 100644
--- a/cpukit/score/src/corebarrierwait.c
+++ b/cpukit/score/src/corebarrierwait.c
@@ -45,13 +45,14 @@ Status_Control _CORE_barrier_Seize(
return STATUS_BARRIER_AUTOMATICALLY_RELEASED;
} else {
the_barrier->number_of_waiting_threads = number_of_waiting_threads;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_barrier->Wait_queue.Queue,
CORE_BARRIER_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_BARRIER,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/coremsgseize.c b/cpukit/score/src/coremsgseize.c
index 00ff437..6906328 100644
--- a/cpukit/score/src/coremsgseize.c
+++ b/cpukit/score/src/coremsgseize.c
@@ -114,13 +114,14 @@ Status_Control _CORE_message_queue_Seize(
executing->Wait.return_argument = size_p;
/* Wait.count will be filled in with the message priority */
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,
executing,
STATES_WAITING_FOR_MESSAGE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/coremsgsubmit.c b/cpukit/score/src/coremsgsubmit.c
index a623291..fb56ffe 100644
--- a/cpukit/score/src/coremsgsubmit.c
+++ b/cpukit/score/src/coremsgsubmit.c
@@ -132,13 +132,14 @@ Status_Control _CORE_message_queue_Submit(
executing->Wait.option = (uint32_t) size;
executing->Wait.count = submit_type;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,
executing,
STATES_WAITING_FOR_MESSAGE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
#endif
diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c
index 0fc63f4..37de3a1 100644
--- a/cpukit/score/src/coremutexseize.c
+++ b/cpukit/score/src/coremutexseize.c
@@ -22,13 +22,14 @@
#include <rtems/score/isr.h>
#include <rtems/score/coremuteximpl.h>
#include <rtems/score/statesimpl.h>
+#include <rtems/score/sysstate.h>
#include <rtems/score/thread.h>
Status_Control _CORE_mutex_Seize_interrupt_blocking(
- CORE_mutex_Control *the_mutex,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ CORE_mutex_Control *the_mutex,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
)
{
#if !defined(RTEMS_SMP)
@@ -51,23 +52,30 @@ Status_Control _CORE_mutex_Seize_interrupt_blocking(
* otherwise the current holder may be no longer the holder of the mutex
* once we released the lock.
*/
- _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+ _CORE_mutex_Release( the_mutex, queue_context );
#endif
_Thread_Inherit_priority( holder, executing );
#if !defined(RTEMS_SMP)
- _Thread_queue_Acquire( &the_mutex->Wait_queue, lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
+ _CORE_mutex_Acquire_critical( the_mutex, queue_context );
#endif
}
+#if defined(RTEMS_SMP)
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
+#else
+ _Thread_queue_Context_set_expected_level( queue_context, 2 );
+#endif
+
_Thread_queue_Enqueue_critical(
&the_mutex->Wait_queue.Queue,
the_mutex->operations,
executing,
STATES_WAITING_FOR_MUTEX,
timeout,
- lock_context
+ queue_context
);
#if !defined(RTEMS_SMP)
diff --git a/cpukit/score/src/corerwlockobtainread.c b/cpukit/score/src/corerwlockobtainread.c
index 5192eb1..bce992c 100644
--- a/cpukit/score/src/corerwlockobtainread.c
+++ b/cpukit/score/src/corerwlockobtainread.c
@@ -79,13 +79,14 @@ Status_Control _CORE_RWLock_Seize_for_reading(
executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_READ;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_RWLOCK,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/corerwlockobtainwrite.c b/cpukit/score/src/corerwlockobtainwrite.c
index 0536b82..c261d32 100644
--- a/cpukit/score/src/corerwlockobtainwrite.c
+++ b/cpukit/score/src/corerwlockobtainwrite.c
@@ -67,13 +67,14 @@ Status_Control _CORE_RWLock_Seize_for_writing(
executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_RWLOCK,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/futex.c b/cpukit/score/src/futex.c
index 980c7fb..a192509 100644
--- a/cpukit/score/src/futex.c
+++ b/cpukit/score/src/futex.c
@@ -50,52 +50,56 @@ static Futex_Control *_Futex_Get( struct _Futex_Control *_futex )
static Thread_Control *_Futex_Queue_acquire(
Futex_Control *futex,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&futex->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Futex_Queue_release(
- Futex_Control *futex,
- ISR_lock_Context *lock_context
+ Futex_Control *futex,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &futex->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &futex->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
{
- Futex_Control *futex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- int eno;
+ Futex_Control *futex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ int eno;
futex = _Futex_Get( _futex );
- executing = _Futex_Queue_acquire( futex, &lock_context );
+ executing = _Futex_Queue_acquire( futex, &queue_context );
if ( *uaddr == val ) {
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&futex->Queue.Queue,
FUTEX_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_FUTEX,
WATCHDOG_NO_TIMEOUT,
- &lock_context
+ &queue_context
);
eno = 0;
} else {
- _Futex_Queue_release( futex, &lock_context );
+ _Futex_Queue_release( futex, &queue_context );
eno = EWOULDBLOCK;
}
@@ -128,11 +132,11 @@ static Thread_Control *_Futex_Flush_filter(
int _Futex_Wake( struct _Futex_Control *_futex, int count )
{
- Futex_Control *futex;
+ Futex_Control *futex;
Futex_Context context;
futex = _Futex_Get( _futex );
- _Futex_Queue_acquire( futex, &context.Base.Lock_context );
+ _Futex_Queue_acquire( futex, &context.Base );
/*
* For some synchronization objects like barriers the _Futex_Wake() must be
@@ -140,7 +144,7 @@ int _Futex_Wake( struct _Futex_Control *_futex, int count )
* check this condition early.
*/
if ( __predict_true( _Thread_queue_Is_empty( &futex->Queue.Queue ) ) ) {
- _Futex_Queue_release( futex, &context.Base.Lock_context );
+ _Futex_Queue_release( futex, &context.Base );
return 0;
}
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index e93a8e4..dbc1d8c 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -79,47 +79,51 @@ static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
}
static Thread_Control *_Mutex_Queue_acquire(
- Mutex_Control *mutex,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&mutex->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Mutex_Queue_release(
- Mutex_Control *mutex,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &mutex->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
static void _Mutex_Acquire_slow(
- Mutex_Control *mutex,
- Thread_Control *owner,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_Control *owner,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
)
{
_Thread_Inherit_priority( owner, executing );
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&mutex->Queue.Queue,
MUTEX_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_MUTEX,
timeout,
- lock_context
+ queue_context
);
}
@@ -155,7 +159,7 @@ static void _Mutex_Release_slow(
&queue_context->Lock_context
);
} else {
- _Mutex_Queue_release( mutex, &queue_context->Lock_context );
+ _Mutex_Queue_release( mutex, queue_context );
}
if ( !keep_priority ) {
@@ -192,7 +196,7 @@ static void _Mutex_Release_critical(
|| !executing->priority_restore_hint;
if ( __predict_true( heads == NULL && keep_priority ) ) {
- _Mutex_Queue_release( mutex, &queue_context->Lock_context );
+ _Mutex_Queue_release( mutex, queue_context );
} else {
_Mutex_Release_slow(
mutex,
@@ -206,22 +210,23 @@ static void _Mutex_Release_critical(
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->owner;
if ( __predict_true( owner == NULL ) ) {
mutex->owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
} else {
- _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, 0, &queue_context );
}
}
@@ -230,20 +235,21 @@ int _Mutex_Acquire_timed(
const struct timespec *abstime
)
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->owner;
if ( __predict_true( owner == NULL ) ) {
mutex->owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return 0;
} else {
@@ -251,17 +257,17 @@ int _Mutex_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return ETIMEDOUT;
default:
break;
}
- _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, ticks, &queue_context );
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
}
@@ -269,14 +275,15 @@ int _Mutex_Acquire_timed(
int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
- int eno;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
+ int eno;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->owner;
@@ -288,7 +295,7 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return eno;
}
@@ -301,7 +308,7 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
_Assert( mutex->owner == executing );
@@ -318,24 +325,25 @@ static Mutex_recursive_Control *_Mutex_recursive_Get(
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
} else {
- _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
+ _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &queue_context );
}
}
@@ -345,24 +353,25 @@ int _Mutex_recursive_Acquire_timed(
)
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return 0;
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return 0;
} else {
@@ -370,11 +379,11 @@ int _Mutex_recursive_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return ETIMEDOUT;
default:
break;
@@ -385,7 +394,7 @@ int _Mutex_recursive_Acquire_timed(
owner,
executing,
ticks,
- &lock_context
+ &queue_context
);
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
@@ -395,13 +404,14 @@ int _Mutex_recursive_Acquire_timed(
int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
int eno;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.owner;
@@ -416,7 +426,7 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return eno;
}
@@ -430,10 +440,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire(
- &mutex->Mutex,
- &queue_context.Lock_context
- );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
_Assert( mutex->Mutex.owner == executing );
@@ -444,7 +451,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
} else {
mutex->nest_level = nest_level - 1;
- _Mutex_Queue_release( &mutex->Mutex, &queue_context.Lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
}
}
diff --git a/cpukit/score/src/semaphore.c b/cpukit/score/src/semaphore.c
index 72abd9e..03af9cf 100644
--- a/cpukit/score/src/semaphore.c
+++ b/cpukit/score/src/semaphore.c
@@ -56,53 +56,58 @@ static Semaphore_Control *_Semaphore_Get(
}
static Thread_Control *_Semaphore_Queue_acquire(
- Semaphore_Control *sem,
- ISR_lock_Context *lock_context
+ Semaphore_Control *sem,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&sem->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Semaphore_Queue_release(
- Semaphore_Control *sem,
- ISR_lock_Context *lock_context
+ Semaphore_Control *sem,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &sem->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &sem->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
void _Semaphore_Wait( struct _Semaphore_Control *_sem )
{
- Semaphore_Control *sem ;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- unsigned int count;
+ Semaphore_Control *sem ;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ unsigned int count;
sem = _Semaphore_Get( _sem );
- executing = _Semaphore_Queue_acquire( sem, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Semaphore_Queue_acquire( sem, &queue_context );
count = sem->count;
if ( count > 0 ) {
sem->count = count - 1;
- _Semaphore_Queue_release( sem, &lock_context );
+ _Semaphore_Queue_release( sem, &queue_context );
} else {
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&sem->Queue.Queue,
SEMAPHORE_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE,
WATCHDOG_NO_TIMEOUT,
- &lock_context
+ &queue_context
);
}
}
@@ -115,13 +120,13 @@ void _Semaphore_Post( struct _Semaphore_Control *_sem )
sem = _Semaphore_Get( _sem );
_Thread_queue_Context_initialize( &queue_context );
- _Semaphore_Queue_acquire( sem, &queue_context.Lock_context );
+ _Semaphore_Queue_acquire( sem, &queue_context );
heads = sem->Queue.Queue.heads;
if ( heads == NULL ) {
_Assert( sem->count < UINT_MAX );
++sem->count;
- _Semaphore_Queue_release( sem, &queue_context.Lock_context );
+ _Semaphore_Queue_release( sem, &queue_context );
} else {
const Thread_queue_Operations *operations;
Thread_Control *first;
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 4eaafa9..1e95003 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -40,7 +40,7 @@ void _Thread_queue_Enqueue_critical(
Thread_Control *the_thread,
States_Control state,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Per_CPU_Control *cpu_self;
@@ -61,8 +61,19 @@ void _Thread_queue_Enqueue_critical(
( *operations->enqueue )( queue, the_thread );
_Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
- cpu_self = _Thread_Dispatch_disable_critical( lock_context );
- _Thread_queue_Queue_release( queue, lock_context );
+ cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+ _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
+
+ if (
+ cpu_self->thread_dispatch_disable_level
+ != queue_context->expected_thread_dispatch_disable_level
+ ) {
+ _Terminate(
+ INTERNAL_ERROR_CORE,
+ false,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
+ );
+ }
/*
* Set the blocking state for this thread queue in the thread.
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 3bddac4..f155980 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -443,10 +443,10 @@ static void _Thread_Finalize_life_change(
}
void _Thread_Join(
- Thread_Control *the_thread,
- States_Control waiting_for_join,
- Thread_Control *executing,
- ISR_lock_Context *lock_context
+ Thread_Control *the_thread,
+ States_Control waiting_for_join,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
)
{
_Assert( the_thread != executing );
@@ -462,7 +462,7 @@ void _Thread_Join(
executing,
waiting_for_join,
WATCHDOG_NO_TIMEOUT,
- lock_context
+ queue_context
);
}
@@ -524,14 +524,16 @@ void _Thread_Cancel(
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing )
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
- _Thread_State_acquire( the_thread, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Context_set_expected_level( &queue_context, 2 );
+ _Thread_State_acquire( the_thread, &queue_context.Lock_context );
_Thread_Join(
the_thread,
STATES_WAITING_FOR_JOIN,
executing,
- &lock_context
+ &queue_context
);
_Thread_Cancel( the_thread, executing, NULL );
}
diff --git a/testsuites/sptests/spfatal03/testcase.h b/testsuites/sptests/spfatal03/testcase.h
index 86b3003..34a20f6 100644
--- a/testsuites/sptests/spfatal03/testcase.h
+++ b/testsuites/sptests/spfatal03/testcase.h
@@ -16,7 +16,7 @@
#define FATAL_ERROR_EXPECTED_SOURCE INTERNAL_ERROR_CORE
#define FATAL_ERROR_EXPECTED_IS_INTERNAL FALSE
#define FATAL_ERROR_EXPECTED_ERROR \
- INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
void force_error(void)
{
@@ -26,7 +26,7 @@ void force_error(void)
status = rtems_semaphore_create(
rtems_build_name( 'S','0',' ',' '),
- 1,
+ 0,
RTEMS_LOCAL|
RTEMS_SIMPLE_BINARY_SEMAPHORE,
0,
@@ -37,7 +37,7 @@ void force_error(void)
printk("Obtain semaphore in dispatching critical section\n");
_Thread_Dispatch_disable();
- status = rtems_semaphore_obtain( mutex, RTEMS_DEFAULT_OPTIONS, 0 );
+ status = rtems_semaphore_obtain( mutex, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
/* !!! SHOULD NOT RETURN FROM THE ABOVE CALL */
rtems_test_assert( 0 );
--
1.8.4.5