[PATCH v2 5/5] score: Add deadlock detection

Chris Johns chrisj at rtems.org
Wed Jul 27 04:51:53 UTC 2016


On 26/07/2016 19:20, Sebastian Huber wrote:
> The mutex objects use the owner field of the thread queues for the mutex
> owner.  Use this and add a deadlock detection to
> _Thread_queue_Enqueue_critical() for thread queues with an owner.
>
> Update #2412.
> Update #2556.
> Close #2765.
> ---
>   cpukit/sapi/src/interrtext.c                       |   5 +-
>   cpukit/score/include/rtems/score/interr.h          |   3 +-
>   cpukit/score/include/rtems/score/thread.h          |   6 +
>   cpukit/score/include/rtems/score/threadq.h         |  45 ++-
>   cpukit/score/include/rtems/score/threadqimpl.h     |  38 ++
>   cpukit/score/src/coremutexseize.c                  |   9 +
>   cpukit/score/src/mutex.c                           |   4 +
>   cpukit/score/src/threadqenqueue.c                  | 256 ++++++++++++-
>   testsuites/sptests/spinternalerror02/init.c        |   4 +-
>   .../spinternalerror02/spinternalerror02.scn        |   3 +-
>   testsuites/sptests/spmutex01/init.c                | 413 +++++++++++++++++++--
>   testsuites/sptests/spmutex01/spmutex01.doc         |   5 +
>   testsuites/sptests/spsyslock01/init.c              |  79 ++--
>   13 files changed, 768 insertions(+), 102 deletions(-)
>
> diff --git a/cpukit/sapi/src/interrtext.c b/cpukit/sapi/src/interrtext.c
> index 3ae7315..8408061 100644
> --- a/cpukit/sapi/src/interrtext.c
> +++ b/cpukit/sapi/src/interrtext.c
> @@ -7,7 +7,7 @@
>    */
>
>   /*
> - * Copyright (c) 2012-2015 embedded brains GmbH.  All rights reserved.
> + * Copyright (c) 2012, 2016 embedded brains GmbH.  All rights reserved.
>    *
>    *  embedded brains GmbH
>    *  Dornierstr. 4
> @@ -54,7 +54,8 @@ static const char *const internal_error_text[] = {
>     "INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR",
>     "INTERNAL_ERROR_RESOURCE_IN_USE",
>     "INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL",
> -  "INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL"
> +  "INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL",
> +  "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK"
>   };
>
>   const char *rtems_internal_error_text( rtems_fatal_code error )
> diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h
> index 8d4c104..845dc6f 100644
> --- a/cpukit/score/include/rtems/score/interr.h
> +++ b/cpukit/score/include/rtems/score/interr.h
> @@ -163,7 +163,8 @@ typedef enum {
>     INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR,
>     INTERNAL_ERROR_RESOURCE_IN_USE,
>     INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL,
> -  INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
> +  INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL,
> +  INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
>   } Internal_errors_Core_list;
>
>   typedef CPU_Uint32ptr Internal_errors_t;
> diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
> index 85d759c..d10143b 100644
> --- a/cpukit/score/include/rtems/score/thread.h
> +++ b/cpukit/score/include/rtems/score/thread.h
> @@ -327,6 +327,12 @@ typedef struct {
>        */
>       Chain_Control Pending_requests;
>     } Lock;
> +
> +  /**
> +   * @brief Thread queue link provided for use by the thread wait lock owner to
> +   * build a thread queue path.
> +   */
> +  Thread_queue_Link Link;
>   #endif
>
>     /**
> diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
> index 9a17804..a39a031 100644
> --- a/cpukit/score/include/rtems/score/threadq.h
> +++ b/cpukit/score/include/rtems/score/threadq.h
> @@ -49,6 +49,17 @@ typedef struct Thread_queue_Operations Thread_queue_Operations;
>
>   typedef struct Thread_queue_Path Thread_queue_Path;
>
> +/**
> + * @brief Thread queue deadlock callout.
> + *
> + * @param the_thread The thread that detected the deadlock.
> + *
> + * @see _Thread_queue_Context_set_deadlock_callout().
> + */
> +typedef void ( *Thread_queue_Deadlock_callout )(
> +  Thread_Control *the_thread
> +);
> +
>   #if defined(RTEMS_MULTIPROCESSING)
>   /**
>    * @brief Multiprocessing (MP) support callout for thread queue operations.
> @@ -117,6 +128,17 @@ typedef struct {
>     uint64_t timeout;
>
>     /**
> +   * @brief Invoked in case of a detected deadlock.
> +   *
> +   * Must be initialized for _Thread_queue_Enqueue_critical() in case the
> +   * thread queue may have an owner, e.g. for mutex objects.
> +   *
> +   * @see _Thread_queue_Context_set_deadlock_callout().
> +   */
> +  Thread_queue_Deadlock_callout deadlock_callout;
> +
> +#if defined(RTEMS_MULTIPROCESSING)
> +  /**
>      * @brief Callout to unblock the thread in case it is actually a thread
>      * proxy.
>      *
> @@ -126,7 +148,6 @@ typedef struct {
>      *
>      * @see _Thread_queue_Context_set_MP_callout().
>      */
> -#if defined(RTEMS_MULTIPROCESSING)
>     Thread_queue_MP_callout mp_callout;
>   #endif
>
> @@ -175,6 +196,28 @@ typedef struct {
>    */
>   typedef struct {
>     /**
> +   * @brief Node to register this link in the global thread queue links lookup
> +   * tree.
> +   */
> +  RBTree_Node Registry_node;
> +
> +  /**
> +   * @brief The source thread queue determined by the thread queue owner.
> +   */
> +  Thread_queue_Queue *source;
> +
> +  /**
> +   * @brief The target thread queue determined by the thread wait queue of the
> +   * source owner.
> +   */
> +  Thread_queue_Queue *target;
> +
> +  /**
> +   * @brief Node to add this link to a thread queue path.
> +   */
> +  Chain_Node Path_node;
> +
> +  /**
>      * @brief The owner of this thread queue link.
>      */
>     Thread_Control *owner;
> diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
> index 11dc6c5..fba6856 100644
> --- a/cpukit/score/include/rtems/score/threadqimpl.h
> +++ b/cpukit/score/include/rtems/score/threadqimpl.h
> @@ -52,6 +52,11 @@ extern "C" {
>   struct Thread_queue_Path {
>   #if defined(RTEMS_SMP)
>     /**
> +   * @brief The chain of thread queue links defining the thread queue path.
> +   */
> +  Chain_Control Links;
> +
> +  /**
>      * @brief The start of a thread queue path.
>      */
>     Thread_queue_Link Start;
> @@ -86,6 +91,16 @@ typedef struct {
>   } Thread_queue_Syslock_queue;
>
>   /**
> + * @brief Sets the thread wait return code to STATUS_DEADLOCK.
> + */
> +void _Thread_queue_Deadlock_status( Thread_Control *the_thread );
> +
> +/**
> + * @brief Results in an INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal error.
> + */
> +void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread );
> +
> +/**
>    * @brief Initializes a thread queue context.
>    *
>    * @param queue_context The thread queue context to initialize.
> @@ -97,6 +112,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
>   #if defined(RTEMS_DEBUG)
>     memset( queue_context, 0, sizeof( *queue_context ) );
>     queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef;
> +  queue_context->deadlock_callout = _Thread_queue_Deadlock_fatal;
>   #else
>     (void) queue_context;
>   #endif
> @@ -173,6 +189,28 @@ _Thread_queue_Context_set_absolute_timeout(
>   }
>
>   /**
> + * @brief Sets the deadlock callout in the thread queue
> + * context.
> + *
> + * A deadlock callout must be provided for _Thread_queue_Enqueue_critical()
> + * operations that operate on thread queues which may have an owner, e.g. mutex
> + * objects.  Available deadlock callouts are _Thread_queue_Deadlock_status()
> + * and _Thread_queue_Deadlock_fatal().
> + *
> + * @param queue_context The thread queue context.
> + * @param deadlock_callout The deadlock callout.
> + *
> + * @see _Thread_queue_Enqueue_critical().
> + */
> +RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout(
> +  Thread_queue_Context          *queue_context,
> +  Thread_queue_Deadlock_callout  deadlock_callout
> +)
> +{
> +  queue_context->deadlock_callout = deadlock_callout;
> +}
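
Just to restate the API for readers following along: a condensed
sketch using only the functions added in this patch (the
coremutexseize.c and mutex.c hunks below are the real call sites):

  static void example_prepare_context( Thread_queue_Context *queue_context )
  {
    /* Initialize the context, then pick the deadlock policy: an error
       status for the caller, or a fatal system error. */
    _Thread_queue_Context_initialize( queue_context );
    _Thread_queue_Context_set_deadlock_callout(
      queue_context,
      _Thread_queue_Deadlock_status /* or _Thread_queue_Deadlock_fatal */
    );
  }
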
> +
> +/**
>    * @brief Sets the MP callout in the thread queue context.
>    *
>    * @param queue_context The thread queue context.
> diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c
> index dacb274..cfefc50 100644
> --- a/cpukit/score/src/coremutexseize.c
> +++ b/cpukit/score/src/coremutexseize.c
> @@ -62,6 +62,11 @@ Status_Control _CORE_mutex_Seize_slow(
>     _Thread_queue_Context_set_expected_level( queue_context, 2 );
>   #endif
>
> +  _Thread_queue_Context_set_deadlock_callout(
> +    queue_context,
> +    _Thread_queue_Deadlock_status
> +  );
> +
>     _Thread_queue_Enqueue_critical(
>       &the_mutex->Wait_queue.Queue,
>       CORE_MUTEX_TQ_PRIORITY_INHERIT_OPERATIONS,
> @@ -87,6 +92,10 @@ Status_Control _CORE_mutex_Seize_no_protocol_slow(
>   {
>     if ( wait ) {
>       _Thread_queue_Context_set_expected_level( queue_context, 1 );
> +    _Thread_queue_Context_set_deadlock_callout(
> +      queue_context,
> +      _Thread_queue_Deadlock_status
> +    );
>       _Thread_queue_Enqueue_critical(
>         &the_mutex->Wait_queue.Queue,
>         operations,
> diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
> index 12a4f29..085d5c2 100644
> --- a/cpukit/score/src/mutex.c
> +++ b/cpukit/score/src/mutex.c
> @@ -108,6 +108,10 @@ static void _Mutex_Acquire_slow(
>   )
>   {
>     _Thread_queue_Context_set_expected_level( queue_context, 1 );
> +  _Thread_queue_Context_set_deadlock_callout(
> +    queue_context,
> +    _Thread_queue_Deadlock_fatal
> +  );
>     _Thread_queue_Enqueue_critical(
>       &mutex->Queue.Queue,
>       MUTEX_TQ_OPERATIONS,
> diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
> index 19c345b..8ccbf68 100644
> --- a/cpukit/score/src/threadqenqueue.c
> +++ b/cpukit/score/src/threadqenqueue.c
> @@ -9,6 +9,8 @@
>    *  COPYRIGHT (c) 1989-2014.
>    *  On-Line Applications Research Corporation (OAR).
>    *
> + *  Copyright (c) 2015, 2016 embedded brains GmbH.
> + *
>    *  The license and distribution terms for this file may be
>    *  found in the file LICENSE in this distribution or at
>    *  http://www.rtems.org/license/LICENSE.
> @@ -34,49 +36,266 @@
>   #define THREAD_QUEUE_READY_AGAIN \
>     (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)
>
> +#if defined(RTEMS_SMP)
> +/*
> + * A global registry of active thread queue links is used to provide deadlock
> + * detection on SMP configurations.  This is simple to implement and no
> + * additional storage is required for the thread queues.  The disadvantage is
> + * that this global registry is not scalable and may lead to lock contention.

  The disadvantage is the global registry is not scalable ...

> + * However, the registry is only used in case of nested resource conflicts.  In
> + * this case, the application is already in trouble.
> + */

How does a user know this, or detect that there may be an issue? Is
there a flag, a count, or something else kept that indicates there is
a problem, and therefore an application problem?

Writing applications for real-time SMP is new and difficult, and it
would be good to provide users with relevant indications when things
are not going well.
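
From the test changes below, the indication appears to be per call
only: a Classic API obtain fails with RTEMS_INCORRECT_STATE,
pthread_mutex_lock() returns EDEADLK, and the C11 and self-contained
mutexes terminate with the new INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
fatal error. A sketch of the Classic case, condensed from spmutex01:

  #include <rtems.h>

  /* Condensed from spmutex01 below: an obtain that would deadlock now
     fails with a status code instead of blocking. */
  static void try_obtain( rtems_id mtx )
  {
    rtems_status_code sc;

    sc = rtems_semaphore_obtain( mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

    if ( sc == RTEMS_INCORRECT_STATE ) {
      /* Deadlock detected: handle or report it here. */
    }
  }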

> +
> +typedef struct {
> +  ISR_lock_Control Lock;
> +
> +  RBTree_Control Links;
> +} Thread_queue_Links;
> +
> +static Thread_queue_Links _Thread_queue_Links = {
> +  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
> +  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
> +};
> +
> +static bool _Thread_queue_Link_equal(
> +  const void        *left,
> +  const RBTree_Node *right
> +)
> +{
> +  const Thread_queue_Queue *the_left;
> +  const Thread_queue_Link  *the_right;
> +
> +  the_left = left;
> +  the_right = (Thread_queue_Link *) right;
> +
> +  return the_left == the_right->source;
> +}
> +
> +static bool _Thread_queue_Link_less(
> +  const void        *left,
> +  const RBTree_Node *right
> +)
> +{
> +  const Thread_queue_Queue *the_left;
> +  const Thread_queue_Link  *the_right;
> +
> +  the_left = left;
> +  the_right = (Thread_queue_Link *) right;
> +
> +  return (uintptr_t) the_left < (uintptr_t) the_right->source;
> +}
> +
> +static void *_Thread_queue_Link_map( RBTree_Node *node )
> +{
> +  return node;
> +}
> +
> +static Thread_queue_Link *_Thread_queue_Link_find(
> +  Thread_queue_Links *links,
> +  Thread_queue_Queue *source
> +)
> +{
> +  return _RBTree_Find_inline(
> +    &links->Links,
> +    source,
> +    _Thread_queue_Link_equal,
> +    _Thread_queue_Link_less,
> +    _Thread_queue_Link_map
> +  );
> +}
> +
> +static bool _Thread_queue_Link_add(
> +  Thread_queue_Link  *link,
> +  Thread_queue_Queue *source,
> +  Thread_queue_Queue *target
> +)
> +{
> +  Thread_queue_Links *links;
> +  Thread_queue_Queue *recursive_target;
> +  ISR_lock_Context    lock_context;
> +
> +  links = &_Thread_queue_Links;
> +  recursive_target = target;
> +
> +  _ISR_lock_Acquire( &links->Lock, &lock_context );
> +
> +  while ( true ) {
> +    Thread_queue_Link *recursive_link;
> +
> +    recursive_link = _Thread_queue_Link_find( links, recursive_target );
> +
> +    if ( recursive_link == NULL ) {
> +      break;
> +    }
> +
> +    recursive_target = recursive_link->target;
> +
> +    if ( recursive_target == source ) {
> +      _ISR_lock_Release( &links->Lock, &lock_context );
> +      return false;
> +    }
> +  }
> +
> +  link->source = source;
> +  link->target = target;
> +  _RBTree_Insert_inline(
> +    &links->Links,
> +    &link->Registry_node,
> +    source,
> +    _Thread_queue_Link_less
> +  );
> +
> +  _ISR_lock_Release( &links->Lock, &lock_context );
> +  return true;
> +}
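
To check my reading of the walk above: starting from the proposed
target, it follows the already registered source -> target links, and
the new link is refused if the chain reaches the proposed source,
i.e. the link would close a cycle. A stand-alone model of just that
check (hypothetical, with a flat array instead of the red-black tree):

  #include <stdbool.h>
  #include <stddef.h>

  /* Each registered link records which queue the blocked thread came
     from (source) and which queue its owner is blocked on (target). */
  typedef struct {
    const void *source;
    const void *target;
  } model_link;

  static const model_link *find_link(
    const model_link *links,
    size_t            n,
    const void       *source
  )
  {
    size_t i;

    for ( i = 0; i < n; ++i ) {
      if ( links[ i ].source == source ) {
        return &links[ i ];
      }
    }

    return NULL;
  }

  /* Mirror of the loop in _Thread_queue_Link_add(): follow the chain
     of links starting at the proposed target; reaching the proposed
     source means the new link would close a cycle. */
  static bool would_deadlock(
    const model_link *links,
    size_t            n,
    const void       *source,
    const void       *target
  )
  {
    const void *recursive_target = target;

    while ( true ) {
      const model_link *recursive_link;

      recursive_link = find_link( links, n, recursive_target );

      if ( recursive_link == NULL ) {
        return false;
      }

      recursive_target = recursive_link->target;

      if ( recursive_target == source ) {
        return true;
      }
    }
  }

  /* With one existing link Q1 -> Q2, asking to add Q2 -> Q1 reports
     the deadlock: would_deadlock( links, 1, &q2, &q1 ) == true. */
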
> +
> +static void _Thread_queue_Link_remove( Thread_queue_Link *link )
> +{
> +  Thread_queue_Links *links;
> +  ISR_lock_Context    lock_context;
> +
> +  links = &_Thread_queue_Links;
> +
> +  _ISR_lock_Acquire( &links->Lock, &lock_context );
> +  _RBTree_Extract( &links->Links, &link->Registry_node );
> +  _ISR_lock_Release( &links->Lock, &lock_context );
> +}
> +#endif
> +
>   static void _Thread_queue_Path_release( Thread_queue_Path *path )
>   {
>   #if defined(RTEMS_SMP)
> -  Thread_queue_Link *link;
> +  Chain_Node *head;
> +  Chain_Node *node;
>
> -  link = &path->Start;
> +  head = _Chain_Head( &path->Links );
> +  node = _Chain_Last( &path->Links );
> +
> +  while ( head != node ) {
> +    Thread_queue_Link *link;
> +
> +    link = RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node );
> +
> +    if ( link->Queue_context.Wait.queue_lock != NULL ) {
> +      _Thread_queue_Link_remove( link );
> +    }
>
> -  if ( link->owner != NULL ) {
>       _Thread_Wait_release_critical( link->owner, &link->Queue_context );
> +
> +    node = _Chain_Previous( node );
> +#if defined(RTEMS_DEBUG)
> +    _Chain_Set_off_chain( &link->Path_node );
> +#endif
>     }
>   #else
>     (void) path;
>   #endif
>   }
>
> -static void _Thread_queue_Path_acquire(
> +static bool _Thread_queue_Path_acquire(
>     Thread_Control     *the_thread,
>     Thread_queue_Queue *queue,
>     Thread_queue_Path  *path
>   )
>   {
> -#if defined(RTEMS_SMP)
>     Thread_Control     *owner;
> +#if defined(RTEMS_SMP)
>     Thread_queue_Link  *link;
> +  Thread_queue_Queue *target;
> +
> +  _Chain_Initialize_empty( &path->Links );
> +  _Chain_Initialize_node( &path->Start.Path_node );
> +  _Thread_queue_Context_initialize( &path->Start.Queue_context );
>
>     owner = queue->owner;
>
>     if ( owner == NULL ) {
> -    return;
> +    return true;
> +  }
> +
> +  if ( owner == the_thread ) {
> +    return false;
>     }
>
>     link = &path->Start;
> -  link->owner = owner;
>
> -  _Thread_Wait_acquire_default_critical(
> -    owner,
> -    &link->Queue_context.Lock_context
> -  );
> +  do {
> +    _Chain_Append_unprotected( &path->Links, &link->Path_node );
> +    link->owner = owner;
> +
> +    _Thread_Wait_acquire_default_critical(
> +      owner,
> +      &link->Queue_context.Lock_context
> +    );
> +
> +    target = owner->Wait.queue;
> +    link->Queue_context.Wait.queue = target;
> +    link->Queue_context.Wait.operations = owner->Wait.operations;
> +
> +    if ( target != NULL ) {
> +      if ( _Thread_queue_Link_add( link, queue, target ) ) {
> +        link->Queue_context.Wait.queue_lock = &target->Lock;
> +        _Chain_Append_unprotected(
> +          &owner->Wait.Lock.Pending_requests,
> +          &link->Queue_context.Wait.Gate.Node
> +        );
> +        _Thread_Wait_release_default_critical(
> +          owner,
> +          &link->Queue_context.Lock_context
> +        );
> +        _Thread_Wait_acquire_queue_critical(
> +          &target->Lock,
> +          &link->Queue_context
> +        );
> +
> +        if ( link->Queue_context.Wait.queue == NULL ) {
> +          return true;
> +        }
> +      } else {
> +        link->Queue_context.Wait.queue_lock = NULL;
> +        _Thread_queue_Path_release( path );
> +        return false;
> +      }
> +    } else {
> +      link->Queue_context.Wait.queue_lock = NULL;
> +      return true;
> +    }
> +
> +    link = &owner->Wait.Link;
> +    queue = target;
> +    owner = queue->owner;
> +  } while ( owner != NULL );
>   #else
> -  (void) the_thread;
> -  (void) queue;
> -  (void) path;
> +  do {
> +    owner = queue->owner;
> +
> +    if ( owner == NULL ) {
> +      return true;
> +    }
> +
> +    if ( owner == the_thread ) {
> +      return false;
> +    }
> +
> +    queue = owner->Wait.queue;
> +  } while ( queue != NULL );
>   #endif
> +
> +  return true;
> +}
> +
> +void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
> +{
> +  the_thread->Wait.return_code = STATUS_DEADLOCK;
> +}
> +
> +void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
> +{
> +  _Terminate(
> +    INTERNAL_ERROR_CORE,
> +    false,
> +    INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
> +  );
>   }
>
>   void _Thread_queue_Enqueue_critical(
> @@ -99,8 +318,15 @@ void _Thread_queue_Enqueue_critical(
>
>     _Thread_Wait_claim( the_thread, queue, operations );
>
> -  _Thread_queue_Path_acquire( the_thread, queue, &path );
> +  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
> +    _Thread_Wait_restore_default( the_thread );
> +    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
> +    ( *queue_context->deadlock_callout )( the_thread );
> +    return;
> +  }
> +
>     ( *operations->enqueue )( queue, the_thread, &path );
> +
>     _Thread_queue_Path_release( &path );
>
>     the_thread->Wait.return_code = STATUS_SUCCESSFUL;
> diff --git a/testsuites/sptests/spinternalerror02/init.c b/testsuites/sptests/spinternalerror02/init.c
> index cbc81a5..eac90a0 100644
> --- a/testsuites/sptests/spinternalerror02/init.c
> +++ b/testsuites/sptests/spinternalerror02/init.c
> @@ -1,5 +1,5 @@
>   /*
> - * Copyright (c) 2012-2015 embedded brains GmbH.  All rights reserved.
> + * Copyright (c) 2012, 2016 embedded brains GmbH.  All rights reserved.
>    *
>    *  embedded brains GmbH
>    *  Donierstr. 4
> @@ -36,7 +36,7 @@ static void test_internal_error_text(void)
>     } while ( text != text_last );
>
>     rtems_test_assert(
> -    error - 3 == INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
> +    error - 3 == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
>     );
>   }
>
> diff --git a/testsuites/sptests/spinternalerror02/spinternalerror02.scn b/testsuites/sptests/spinternalerror02/spinternalerror02.scn
> index c6e85b1..ff04560 100644
> --- a/testsuites/sptests/spinternalerror02/spinternalerror02.scn
> +++ b/testsuites/sptests/spinternalerror02/spinternalerror02.scn
> @@ -17,7 +17,7 @@ INTERNAL_ERROR_BAD_STACK_HOOK
>   INTERNAL_ERROR_BAD_ATTRIBUTES
>   INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY
>   OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL
> -INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE
> +INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
>   INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0
>   OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP
>   INTERNAL_ERROR_GXX_KEY_ADD_FAILED
> @@ -27,6 +27,7 @@ INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR
>   INTERNAL_ERROR_RESOURCE_IN_USE
>   INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL
>   INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
> +INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
>   ?
>   ?
>   INTERNAL_ERROR_CORE
> diff --git a/testsuites/sptests/spmutex01/init.c b/testsuites/sptests/spmutex01/init.c
> index 76d62c0..c363370 100644
> --- a/testsuites/sptests/spmutex01/init.c
> +++ b/testsuites/sptests/spmutex01/init.c
> @@ -16,33 +16,65 @@
>     #include "config.h"
>   #endif
>
> +#include <threads.h>
> +#include <setjmp.h>
> +
> +#include <rtems.h>
> +#include <rtems/libcsupport.h>
> +
> +#ifdef RTEMS_POSIX_API
> +#include <errno.h>
> +#include <pthread.h>
> +#endif
> +
>   #include "tmacros.h"
>
>   const char rtems_test_name[] = "SPMUTEX 1";
>
>   #define TASK_COUNT 5
>
> +#define MTX_COUNT 3
> +
>   typedef enum {
>     REQ_WAKE_UP_MASTER = RTEMS_EVENT_0,
>     REQ_WAKE_UP_HELPER = RTEMS_EVENT_1,
> -  REQ_MTX_OBTAIN = RTEMS_EVENT_2,
> -  REQ_MTX_RELEASE = RTEMS_EVENT_3
> +  REQ_MTX_0_OBTAIN = RTEMS_EVENT_2,
> +  REQ_MTX_0_RELEASE = RTEMS_EVENT_3,
> +  REQ_MTX_1_OBTAIN = RTEMS_EVENT_4,
> +  REQ_MTX_1_RELEASE = RTEMS_EVENT_5,
> +  REQ_MTX_2_OBTAIN = RTEMS_EVENT_6,
> +  REQ_MTX_2_RELEASE = RTEMS_EVENT_7,
> +  REQ_MTX_C11_OBTAIN = RTEMS_EVENT_8,
> +  REQ_MTX_C11_RELEASE = RTEMS_EVENT_9,
> +  REQ_MTX_POSIX_OBTAIN = RTEMS_EVENT_10,
> +  REQ_MTX_POSIX_RELEASE = RTEMS_EVENT_11
>   } request_id;
>
>   typedef enum {
> +  M,
>     A_1,
>     A_2_0,
>     A_2_1,
> -  M,
>     H,
>     NONE
>   } task_id;
>
> +typedef enum {
> +  MTX_0,
> +  MTX_1,
> +  MTX_2
> +} mutex_id;
> +
>   typedef struct {
> -  rtems_id mtx;
> +  rtems_id mtx[MTX_COUNT];
> +  mtx_t mtx_c11;
> +#ifdef RTEMS_POSIX_API
> +  pthread_mutex_t mtx_posix;
> +#endif
>     rtems_id tasks[TASK_COUNT];
>     int generation[TASK_COUNT];
>     int expected_generation[TASK_COUNT];
> +  jmp_buf deadlock_return_context;
>   } test_context;
>
>   static test_context test_instance;
> @@ -109,22 +141,79 @@ static void request(test_context *ctx, task_id id, request_id req)
>     sync_with_helper(ctx);
>   }
>
> -static void obtain(test_context *ctx)
> +static void obtain(test_context *ctx, mutex_id id)
>   {
>     rtems_status_code sc;
>
> -  sc = rtems_semaphore_obtain(ctx->mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> +  sc = rtems_semaphore_obtain(ctx->mtx[id], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
>     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
>   }
>
> -static void release(test_context *ctx)
> +static void deadlock_obtain(test_context *ctx, mutex_id id)
>   {
>     rtems_status_code sc;
>
> -  sc = rtems_semaphore_release(ctx->mtx);
> +  sc = rtems_semaphore_obtain(ctx->mtx[id], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> +  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
> +}
> +
> +static void release(test_context *ctx, mutex_id id)
> +{
> +  rtems_status_code sc;
> +
> +  sc = rtems_semaphore_release(ctx->mtx[id]);
>     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
>   }
>
> +static void obtain_c11(test_context *ctx)
> +{
> +  int status;
> +
> +  status = mtx_lock(&ctx->mtx_c11);
> +  rtems_test_assert(status == thrd_success);
> +}
> +
> +static void deadlock_obtain_c11(test_context *ctx)
> +{
> +  if (setjmp(ctx->deadlock_return_context) == 0) {
> +    (void) mtx_lock(&ctx->mtx_c11);
> +  }
> +}
> +
> +static void release_c11(test_context *ctx)
> +{
> +  int status;
> +
> +  status = mtx_unlock(&ctx->mtx_c11);
> +  rtems_test_assert(status == thrd_success);
> +}
> +
> +#ifdef RTEMS_POSIX_API
> +static void obtain_posix(test_context *ctx)
> +{
> +  int error;
> +
> +  error = pthread_mutex_lock(&ctx->mtx_posix);
> +  rtems_test_assert(error == 0);
> +}
> +
> +static void deadlock_obtain_posix(test_context *ctx)
> +{
> +  int error;
> +
> +  error = pthread_mutex_lock(&ctx->mtx_posix);
> +  rtems_test_assert(error == EDEADLK);
> +}
> +
> +static void release_posix(test_context *ctx)
> +{
> +  int error;
> +
> +  error = pthread_mutex_unlock(&ctx->mtx_posix);
> +  rtems_test_assert(error == 0);
> +}
> +#endif
> +
>   static void check_generations(test_context *ctx, task_id a, task_id b)
>   {
>     size_t i;
> @@ -179,22 +268,65 @@ static void worker(rtems_task_argument arg)
>     while (true) {
>       rtems_event_set events = wait_for_events();
>
> -    if ((events & REQ_MTX_OBTAIN) != 0) {
> -      obtain(ctx);
> +    if ((events & REQ_MTX_0_OBTAIN) != 0) {
> +      obtain(ctx, MTX_0);
>         ++ctx->generation[id];
>       }
>
> -    if ((events & REQ_MTX_RELEASE) != 0) {
> -      release(ctx);
> +    if ((events & REQ_MTX_0_RELEASE) != 0) {
> +      release(ctx, MTX_0);
>         ++ctx->generation[id];
>       }
> +
> +    if ((events & REQ_MTX_1_OBTAIN) != 0) {
> +      obtain(ctx, MTX_1);
> +      ++ctx->generation[id];
> +    }
> +
> +    if ((events & REQ_MTX_1_RELEASE) != 0) {
> +      release(ctx, MTX_1);
> +      ++ctx->generation[id];
> +    }
> +
> +    if ((events & REQ_MTX_2_OBTAIN) != 0) {
> +      obtain(ctx, MTX_2);
> +      ++ctx->generation[id];
> +    }
> +
> +    if ((events & REQ_MTX_2_RELEASE) != 0) {
> +      release(ctx, MTX_2);
> +      ++ctx->generation[id];
> +    }
> +
> +    if ((events & REQ_MTX_C11_OBTAIN) != 0) {
> +      obtain_c11(ctx);
> +      ++ctx->generation[id];
> +    }
> +
> +    if ((events & REQ_MTX_C11_RELEASE) != 0) {
> +      release_c11(ctx);
> +      ++ctx->generation[id];
> +    }
> +
> +#ifdef RTEMS_POSIX_API
> +    if ((events & REQ_MTX_POSIX_OBTAIN) != 0) {
> +      obtain_posix(ctx);
> +      ++ctx->generation[id];
> +    }
> +
> +    if ((events & REQ_MTX_POSIX_RELEASE) != 0) {
> +      release_posix(ctx);
> +      ++ctx->generation[id];
> +    }
> +#endif
>     }
>   }
>
> -static void test(void)
> +static void set_up(test_context *ctx)
>   {
> -  test_context *ctx = &test_instance;
>     rtems_status_code sc;
> +  int status;
> +  size_t i;
>
>     ctx->tasks[M] = rtems_task_self();
>     start_task(ctx, A_1, worker, 1);
> @@ -202,61 +334,264 @@ static void test(void)
>     start_task(ctx, A_2_1, worker, 2);
>     start_task(ctx, H, helper, 3);
>
> -  sc = rtems_semaphore_create(
> -    rtems_build_name(' ', 'M', 'T', 'X'),
> -    1,
> -    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
> -    0,
> -    &ctx->mtx
> -  );
> -  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +  for (i = 0; i < MTX_COUNT; ++i) {
> +    sc = rtems_semaphore_create(
> +      rtems_build_name(' ', 'M', 'T', 'X'),
> +      1,
> +      RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
> +      0,
> +      &ctx->mtx[i]
> +    );
> +    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +  }
>
> -  obtain(ctx);
> -  request(ctx, A_1, REQ_MTX_OBTAIN);
> +  status = mtx_init(&ctx->mtx_c11, mtx_plain);
> +  rtems_test_assert(status == thrd_success);
> +
> +#ifdef RTEMS_POSIX_API
> +  {
> +    int error;
> +    pthread_mutexattr_t attr;
> +
> +    error = pthread_mutexattr_init(&attr);
> +    rtems_test_assert(error == 0);
> +
> +    error = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
> +    rtems_test_assert(error == 0);
> +
> +    error = pthread_mutex_init(&ctx->mtx_posix, &attr);
> +    rtems_test_assert(error == 0);
> +
> +    error = pthread_mutexattr_destroy(&attr);
> +    rtems_test_assert(error == 0);
> +  }
> +#endif
> +}
> +
> +static void test_inherit(test_context *ctx)
> +{
> +  obtain(ctx, MTX_0);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
>     check_generations(ctx, NONE, NONE);
>     assert_prio(ctx, M, 1);
> -  release(ctx);
> +  release(ctx, MTX_0);
>     check_generations(ctx, A_1, NONE);
>     assert_prio(ctx, M, 3);
> -  request(ctx, A_1, REQ_MTX_RELEASE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
>     check_generations(ctx, A_1, NONE);
> +}
>
> -  obtain(ctx);
> -  request(ctx, A_2_0, REQ_MTX_OBTAIN);
> -  request(ctx, A_1, REQ_MTX_OBTAIN);
> -  request(ctx, A_2_1, REQ_MTX_OBTAIN);
> +static void test_inherit_fifo_for_equal_priority(test_context *ctx)
> +{
> +  obtain(ctx, MTX_0);
> +  request(ctx, A_2_0, REQ_MTX_0_OBTAIN);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
> +  request(ctx, A_2_1, REQ_MTX_0_OBTAIN);
>     check_generations(ctx, NONE, NONE);
>     assert_prio(ctx, M, 1);
> -  release(ctx);
> +  release(ctx, MTX_0);
>     check_generations(ctx, A_1, NONE);
>     assert_prio(ctx, M, 3);
>     assert_prio(ctx, A_1, 1);
> -  request(ctx, A_1, REQ_MTX_RELEASE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
>     check_generations(ctx, A_1, A_2_0);
> -  request(ctx, A_2_0, REQ_MTX_RELEASE);
> +  request(ctx, A_2_0, REQ_MTX_0_RELEASE);
>     check_generations(ctx, A_2_0, A_2_1);
> -  request(ctx, A_2_1, REQ_MTX_RELEASE);
> +  request(ctx, A_2_1, REQ_MTX_0_RELEASE);
>     check_generations(ctx, A_2_1, NONE);
>   }
>
> -static void Init(rtems_task_argument arg)
> +static void test_deadlock_two_classic(test_context *ctx)
>   {
> -  TEST_BEGIN();
> +  obtain(ctx, MTX_0);
> +  request(ctx, A_1, REQ_MTX_1_OBTAIN);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
> +  check_generations(ctx, NONE, NONE);
> +  deadlock_obtain(ctx, MTX_1);
> +  release(ctx, MTX_0);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_1_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +}
> +
> +static void test_deadlock_three_classic(test_context *ctx)
> +{
> +  obtain(ctx, MTX_0);
> +  request(ctx, A_1, REQ_MTX_1_OBTAIN);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_2_0, REQ_MTX_2_OBTAIN);
> +  check_generations(ctx, A_2_0, NONE);
> +  request(ctx, A_2_0, REQ_MTX_1_OBTAIN);
> +  check_generations(ctx, NONE, NONE);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
> +  check_generations(ctx, NONE, NONE);
> +  deadlock_obtain(ctx, MTX_2);
> +  release(ctx, MTX_0);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_1_RELEASE);
> +  check_generations(ctx, A_1, A_2_0);
> +  request(ctx, A_2_0, REQ_MTX_2_RELEASE);
> +  check_generations(ctx, A_2_0, NONE);
> +  request(ctx, A_2_0, REQ_MTX_1_RELEASE);
> +  check_generations(ctx, A_2_0, NONE);
> +}
> +
> +static void test_deadlock_c11_and_classic(test_context *ctx)
> +{
> +  obtain_c11(ctx);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_C11_OBTAIN);
> +  check_generations(ctx, NONE, NONE);
> +  deadlock_obtain(ctx, MTX_0);
> +  release_c11(ctx);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_C11_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +}
> +
> +static void test_deadlock_classic_and_c11(test_context *ctx)
> +{
> +  obtain(ctx, MTX_0);
> +  request(ctx, A_1, REQ_MTX_C11_OBTAIN);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
> +  check_generations(ctx, NONE, NONE);
> +  deadlock_obtain_c11(ctx);
> +  release(ctx, MTX_0);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_C11_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +}
> +
> +static void test_deadlock_posix_and_classic(test_context *ctx)
> +{
> +#ifdef RTEMS_POSIX_API
> +  obtain_posix(ctx);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_POSIX_OBTAIN);
> +  check_generations(ctx, NONE, NONE);
> +  deadlock_obtain(ctx, MTX_0);
> +  release_posix(ctx);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_POSIX_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +#endif
> +}
> +
> +static void test_deadlock_classic_and_posix(test_context *ctx)
> +{
> +#ifdef RTEMS_POSIX_API
> +  obtain(ctx, MTX_0);
> +  request(ctx, A_1, REQ_MTX_POSIX_OBTAIN);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_OBTAIN);
> +  check_generations(ctx, NONE, NONE);
> +  deadlock_obtain_posix(ctx);
> +  release(ctx, MTX_0);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_0_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +  request(ctx, A_1, REQ_MTX_POSIX_RELEASE);
> +  check_generations(ctx, A_1, NONE);
> +#endif
> +}
> +
> +static void tear_down(test_context *ctx)
> +{
> +  rtems_status_code sc;
> +  size_t i;
> +
> +  for (i = 1; i < TASK_COUNT; ++i) {
> +    sc = rtems_task_delete(ctx->tasks[i]);
> +    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +  }
> +
> +  for (i = 0; i < MTX_COUNT; ++i) {
> +    sc = rtems_semaphore_delete(ctx->mtx[i]);
> +    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +  }
> +
> +  mtx_destroy(&ctx->mtx_c11);
> +
> +#ifdef RTEMS_POSIX_API
> +  {
> +    int error;
> +
> +    error = pthread_mutex_destroy(&ctx->mtx_posix);
> +    rtems_test_assert(error == 0);
> +  }
> +#endif
> +}
>
> -  test();
> +static void Init(rtems_task_argument arg)
> +{
> +  test_context *ctx = &test_instance;
> +  rtems_resource_snapshot snapshot;
>
> +  TEST_BEGIN();
> +  rtems_resource_snapshot_take(&snapshot);
> +
> +  set_up(ctx);
> +  test_inherit(ctx);
> +  test_inherit_fifo_for_equal_priority(ctx);
> +  test_deadlock_two_classic(ctx);
> +  test_deadlock_three_classic(ctx);
> +  test_deadlock_c11_and_classic(ctx);
> +  test_deadlock_classic_and_c11(ctx);
> +  test_deadlock_posix_and_classic(ctx);
> +  test_deadlock_classic_and_posix(ctx);
> +  tear_down(ctx);
> +
> +  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
>     TEST_END();
>     rtems_test_exit(0);
>   }
>
> +static void fatal_extension(
> +  rtems_fatal_source source,
> +  bool is_internal,
> +  rtems_fatal_code error
> +)
> +{
> +
> +  if (
> +    source == INTERNAL_ERROR_CORE
> +      && !is_internal
> +      && error == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
> +  ) {
> +    test_context *ctx = &test_instance;
> +
> +    longjmp(ctx->deadlock_return_context, 1);
> +  }
> +}
> +
>   #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
>   #define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
>
>   #define CONFIGURE_MAXIMUM_TASKS TASK_COUNT
>
> -#define CONFIGURE_MAXIMUM_SEMAPHORES 1
> +#define CONFIGURE_MAXIMUM_SEMAPHORES 3
> +
> +#ifdef RTEMS_POSIX_API
> +#define CONFIGURE_MAXIMUM_POSIX_MUTEXES 1
> +#endif
>
> -#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
> +#define CONFIGURE_INITIAL_EXTENSIONS \
> +  { .fatal = fatal_extension }, \
> +  RTEMS_TEST_INITIAL_EXTENSION
>
>   #define CONFIGURE_INIT_TASK_PRIORITY 3
>
> diff --git a/testsuites/sptests/spmutex01/spmutex01.doc b/testsuites/sptests/spmutex01/spmutex01.doc
> index 7951024..7bcb850 100644
> --- a/testsuites/sptests/spmutex01/spmutex01.doc
> +++ b/testsuites/sptests/spmutex01/spmutex01.doc
> @@ -4,6 +4,10 @@ test set name: spmutex01
>
>   directives:
>
> +  - mtx_lock()
> +  - mtx_unlock()
> +  - pthread_mutex_lock()
> +  - pthread_mutex_unlock()
>     - rtems_semaphore_create()
>     - rtems_semaphore_obtain()
>     - rtems_semaphore_release()
> @@ -12,3 +16,4 @@ concepts:
>
>     - Ensure that priority inheritance mechanism works.
>     - Ensure that thread priority queueing discipline works.
> +  - Ensure that deadlock detection works in various combinations.
> diff --git a/testsuites/sptests/spsyslock01/init.c b/testsuites/sptests/spsyslock01/init.c
> index 5bf5d6a..1e0d4818 100644
> --- a/testsuites/sptests/spsyslock01/init.c
> +++ b/testsuites/sptests/spsyslock01/init.c
> @@ -1,5 +1,5 @@
>   /*
> - * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
> + * Copyright (c) 2015, 2016 embedded brains GmbH.  All rights reserved.
>    *
>    *  embedded brains GmbH
>    *  Dornierstr. 4
> @@ -21,7 +21,7 @@
>   #include <sys/lock.h>
>   #include <errno.h>
>   #include <limits.h>
> -#include <pthread.h>
> +#include <setjmp.h>
>   #include <string.h>
>   #include <time.h>
>
> @@ -35,8 +35,6 @@ const char rtems_test_name[] = "SPSYSLOCK 1";
>
>   #define EVENT_MTX_PRIO_INV RTEMS_EVENT_2
>
> -#define EVENT_MTX_DEADLOCK RTEMS_EVENT_3
> -
>   #define EVENT_REC_MTX_ACQUIRE RTEMS_EVENT_4
>
>   #define EVENT_REC_MTX_RELEASE RTEMS_EVENT_5
> @@ -56,7 +54,6 @@ typedef struct {
>     rtems_id mid;
>     rtems_id low;
>     struct _Mutex_Control mtx;
> -  struct _Mutex_Control deadlock_mtx;
>     struct _Mutex_recursive_Control rec_mtx;
>     struct _Condition_Control cond;
>     struct _Semaphore_Control sem;
> @@ -65,6 +62,7 @@ typedef struct {
>     int eno[2];
>     int generation[2];
>     int current_generation[2];
> +  jmp_buf deadlock_return_context;
>   } test_context;
>
>   static test_context test_instance;
> @@ -298,6 +296,19 @@ static void test_mtx_timeout_recursive(test_context *ctx)
>     send_event(ctx, idx, EVENT_REC_MTX_RELEASE);
>   }
>
> +static void test_mtx_deadlock(test_context *ctx)
> +{
> +  struct _Mutex_Control *mtx = &ctx->mtx;
> +
> +  _Mutex_Acquire(mtx);
> +
> +  if (setjmp(ctx->deadlock_return_context) == 0) {
> +    _Mutex_Acquire(mtx);
> +  }
> +
> +  _Mutex_Release(mtx);
> +}
> +
>   static void test_condition(test_context *ctx)
>   {
>     struct _Condition_Control *cond = &ctx->cond;
> @@ -493,21 +504,6 @@ static void mid_task(rtems_task_argument arg)
>     rtems_test_assert(0);
>   }
>
> -#ifdef RTEMS_POSIX_API
> -static void deadlock_cleanup(void *arg)
> -{
> -  struct _Mutex_Control *deadlock_mtx = arg;
> -
> -  /*
> -   * The thread terminate procedure will dequeue us from the wait queue.  So,
> -   * one release is sufficient.
> -   */
> -
> -  _Mutex_Release(deadlock_mtx);
> -  _Mutex_Destroy(deadlock_mtx);
> -}
> -#endif
> -
>   static void high_task(rtems_task_argument idx)
>   {
>     test_context *ctx = &test_instance;
> @@ -553,22 +549,6 @@ static void high_task(rtems_task_argument idx)
>         rtems_test_assert(sc == RTEMS_SUCCESSFUL);
>       }
>
> -    if ((events & EVENT_MTX_DEADLOCK) != 0) {
> -      struct _Mutex_Control *deadlock_mtx = &ctx->deadlock_mtx;
> -
> -#ifdef RTEMS_POSIX_API
> -      pthread_cleanup_push(deadlock_cleanup, deadlock_mtx);
> -#endif
> -
> -      _Mutex_Initialize(deadlock_mtx);
> -      _Mutex_Acquire(deadlock_mtx);
> -      _Mutex_Acquire(deadlock_mtx);
> -
> -#ifdef RTEMS_POSIX_API
> -      pthread_cleanup_pop(0);
> -#endif
> -    }
> -
>       if ((events & EVENT_REC_MTX_ACQUIRE) != 0) {
>         _Mutex_recursive_Acquire(&ctx->rec_mtx);
>       }
> @@ -670,6 +650,7 @@ static void test(void)
>     test_prio_inv_recursive(ctx);
>     test_mtx_timeout_normal(ctx);
>     test_mtx_timeout_recursive(ctx);
> +  test_mtx_deadlock(ctx);
>     test_condition(ctx);
>     test_condition_timeout(ctx);
>     test_sem(ctx);
> @@ -677,15 +658,11 @@ static void test(void)
>     test_futex(ctx);
>     test_sched();
>
> -  send_event(ctx, 0, EVENT_MTX_DEADLOCK);
> -
>     sc = rtems_task_delete(ctx->mid);
>     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
>
> -#ifdef RTEMS_POSIX_API
>     sc = rtems_task_delete(ctx->high[0]);
>     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> -#endif
>
>     sc = rtems_task_delete(ctx->high[1]);
>     rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> @@ -707,6 +684,24 @@ static void Init(rtems_task_argument arg)
>     rtems_test_exit(0);
>   }
>
> +static void fatal_extension(
> +  rtems_fatal_source source,
> +  bool is_internal,
> +  rtems_fatal_code error
> +)
> +{
> +
> +  if (
> +    source == INTERNAL_ERROR_CORE
> +      && !is_internal
> +      && error == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
> +  ) {
> +    test_context *ctx = &test_instance;
> +
> +    longjmp(ctx->deadlock_return_context, 1);
> +  }
> +}
> +
>   #define CONFIGURE_MICROSECONDS_PER_TICK US_PER_TICK
>
>   #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
> @@ -714,7 +709,9 @@ static void Init(rtems_task_argument arg)
>
>   #define CONFIGURE_MAXIMUM_TASKS 4
>
> -#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
> +#define CONFIGURE_INITIAL_EXTENSIONS \
> +  { .fatal = fatal_extension }, \
> +  RTEMS_TEST_INITIAL_EXTENSION
>
>   #define CONFIGURE_INIT_TASK_PRIORITY 4
>   #define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
>


