[PATCH 4/4] score: Use Resource Handler for MrsP semaphores
Gedare Bloom
gedare at rtems.org
Wed May 28 21:45:24 UTC 2014
On Wed, May 28, 2014 at 10:29 AM, Sebastian Huber
<sebastian.huber at embedded-brains.de> wrote:
> This enables proper resource dependency tracking and as a side-effect
> deadlock detection.
> ---
> cpukit/score/include/rtems/score/mrsp.h | 14 +-
> cpukit/score/include/rtems/score/mrspimpl.h | 127 +++++++++---
> doc/user/sem.t | 11 +-
> testsuites/smptests/smpmrsp01/init.c | 288 +++++++++++++++++++++++++++
> 4 files changed, 404 insertions(+), 36 deletions(-)
>
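For readers who have not yet looked at the new Resource Handler: the
deadlock detection mentioned in the commit message falls out of the
root pointer that is kept for every resource dependency tree. A request
deadlocks when the owner's tree is already rooted at the requesting
node, which is exactly what the new check in _MRSP_Obtain() below
tests. Here is a minimal standalone model of that idea, with simplified
types and names rather than the actual Resource Handler API:

  #include <assert.h>

  /*
   * Simplified model: every thread owns a node, and node->root points
   * to the top of the resource dependency tree the thread belongs to.
   */
  typedef struct Node {
    struct Node *root;
  } Node;

  static void node_init( Node *n ) { n->root = n; }

  /*
   * A request for a resource owned by "owner" closes a cycle exactly
   * if the owner's tree is already rooted at the requesting node.
   */
  static int request_would_deadlock( Node *owner, Node *requester )
  {
    return owner->root == requester;
  }

  int main( void )
  {
    Node a, b, c;

    node_init( &a );
    node_init( &b );
    node_init( &c );

    /* b blocks on a resource owned by a: b's tree is now rooted at a */
    b.root = &a;

    /* a requesting a resource owned by b would close a cycle ... */
    assert( request_would_deadlock( &b, &a ) );
    /* ... while an unrelated thread c would simply wait */
    assert( !request_would_deadlock( &b, &c ) );

    return 0;
  }

The real code additionally propagates the new root over the waiter's
whole subtree via _MRSP_Set_root(), since the waiter may itself own
resources that other threads are waiting for.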
> diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h
> index 407d5ef..c31d5f6 100644
> --- a/cpukit/score/include/rtems/score/mrsp.h
> +++ b/cpukit/score/include/rtems/score/mrsp.h
> @@ -63,6 +63,7 @@ typedef enum {
> MRSP_INVALID_NUMBER = 10,
> MRSP_RESOUCE_IN_USE = 12,
> MRSP_UNSATISFIED = 13,
> + MRSP_INCORRECT_STATE = 14,
> MRSP_INVALID_PRIORITY = 19,
> MRSP_NOT_OWNER_OF_RESOURCE = 23,
> MRSP_NO_MEMORY = 26
> @@ -102,12 +103,9 @@ typedef struct {
> */
> typedef struct {
> /**
> - * @brief The owner of the MRSP resource.
> - *
> - * In case this field is @c NULL, then this MRSP resource has currently no
> - * owner.
> + * @brief Basic resource control.
> */
> - Thread_Control *owner;
> + Resource_Control Resource;
>
> /**
> * @brief A chain of MrsP rivals waiting for resource ownership.
> @@ -117,6 +115,12 @@ typedef struct {
> Chain_Control Rivals;
>
> /**
> + * @brief The initial priority of the owner before it was elevated to the
> + * ceiling priority.
> + */
> + Priority_Control initial_priority_of_owner;
> +
> + /**
> * @brief One ceiling priority per scheduler instance.
> */
> Priority_Control *ceiling_priorities;
> diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
> index 76d3bc8..e46309e 100644
> --- a/cpukit/score/include/rtems/score/mrspimpl.h
> +++ b/cpukit/score/include/rtems/score/mrspimpl.h
> @@ -21,6 +21,7 @@
>
> #include <rtems/score/assert.h>
> #include <rtems/score/chainimpl.h>
> +#include <rtems/score/resourceimpl.h>
> #include <rtems/score/schedulerimpl.h>
> #include <rtems/score/watchdogimpl.h>
> #include <rtems/score/wkspace.h>
> @@ -41,17 +42,64 @@ extern "C" {
>
> #define MRSP_RIVAL_STATE_TIMEOUT 0x2U
>
> -RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
> +RTEMS_INLINE_ROUTINE bool _MRSP_Set_root_visitor(
> + Resource_Node *node,
> + void *arg
> +)
> +{
> + _Resource_Node_set_root( node, arg );
> +
> + return false;
> +}
> +
> +RTEMS_INLINE_ROUTINE void _MRSP_Set_root(
> + Resource_Node *top,
> + Resource_Node *root
> +)
> +{
> + _Resource_Node_set_root( top, root );
> + _Resource_Iterate( top, _MRSP_Set_root_visitor, root );
> +}
> +
> +RTEMS_INLINE_ROUTINE void _MRSP_Elevate_priority(
> MRSP_Control *mrsp,
> Thread_Control *new_owner,
> Priority_Control ceiling_priority
> )
> {
> - ++new_owner->resource_count;
> - mrsp->owner = new_owner;
> _Thread_Change_priority( new_owner, ceiling_priority, false );
> }
>
> +RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority(
> + const MRSP_Control *mrsp,
> + Thread_Control *thread,
> + Priority_Control initial_priority
> +)
> +{
> + if ( thread->resource_count == 0 ) {
Perhaps the resource_count field should be renamed to make it explicit
that it counts held locks. The overloading of the term "resource" is
confusing; here in particular it takes a minute to figure out what this
condition checks.
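Something along these lines is what I have in mind (held_lock_count and
the predicate are only a strawman, not existing RTEMS code):

  #include <stdbool.h>
  #include <stdint.h>

  /*
   * Strawman: rename the counter of currently held priority-affecting
   * locks and wrap the zero test in a predicate, so the condition in
   * _MRSP_Restore_priority() documents itself.
   */
  typedef struct {
    /* ... other thread fields elided ... */
    uint32_t held_lock_count;   /* was: resource_count */
  } Thread_Control_sketch;

  static inline bool _Thread_Holds_no_locks(
    const Thread_Control_sketch *thread
  )
  {
    return thread->held_lock_count == 0;
  }

The check above would then read if ( _Thread_Holds_no_locks( thread ) )
instead of comparing a vaguely named counter against zero.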
> + Priority_Control new_priority = _Scheduler_Highest_priority_of_two(
> + _Scheduler_Get( thread ),
> + initial_priority,
> + thread->real_priority
> + );
> +
> + _Thread_Change_priority( thread, new_priority, true );
> + }
> +}
> +
> +RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
> + MRSP_Control *mrsp,
> + Thread_Control *new_owner,
> + Priority_Control initial_priority,
> + Priority_Control ceiling_priority
> +)
> +{
> + _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
> + _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
> + mrsp->initial_priority_of_owner = initial_priority;
> + _MRSP_Elevate_priority( mrsp, new_owner, ceiling_priority );
> +}
> +
> RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize(
> MRSP_Control *mrsp,
> Priority_Control ceiling_priority,
> @@ -77,7 +125,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize(
> mrsp->ceiling_priorities[ i ] = ceiling_priority;
> }
>
> - mrsp->owner = NULL;
> + _Resource_Initialize( &mrsp->Resource );
> _Chain_Initialize_empty( &mrsp->Rivals );
>
> return MRSP_SUCCESSFUL;
> @@ -100,11 +148,6 @@ RTEMS_INLINE_ROUTINE void _MRSP_Set_ceiling_priority(
> mrsp->ceiling_priorities[ scheduler_index ] = ceiling_priority;
> }
>
> -RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority( Thread_Control *thread )
> -{
> - _Thread_Change_priority( thread, thread->real_priority, true );
> -}
> -
> RTEMS_INLINE_ROUTINE void _MRSP_Add_state(
> MRSP_Rival *rival,
> unsigned int state
> @@ -127,7 +170,9 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout(
>
> RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
> MRSP_Control *mrsp,
> + Resource_Node *owner,
> Thread_Control *executing,
> + Priority_Control initial_priority,
> Priority_Control ceiling_priority,
> Watchdog_Interval timeout
> )
> @@ -137,11 +182,14 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
> bool previous_life_protection;
> unsigned int state;
>
> - _Thread_Change_priority( executing, ceiling_priority, false );
> + _MRSP_Elevate_priority( mrsp, executing, ceiling_priority );
>
> rival.thread = executing;
> _Atomic_Init_uint( &rival.state, MRSP_RIVAL_STATE_WAITING );
> _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node );
> + _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node );
> + _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
> + _MRSP_Set_root( &executing->Resource_node, owner );
>
> if ( timeout > 0 ) {
> _Watchdog_Initialize(
> @@ -176,13 +224,15 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
> state = _Atomic_Load_uint( &rival.state, ATOMIC_ORDER_RELAXED );
>
> if ( ( state & MRSP_RIVAL_STATE_NEW_OWNER ) != 0 ) {
> - ++executing->resource_count;
> -
> + mrsp->initial_priority_of_owner = initial_priority;
> status = MRSP_SUCCESSFUL;
> } else {
> - if ( executing->resource_count == 0 ) {
> - _MRSP_Restore_priority( executing );
> - }
> + Resource_Node *executing_node = &executing->Resource_node;
> +
> + _Resource_Node_extract( executing_node );
> + _Resource_Node_set_dependency( executing_node, NULL );
> + _MRSP_Set_root( executing_node, executing_node );
> + _MRSP_Restore_priority( mrsp, executing, initial_priority );
>
> status = MRSP_TIMEOUT;
> }
> @@ -200,27 +250,36 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain(
> MRSP_Status status;
> const Scheduler_Control *scheduler = _Scheduler_Get( executing );
> uint32_t scheduler_index = _Scheduler_Get_index( scheduler );
> + Priority_Control initial_priority = executing->current_priority;
> Priority_Control ceiling_priority =
> _MRSP_Get_ceiling_priority( mrsp, scheduler_index );
> bool priority_ok = !_Scheduler_Is_priority_higher_than(
> scheduler,
> - executing->current_priority,
> + initial_priority,
> ceiling_priority
> );
> + Resource_Node *owner = _Resource_Get_owner( &mrsp->Resource );
I'd put this after the priority_ok check, for better readability: it
keeps the error check right next to its cause. (Ahem, this should
probably be in our conventions too, eh?)
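Roughly this, i.e. just the lines already in _MRSP_Obtain() with the
lookup moved below the early return (no new code):

  bool priority_ok = !_Scheduler_Is_priority_higher_than(
    scheduler,
    initial_priority,
    ceiling_priority
  );
  Resource_Node *owner;

  if ( !priority_ok ) {
    return MRSP_INVALID_PRIORITY;
  }

  owner = _Resource_Get_owner( &mrsp->Resource );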
>
> if ( !priority_ok) {
> return MRSP_INVALID_PRIORITY;
> }
>
> - if ( mrsp->owner == NULL ) {
> - _MRSP_Claim_ownership( mrsp, executing, ceiling_priority );
> + if ( owner == NULL ) {
> + _MRSP_Claim_ownership(
> + mrsp, executing,
I believe we prefer to put one argument per line when breaking up
function calls. Please add a newline between mrsp and executing. (This
also should be in the conventions, sigh.)
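I.e. something like:

  _MRSP_Claim_ownership(
    mrsp,
    executing,
    initial_priority,
    ceiling_priority
  );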
> + initial_priority,
> + ceiling_priority
> + );
> status = MRSP_SUCCESSFUL;
> - } else if ( mrsp->owner == executing ) {
> + } else if ( _Resource_Node_get_root( owner ) == &executing->Resource_node ) {
> + /* Nested access or deadlock */
> status = MRSP_UNSATISFIED;
> } else if ( wait ) {
> status = _MRSP_Wait_for_ownership(
> mrsp,
> + owner,
> executing,
> + initial_priority,
> ceiling_priority,
> timeout
> );
> @@ -236,25 +295,33 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
> Thread_Control *executing
> )
> {
> - uint32_t resource_count = executing->resource_count;
> -
> - if ( mrsp->owner != executing ) {
> + if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
> return MRSP_NOT_OWNER_OF_RESOURCE;
> }
>
> - if ( resource_count == 1 ) {
> - executing->resource_count = 0;
> - _MRSP_Restore_priority( executing );
> - } else {
> - executing->resource_count = resource_count - 1;
> + if (
> + !_Resource_Is_most_recent_resource_of_node(
> + &executing->Resource_node,
> + &mrsp->Resource
> + )
> + ) {
> + return MRSP_INCORRECT_STATE;
> }
>
> + _Resource_Extract( &mrsp->Resource );
> + _MRSP_Restore_priority( mrsp, executing, mrsp->initial_priority_of_owner );
> +
> if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
> - mrsp->owner = NULL;
> + _Resource_Set_owner( &mrsp->Resource, NULL );
> } else {
> MRSP_Rival *rival = (MRSP_Rival *) _Chain_First( &mrsp->Rivals );
> + Resource_Node *new_owner = &rival->thread->Resource_node;
>
> - mrsp->owner = rival->thread;
> + _Resource_Node_extract( new_owner );
> + _Resource_Node_set_dependency( new_owner, NULL );
> + _MRSP_Set_root( new_owner, new_owner );
> + _Resource_Node_add_resource( new_owner, &mrsp->Resource );
> + _Resource_Set_owner( &mrsp->Resource, new_owner );
> _MRSP_Add_state( rival, MRSP_RIVAL_STATE_NEW_OWNER );
> }
>
> @@ -263,7 +330,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
>
> RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Destroy( MRSP_Control *mrsp )
> {
> - if ( mrsp->owner != NULL ) {
> + if ( _Resource_Get_owner( &mrsp->Resource ) != NULL ) {
> return MRSP_RESOUCE_IN_USE;
> }
>
> diff --git a/doc/user/sem.t b/doc/user/sem.t
> index 95fa4b9..6bd22dd 100644
> --- a/doc/user/sem.t
> +++ b/doc/user/sem.t
> @@ -751,6 +751,10 @@ willing to be blocked waiting for the semaphore. If it is set to
> If the semaphore is available or the @code{@value{RPREFIX}NO_WAIT} option
> component is set, then timeout is ignored.
>
> +Deadlock situations are detected for MrsP semaphores and the
> +@code{@value{RPREFIX}UNSATISFIED} status code will be returned on SMP
> +configurations in this case.
> +
> @subheading NOTES:
> The following semaphore acquisition option constants
> are defined by RTEMS:
> @@ -806,7 +810,8 @@ procedure Semaphore_Release (
> @subheading DIRECTIVE STATUS CODES:
> @code{@value{RPREFIX}SUCCESSFUL} - semaphore released successfully@*
> @code{@value{RPREFIX}INVALID_ID} - invalid semaphore id@*
> -@code{@value{RPREFIX}NOT_OWNER_OF_RESOURCE} - calling task does not own semaphore
> +@code{@value{RPREFIX}NOT_OWNER_OF_RESOURCE} - calling task does not own semaphore@*
> +@code{@value{RPREFIX}INCORRECT_STATE} - invalid unlock order
>
> @subheading DESCRIPTION:
>
> @@ -838,6 +843,10 @@ calling task having its priority lowered. This will occur if
> the calling task holds no other binary semaphores and it has
> inherited a higher priority.
>
> +MrsP semaphores must be released in the reverse obtain order; otherwise
> +the @code{@value{RPREFIX}INCORRECT_STATE} status code will be returned on SMP
> +configurations.
> +
> @c
> @c
> @c
> diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
> index 4f6637a..b5590ed 100644
> --- a/testsuites/smptests/smpmrsp01/init.c
> +++ b/testsuites/smptests/smpmrsp01/init.c
> @@ -44,6 +44,7 @@ typedef struct {
> rtems_id mrsp_ids[MRSP_COUNT];
> rtems_id scheduler_ids[CPU_COUNT];
> rtems_id worker_ids[2 * CPU_COUNT];
> + rtems_id timer_id;
> volatile bool stop_worker[CPU_COUNT];
> counter counters[2 * CPU_COUNT];
> Thread_Control *worker_task;
> @@ -357,6 +358,290 @@ static void test_mrsp_nested_obtain_error(void)
> rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> }
>
> +static void test_mrsp_unlock_order_error(void)
> +{
> + rtems_status_code sc;
> + rtems_id id_a;
> + rtems_id id_b;
> +
> + puts("test MrsP unlock order error");
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', ' ', ' ', 'A'),
> + 1,
> + RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
> + | RTEMS_BINARY_SEMAPHORE,
> + 1,
> + &id_a
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', ' ', ' ', 'B'),
> + 1,
> + RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
> + | RTEMS_BINARY_SEMAPHORE,
> + 1,
> + &id_b
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_obtain(id_a, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_obtain(id_b, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_release(id_a);
> + rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
> +
> + sc = rtems_semaphore_release(id_b);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_release(id_a);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_delete(id_a);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_delete(id_b);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +}
> +
> +static void deadlock_timer(rtems_id id, void *arg)
> +{
> + test_context *ctx = &test_instance;
> + rtems_status_code sc;
> +
> + sc = rtems_task_suspend(ctx->worker_ids[0]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +}
> +
> +static void deadlock_worker(rtems_task_argument arg)
> +{
> + test_context *ctx = &test_instance;
> + rtems_status_code sc;
> +
> + sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_timer_fire_after(ctx->timer_id, 2, deadlock_timer, NULL);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_release(ctx->mrsp_ids[1]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_event_transient_send(ctx->main_task_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + rtems_task_suspend(RTEMS_SELF);
> + rtems_test_assert(0);
> +}
> +
> +static void test_mrsp_deadlock_error(void)
> +{
> + test_context *ctx = &test_instance;
> + rtems_status_code sc;
> + rtems_task_priority prio = 2;
> +
> + puts("test MrsP deadlock error");
> +
> + assert_prio(RTEMS_SELF, prio);
> +
> + sc = rtems_timer_create(
> + rtems_build_name('M', 'R', 'S', 'P'),
> + &ctx->timer_id
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', ' ', ' ', 'A'),
> + 1,
> + RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
> + | RTEMS_BINARY_SEMAPHORE,
> + prio,
> + &ctx->mrsp_ids[0]
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', ' ', ' ', 'B'),
> + 1,
> + RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
> + | RTEMS_BINARY_SEMAPHORE,
> + prio,
> + &ctx->mrsp_ids[1]
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_create(
> + rtems_build_name('W', 'O', 'R', 'K'),
> + prio,
> + RTEMS_MINIMUM_STACK_SIZE,
> + RTEMS_DEFAULT_MODES,
> + RTEMS_DEFAULT_ATTRIBUTES,
> + &ctx->worker_ids[0]
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_start(ctx->worker_ids[0], deadlock_worker, 0);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_UNSATISFIED);
> +
> + sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_resume(ctx->worker_ids[0]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_delete(ctx->worker_ids[0]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_timer_delete(ctx->timer_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +}
> +
> +static void test_mrsp_multiple_obtain(void)
> +{
> + rtems_status_code sc;
> + rtems_id sem_a_id;
> + rtems_id sem_b_id;
> + rtems_id sem_c_id;
> +
> + puts("test MrsP multiple obtain");
> +
> + change_prio(RTEMS_SELF, 4);
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', ' ', ' ', 'A'),
> + 1,
> + RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
> + | RTEMS_BINARY_SEMAPHORE,
> + 3,
> + &sem_a_id
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', ' ', ' ', 'B'),
> + 1,
> + RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
> + | RTEMS_BINARY_SEMAPHORE,
> + 2,
> + &sem_b_id
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', ' ', ' ', 'C'),
> + 1,
> + RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
> + | RTEMS_BINARY_SEMAPHORE,
> + 1,
> + &sem_c_id
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 4);
> +
> + sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 3);
> +
> + sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 2);
> +
> + sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 1);
> +
> + sc = rtems_semaphore_release(sem_c_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 2);
> +
> + sc = rtems_semaphore_release(sem_b_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 3);
> +
> + sc = rtems_semaphore_release(sem_a_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 4);
> +
> + sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 3);
> +
> + sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 2);
> +
> + sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 1);
> + change_prio(RTEMS_SELF, 3);
> + assert_prio(RTEMS_SELF, 1);
> +
> + sc = rtems_semaphore_release(sem_c_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 2);
> +
> + sc = rtems_semaphore_release(sem_b_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 3);
> +
> + sc = rtems_semaphore_release(sem_a_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + assert_prio(RTEMS_SELF, 3);
> +
> + sc = rtems_semaphore_delete(sem_a_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_delete(sem_b_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_delete(sem_c_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + change_prio(RTEMS_SELF, 2);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +}
> +
> static uint32_t simple_random(uint32_t v)
> {
> v *= 1664525;
> @@ -589,6 +874,9 @@ static void Init(rtems_task_argument arg)
> test_mrsp_flush_error();
> test_mrsp_initially_locked_error();
> test_mrsp_nested_obtain_error();
> + test_mrsp_unlock_order_error();
> + test_mrsp_deadlock_error();
> + test_mrsp_multiple_obtain();
> test_mrsp_obtain_and_release();
> test_mrsp_load();
>
> --
> 1.7.7
>