[PATCH] rtems: Fix no protocol mutex release
Joel Sherrill
joel at rtems.org
Fri Jun 3 13:24:26 UTC 2016
Binary semaphores with no locking protocol can be used for condition
synchronization. This was described way back by Dijkstra. Usually the
binary semaphore starts locked, so a thread blocks on its attempt to
obtain it. An ISR or another thread can then release or flush the
semaphore to announce the condition.

This is a normal, well-known use case.

It may be under-documented, but it is a commonly used synchronization
method.
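
For reference, here is a minimal sketch of that pattern using the
Classic API as it behaves today, i.e. before the owner check proposed
below. The names (Cond_sem, setup_condition, Waiter_task,
announce_condition) are purely illustrative and not part of the patch:

#include <rtems.h>
#include <assert.h>

static rtems_id Cond_sem;

static void setup_condition( void )
{
  rtems_status_code sc;

  /* Binary semaphore, no locking protocol (FIFO), created locked. */
  sc = rtems_semaphore_create(
    rtems_build_name( 'C', 'O', 'N', 'D' ),
    0,                                   /* count 0 == initially locked */
    RTEMS_BINARY_SEMAPHORE | RTEMS_FIFO, /* no priority protocol */
    0,                                   /* ceiling priority is ignored */
    &Cond_sem
  );
  assert( sc == RTEMS_SUCCESSFUL );
}

static rtems_task Waiter_task( rtems_task_argument arg )
{
  (void) arg;

  for ( ;; ) {
    /* Block here until some other context announces the condition. */
    (void) rtems_semaphore_obtain( Cond_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
    /* ... react to the condition ... */
  }
}

/*
 * An ISR or any other thread announces the condition.  The releasing
 * context never obtained the semaphore, so it is not the owner; with
 * the owner check added by the patch below this release would return
 * RTEMS_NOT_OWNER instead of unblocking the waiter.
 */
static void announce_condition( void )
{
  (void) rtems_semaphore_release( Cond_sem );
}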
On Jun 3, 2016 1:19 AM, "Sebastian Huber" <sebastian.huber at embedded-brains.de> wrote:
> The Classic binary semaphores without a locking protocol could be
> released by everyone: in contrast to the POSIX mutexes (all variants)
> and the Classic binary semaphores with priority inheritance or
> ceiling, there was no owner check in the release path.
>
> This behaviour was a bit unexpected and not documented. Add an owner
> check to the release path. Update sptests/sp42 accordingly.
>
> Close #2725
> ---
> cpukit/rtems/src/semrelease.c | 3 +-
> cpukit/score/include/rtems/score/coremuteximpl.h | 54 +++--------
> testsuites/sptests/sp42/init.c | 109 ++++++++++++++---------
> testsuites/sptests/sp42/sp42.scn | 4 +-
> 4 files changed, 84 insertions(+), 86 deletions(-)
>
> diff --git a/cpukit/rtems/src/semrelease.c b/cpukit/rtems/src/semrelease.c
> index ccd4e63..39c467d 100644
> --- a/cpukit/rtems/src/semrelease.c
> +++ b/cpukit/rtems/src/semrelease.c
> @@ -64,9 +64,10 @@ rtems_status_code rtems_semaphore_release( rtems_id id )
> );
> break;
> case SEMAPHORE_VARIANT_MUTEX_NO_PROTOCOL:
> - _CORE_recursive_mutex_Surrender_no_protocol_classic(
> + _CORE_recursive_mutex_Surrender_no_protocol(
> &the_semaphore->Core_control.Mutex.Recursive,
> _Semaphore_Get_operations( the_semaphore ),
> + executing,
> &queue_context
> );
> status = STATUS_SUCCESSFUL;
> diff --git a/cpukit/score/include/rtems/score/coremuteximpl.h b/cpukit/score/include/rtems/score/coremuteximpl.h
> index decf770..956dfa8 100644
> --- a/cpukit/score/include/rtems/score/coremuteximpl.h
> +++ b/cpukit/score/include/rtems/score/coremuteximpl.h
> @@ -311,22 +311,29 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Seize_no_protocol(
> );
> }
>
> -RTEMS_INLINE_ROUTINE void
> -_CORE_recursive_mutex_Surrender_no_protocol_finalize(
> +RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender_no_protocol(
> CORE_recursive_mutex_Control *the_mutex,
> const Thread_queue_Operations *operations,
> + Thread_Control *executing,
> Thread_queue_Context *queue_context
> )
> {
> unsigned int nest_level;
> Thread_Control *new_owner;
>
> + _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );
> +
> + if ( !_CORE_mutex_Is_owner( &the_mutex->Mutex, executing ) ) {
> + _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
> + return STATUS_NOT_OWNER;
> + }
> +
> nest_level = the_mutex->nest_level;
>
> if ( nest_level > 0 ) {
> the_mutex->nest_level = nest_level - 1;
> _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
> - return;
> + return STATUS_SUCCESSFUL;
> }
>
> new_owner = _Thread_queue_First_locked(
> @@ -337,7 +344,7 @@ _CORE_recursive_mutex_Surrender_no_protocol_finalize(
>
> if ( new_owner == NULL ) {
> _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
> - return;
> + return STATUS_SUCCESSFUL;
> }
>
> _Thread_queue_Extract_critical(
> @@ -346,48 +353,9 @@ _CORE_recursive_mutex_Surrender_no_protocol_finalize(
> new_owner,
> queue_context
> );
> -}
> -
> -RTEMS_INLINE_ROUTINE Status_Control _CORE_recursive_mutex_Surrender_no_protocol(
> - CORE_recursive_mutex_Control *the_mutex,
> - const Thread_queue_Operations *operations,
> - Thread_Control *executing,
> - Thread_queue_Context *queue_context
> -)
> -{
> - _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );
> -
> - if ( !_CORE_mutex_Is_owner( &the_mutex->Mutex, executing ) ) {
> - _CORE_mutex_Release( &the_mutex->Mutex, queue_context );
> - return STATUS_NOT_OWNER;
> - }
> -
> - _CORE_recursive_mutex_Surrender_no_protocol_finalize(
> - the_mutex,
> - operations,
> - queue_context
> - );
> return STATUS_SUCCESSFUL;
> }
>
> -/*
> - * The Classic no protocol recursive mutex has the nice property that everyone
> - * can release it.
> - */
> -RTEMS_INLINE_ROUTINE void _CORE_recursive_mutex_Surrender_no_protocol_classic(
> - CORE_recursive_mutex_Control *the_mutex,
> - const Thread_queue_Operations *operations,
> - Thread_queue_Context *queue_context
> -)
> -{
> - _CORE_mutex_Acquire_critical( &the_mutex->Mutex, queue_context );
> - _CORE_recursive_mutex_Surrender_no_protocol_finalize(
> - the_mutex,
> - operations,
> - queue_context
> - );
> -}
> -
> RTEMS_INLINE_ROUTINE void _CORE_ceiling_mutex_Initialize(
> CORE_ceiling_mutex_Control *the_mutex,
> Priority_Control priority_ceiling
> diff --git a/testsuites/sptests/sp42/init.c b/testsuites/sptests/sp42/init.c
> index 55faa9b..6d96ede 100644
> --- a/testsuites/sptests/sp42/init.c
> +++ b/testsuites/sptests/sp42/init.c
> @@ -24,19 +24,12 @@ const char rtems_test_name[] = "SP 42";
>
> #define MAX_TASKS 20
>
> -rtems_task Init(rtems_task_argument argument);
> -rtems_task Locker_task(rtems_task_argument unused);
> -void do_test(
> - rtems_attribute attr,
> - bool extract /* TRUE if extract, not release */
> -);
> -
> /*
> * Carefully chosen to exercise threadq enqueue/dequeue priority logic.
> * Somewhat randomly sorted to ensure than if discipline is FIFO, run-time
> * behavior won't be the same when released.
> */
> -rtems_task_priority Priorities_High[MAX_TASKS] = {
> +static const rtems_task_priority Priorities_High[MAX_TASKS] = {
> 37, 37, 37, 37, /* backward - more 2-n */
> 2, 2, 2, 2, /* forward - multiple are on 2-n chain */
> 4, 3, /* forward - search forward arbitrary */
> @@ -45,7 +38,7 @@ rtems_task_priority Priorities_High[MAX_TASKS] = {
> 34, 34, 34, 34, /* backward - multple on 2-n chain */
> };
>
> -rtems_task_priority Priorities_Low[MAX_TASKS] = {
> +static const rtems_task_priority Priorities_Low[MAX_TASKS] = {
> 13, 13, 13, 13, /* backward - more 2-n */
> 2, 2, 2, 2, /* forward - multiple are on 2-n chain */
> 4, 3, /* forward - search forward arbitrary */
> @@ -54,24 +47,37 @@ rtems_task_priority Priorities_Low[MAX_TASKS] = {
> 12, 12, 12, 12, /* backward - multple on 2-n chain */
> };
>
> -rtems_task_priority *Priorities;
> +static const int Obtain_order[2][MAX_TASKS] = {
> + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 },
> + { 4, 5, 6, 7, 9, 10, 11, 12, 13, 8, 16, 17, 18, 19, 0, 1, 2, 3, 15, 14 }
> +};
> +
> +static const rtems_task_priority *Priorities;
> +
> +static rtems_id Semaphore;
> +static rtems_id Master;
> +static rtems_id Task_id[ MAX_TASKS ];
> +static rtems_name Task_name[ MAX_TASKS ];
> +
> +static rtems_task_argument Obtain_counter;
>
> -rtems_id Semaphore;
> -rtems_id Task_id[ MAX_TASKS ];
> -rtems_name Task_name[ MAX_TASKS ];
> +static enum {
> + FIFO,
> + PRIORITY
> +} Variant;
>
> -rtems_task Locker_task(
> - rtems_task_argument unused
> +static rtems_task Locker_task(
> + rtems_task_argument task_index
> )
> {
> - rtems_id tid;
> - uint32_t task_index;
> - rtems_status_code status;
> + rtems_id tid;
> + rtems_status_code status;
> + rtems_task_argument my_obtain_counter;
>
> status = rtems_task_ident( RTEMS_SELF, RTEMS_SEARCH_ALL_NODES, &tid );
> directive_failed( status, "rtems_task_ident" );
>
> - task_index = task_number( tid ) - 1;
> + rtems_test_assert( task_index == task_number( tid ) - 1 );
>
> status = rtems_semaphore_obtain( Semaphore, RTEMS_DEFAULT_OPTIONS, 0 );
> directive_failed( status, "rtems_semaphore_obtain" );
> @@ -79,28 +85,45 @@ rtems_task Locker_task(
> put_name( Task_name[ task_index ], FALSE );
> puts( " - unblocked - OK" );
>
> + status = rtems_task_wake_after( 10 );
> + directive_failed( status, "rtems_task_wake_after" );
> +
> + my_obtain_counter = Obtain_counter;
> + rtems_test_assert( task_index == Obtain_order[ Variant ][ Obtain_counter ] );
> + ++Obtain_counter;
> +
> + status = rtems_semaphore_release( Semaphore );
> + directive_failed( status, "rtems_semaphore_release" );
> +
> + if ( my_obtain_counter == MAX_TASKS - 1 ) {
> + status = rtems_event_transient_send( Master );
> + directive_failed( status, "rtems_event_transient_send" );
> + }
> +
> (void) rtems_task_delete( RTEMS_SELF );
> }
>
> -void do_test(
> +static void do_test(
> rtems_attribute attr,
> bool extract /* TRUE if extract, not release */
> )
> {
> - rtems_status_code status;
> - int i;
> + rtems_status_code status;
> + rtems_task_argument i;
> +
> + Variant = ( ( attr & RTEMS_PRIORITY ) != 0 ? PRIORITY : FIFO );
> + Obtain_counter = 0;
>
> status = rtems_semaphore_create(
> rtems_build_name( 'S', 'E', 'M', '0' ), /* name = SEM0 */
> - 0, /* unlocked */
> + 0, /* locked */
> RTEMS_BINARY_SEMAPHORE | attr, /* mutex w/desired discipline */
> 0, /* IGNORED */
> &Semaphore
> );
> directive_failed( status, "rtems_semaphore_create" );
>
> - for (i=0 ; i< MAX_TASKS ; i++ ) {
> -
> + for (i = 0 ; i < MAX_TASKS ; i++ ) {
> Task_name[ i ] = rtems_build_name(
> 'T',
> 'A',
> @@ -118,42 +141,42 @@ void do_test(
> );
> directive_failed( status, "rtems_task_create" );
>
> - status = rtems_task_start(
> - Task_id[ i ], Locker_task, (rtems_task_argument)i );
> + status = rtems_task_start( Task_id[ i ], Locker_task, i );
> directive_failed( status, "rtems_task_start" );
> -
> - status = rtems_task_wake_after( 10 );
> - directive_failed( status, "rtems_task_wake_after" );
> }
>
> - for (i=0 ; i< MAX_TASKS ; i++ ) {
> - if ( extract == FALSE ) {
> - status = rtems_semaphore_release( Semaphore );
> - directive_failed( status, "rtems_semaphore_release" );
> -
> - status = rtems_task_wake_after( 100 );
> - directive_failed( status, "rtems_task_wake_after" );
> - } else {
> + if ( extract ) {
> + for (i = 0 ; i< MAX_TASKS ; i++ ) {
> status = rtems_task_delete( Task_id[ i ] );
> directive_failed( status, "rtems_task_delete" );
> }
> }
>
> - /* one extra release for the initial state */
> + /* do the initial release */
> status = rtems_semaphore_release( Semaphore );
> directive_failed( status, "rtems_semaphore_release" );
>
> + if ( !extract ) {
> + status = rtems_event_transient_receive( RTEMS_WAIT, RTEMS_NO_TIMEOUT );
> + directive_failed( status, "rtems_event_transient_receive" );
> + }
> +
> /* now delete the semaphore since no one is waiting and it is unlocked */
> status = rtems_semaphore_delete( Semaphore );
> directive_failed( status, "rtems_semaphore_delete" );
> }
>
> -rtems_task Init(
> +static rtems_task Init(
> rtems_task_argument argument
> )
> {
> + rtems_task_priority prio;
> + rtems_status_code status;
> +
> TEST_BEGIN();
>
> + Master = rtems_task_self();
> +
> if (RTEMS_MAXIMUM_PRIORITY == 255)
> Priorities = Priorities_High;
> else if (RTEMS_MAXIMUM_PRIORITY == 15)
> @@ -163,6 +186,10 @@ rtems_task Init(
> rtems_test_exit( 0 );
> }
>
> + prio = RTEMS_MAXIMUM_PRIORITY - 1;
> + status = rtems_task_set_priority(RTEMS_SELF, prio, &prio);
> + directive_failed( status, "rtems_task_set_priority" );
> +
> if ( sizeof(Priorities_Low) / sizeof(rtems_task_priority) != MAX_TASKS ) {
> puts( "Priorities_Low table does not have right number of entries" );
> rtems_test_exit( 0 );
> @@ -201,6 +228,8 @@ rtems_task Init(
> #define CONFIGURE_MAXIMUM_TASKS MAX_TASKS+1
> #define CONFIGURE_MAXIMUM_SEMAPHORES 1
>
> +#define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
> +
> #define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
>
> #define CONFIGURE_RTEMS_INIT_TASKS_TABLE
> diff --git a/testsuites/sptests/sp42/sp42.scn b/testsuites/sptests/sp42/sp42.scn
> index 01adcf7..206d05c 100644
> --- a/testsuites/sptests/sp42/sp42.scn
> +++ b/testsuites/sptests/sp42/sp42.scn
> @@ -1,4 +1,4 @@
> -*** START OF TEST 42 ***
> +*** BEGIN OF TEST SP 42 ***
> Exercising blocking discipline w/extract in FIFO order
> Exercising blocking discipline w/unblock in FIFO order
> TA00 - unblocked - OK
> @@ -44,4 +44,4 @@ TA02 - unblocked - OK
> TA03 - unblocked - OK
> TA15 - unblocked - OK
> TA14 - unblocked - OK
> -*** END OF TEST 42 ***
> +*** END OF TEST SP 42 ***
> --
> 1.8.4.5
>