[PATCH 1/2] Return status code for _Scheduler_Get_affinity()

Gedare Bloom gedare at rtems.org
Tue Apr 27 16:04:16 UTC 2021


ok

On Mon, Apr 26, 2021 at 2:41 AM Sebastian Huber
<sebastian.huber at embedded-brains.de> wrote:
>
> This avoids having conditional expressions to get the API-specific
> status codes.
> ---
>  cpukit/include/rtems/score/schedulerimpl.h | 7 ++++---
>  cpukit/posix/src/pthreadgetaffinitynp.c    | 7 ++++---
>  cpukit/posix/src/pthreadgetattrnp.c        | 7 ++++---
>  cpukit/rtems/src/taskgetaffinity.c         | 7 ++++---
>  cpukit/score/src/schedulergetaffinity.c    | 9 +++++++--
>  5 files changed, 23 insertions(+), 14 deletions(-)
>
> diff --git a/cpukit/include/rtems/score/schedulerimpl.h b/cpukit/include/rtems/score/schedulerimpl.h
> index c545615919..397789372c 100644
> --- a/cpukit/include/rtems/score/schedulerimpl.h
> +++ b/cpukit/include/rtems/score/schedulerimpl.h
> @@ -693,10 +693,11 @@ RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
>   * @param cpusetsize The size of @a cpuset.
>   * @param[out] cpuset The cpuset that serves as destination for the copy operation
>   *
> - * @retval true The copy operation was lossless.
> - * @retval false The copy operation was not lossless
> + * @retval STATUS_SUCCESSFUL The operation succeeded.
> + *
> + * @retval STATUS_INVALID_NUMBER The processor set was too small.
>   */
> -bool _Scheduler_Get_affinity(
> +Status_Control _Scheduler_Get_affinity(
>    Thread_Control *the_thread,
>    size_t          cpusetsize,
>    cpu_set_t      *cpuset
> diff --git a/cpukit/posix/src/pthreadgetaffinitynp.c b/cpukit/posix/src/pthreadgetaffinitynp.c
> index 977f82c69b..b904aea36c 100644
> --- a/cpukit/posix/src/pthreadgetaffinitynp.c
> +++ b/cpukit/posix/src/pthreadgetaffinitynp.c
> @@ -28,6 +28,7 @@
>
>  #include <rtems/score/threadimpl.h>
>  #include <rtems/score/schedulerimpl.h>
> +#include <rtems/posix/posixapi.h>
>
>  int pthread_getaffinity_np(
>    pthread_t  thread,
> @@ -38,7 +39,7 @@ int pthread_getaffinity_np(
>    Thread_Control   *the_thread;
>    ISR_lock_Context  lock_context;
>    Per_CPU_Control  *cpu_self;
> -  bool              ok;
> +  Status_Control    status;
>
>    if ( cpuset == NULL ) {
>      return EFAULT;
> @@ -53,7 +54,7 @@ int pthread_getaffinity_np(
>    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
>    _Thread_State_acquire_critical( the_thread, &lock_context );
>
> -  ok = _Scheduler_Get_affinity(
> +  status = _Scheduler_Get_affinity(
>      the_thread,
>      cpusetsize,
>      cpuset
> @@ -61,7 +62,7 @@ int pthread_getaffinity_np(
>
>    _Thread_State_release( the_thread, &lock_context );
>    _Thread_Dispatch_enable( cpu_self );
> -  return ok ? 0 : EINVAL;
> +  return _POSIX_Get_error( status );
>  }
>
>  #endif
> diff --git a/cpukit/posix/src/pthreadgetattrnp.c b/cpukit/posix/src/pthreadgetattrnp.c
> index eae29d1de7..5572fb98a5 100644
> --- a/cpukit/posix/src/pthreadgetattrnp.c
> +++ b/cpukit/posix/src/pthreadgetattrnp.c
> @@ -26,6 +26,7 @@
>  #include <string.h>
>
>  #include <rtems/posix/pthreadimpl.h>
> +#include <rtems/posix/posixapi.h>
>  #include <rtems/posix/pthreadattrimpl.h>
>  #include <rtems/posix/priorityimpl.h>
>  #include <rtems/score/schedulerimpl.h>
> @@ -41,7 +42,7 @@ int pthread_getattr_np(
>    Thread_CPU_budget_algorithms  budget_algorithm;
>    const Scheduler_Control      *scheduler;
>    Priority_Control              priority;
> -  bool                          ok;
> +  Status_Control                status;
>
>    if ( attr == NULL ) {
>      return EINVAL;
> @@ -82,7 +83,7 @@ int pthread_getattr_np(
>
>    attr->affinityset = &attr->affinitysetpreallocated;
>    attr->affinitysetsize = sizeof( attr->affinitysetpreallocated );
> -  ok = _Scheduler_Get_affinity(
> +  status = _Scheduler_Get_affinity(
>      the_thread,
>      attr->affinitysetsize,
>      attr->affinityset
> @@ -102,5 +103,5 @@ int pthread_getattr_np(
>    attr->schedpolicy =
>      _POSIX_Thread_Translate_to_sched_policy( budget_algorithm );
>
> -  return ok ? 0 : EINVAL;
> +  return _POSIX_Get_error( status );
>  }
> diff --git a/cpukit/rtems/src/taskgetaffinity.c b/cpukit/rtems/src/taskgetaffinity.c
> index d08c2d6835..6ced283393 100644
> --- a/cpukit/rtems/src/taskgetaffinity.c
> +++ b/cpukit/rtems/src/taskgetaffinity.c
> @@ -21,6 +21,7 @@
>  #endif
>
>  #include <rtems/rtems/tasks.h>
> +#include <rtems/rtems/statusimpl.h>
>  #include <rtems/score/threadimpl.h>
>  #include <rtems/score/schedulerimpl.h>
>
> @@ -33,7 +34,7 @@ rtems_status_code rtems_task_get_affinity(
>    Thread_Control   *the_thread;
>    ISR_lock_Context  lock_context;
>    Per_CPU_Control  *cpu_self;
> -  bool              ok;
> +  Status_Control    status;
>
>    if ( cpuset == NULL ) {
>      return RTEMS_INVALID_ADDRESS;
> @@ -54,7 +55,7 @@ rtems_status_code rtems_task_get_affinity(
>    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
>    _Thread_State_acquire_critical( the_thread, &lock_context );
>
> -  ok = _Scheduler_Get_affinity(
> +  status = _Scheduler_Get_affinity(
>      the_thread,
>      cpusetsize,
>      cpuset
> @@ -62,5 +63,5 @@ rtems_status_code rtems_task_get_affinity(
>
>    _Thread_State_release( the_thread, &lock_context );
>    _Thread_Dispatch_enable( cpu_self );
> -  return ok ? RTEMS_SUCCESSFUL : RTEMS_INVALID_NUMBER;
> +  return _Status_Get( status );
>  }
> diff --git a/cpukit/score/src/schedulergetaffinity.c b/cpukit/score/src/schedulergetaffinity.c
> index 87be699f5b..99dc53609d 100644
> --- a/cpukit/score/src/schedulergetaffinity.c
> +++ b/cpukit/score/src/schedulergetaffinity.c
> @@ -27,7 +27,7 @@
>
>  #include <rtems/score/schedulerimpl.h>
>
> -bool _Scheduler_Get_affinity(
> +Status_Control _Scheduler_Get_affinity(
>    Thread_Control *the_thread,
>    size_t          cpusetsize,
>    cpu_set_t      *cpuset
> @@ -49,5 +49,10 @@ bool _Scheduler_Get_affinity(
>    status = _Processor_mask_To_cpu_set_t( affinity, cpusetsize, cpuset );
>
>    _Scheduler_Release_critical( scheduler, &lock_context );
> -  return status == PROCESSOR_MASK_COPY_LOSSLESS;
> +
> +  if ( status != PROCESSOR_MASK_COPY_LOSSLESS ) {
> +    return STATUS_INVALID_NUMBER;
> +  }
> +
> +  return STATUS_SUCCESSFUL;
>  }
> --
> 2.26.2
>
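For reference, a minimal standalone sketch of the pattern the patch
adopts: the score-level function reports a generic Status_Control and
each API translates it once through its own helper (_POSIX_Get_error()
for POSIX errno values, _Status_Get() for Classic API status codes),
instead of every call site spelling out a ternary on a bool. The names
posix_get_error() and classic_get_status() below are hypothetical
stand-ins for those helpers so the example builds without the RTEMS
headers.

  #include <errno.h>
  #include <stdio.h>

  /* Hypothetical stand-in for the score-level Status_Control; the real
   * definition lives in the RTEMS score headers. */
  typedef enum {
    STATUS_SUCCESSFUL,
    STATUS_INVALID_NUMBER
  } Status_Control;

  /* Stand-in for _POSIX_Get_error(): map the generic status to a POSIX
   * errno value (only the two cases used by this patch). */
  static int posix_get_error( Status_Control status )
  {
    return ( status == STATUS_SUCCESSFUL ) ? 0 : EINVAL;
  }

  /* Stand-in for _Status_Get(): map the generic status to a Classic API
   * status code, shown here as a string for illustration. */
  static const char *classic_get_status( Status_Control status )
  {
    return ( status == STATUS_SUCCESSFUL ) ?
      "RTEMS_SUCCESSFUL" : "RTEMS_INVALID_NUMBER";
  }

  int main( void )
  {
    /* E.g. the cpuset passed to _Scheduler_Get_affinity() was too
     * small, so the copy was lossy. */
    Status_Control status = STATUS_INVALID_NUMBER;

    printf( "POSIX errno:    %d\n", posix_get_error( status ) );
    printf( "Classic status: %s\n", classic_get_status( status ) );
    return 0;
  }

With this split, a new error case added in _Scheduler_Get_affinity()
only needs a mapping in the per-API translation, not a change at every
call site.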

