[PATCH rtems-libbsd 3/7] sys/kern: Add lockmgr support
Gedare Bloom
gedare at rtems.org
Wed Jul 28 14:03:58 UTC 2021
On Tue, Jul 27, 2021 at 2:59 AM <chrisj at rtems.org> wrote:
>
> From: Chris Johns <chrisj at rtems.org>
>
> - See `man lockmgr`
>
> - Implement the lock_object and move the RTEMS mutex to that object
>
> - Add debug support to track the locks with gdb
>
> Update #4475
> ---
> freebsd/sys/sys/_lock.h | 10 +-
> freebsd/sys/sys/_lockmgr.h | 6 +
> freebsd/sys/sys/_mutex.h | 5 -
> freebsd/sys/sys/_rwlock.h | 5 -
> freebsd/sys/sys/_sx.h | 5 -
> rtemsbsd/include/machine/_kernel_lock.h | 2 +-
> rtemsbsd/include/machine/rtems-bsd-mutex.h | 8 +-
> .../include/machine/rtems-bsd-muteximpl.h | 87 ++-
> rtemsbsd/include/machine/rtems-bsd-thread.h | 1 +
> rtemsbsd/rtems/rtems-kernel-epoch.c | 2 +-
> rtemsbsd/rtems/rtems-kernel-jail.c | 10 +-
> rtemsbsd/rtems/rtems-kernel-lockmgr.c | 578 ++++++++++++++++++
> rtemsbsd/rtems/rtems-kernel-mutex.c | 21 +-
> rtemsbsd/rtems/rtems-kernel-muteximpl.c | 5 +-
> rtemsbsd/rtems/rtems-kernel-rwlock.c | 28 +-
> rtemsbsd/rtems/rtems-kernel-sx.c | 34 +-
> 16 files changed, 724 insertions(+), 83 deletions(-)
> create mode 100644 rtemsbsd/rtems/rtems-kernel-lockmgr.c
>
> diff --git a/freebsd/sys/sys/_lock.h b/freebsd/sys/sys/_lock.h
> index ae10254c..9e3388d5 100644
> --- a/freebsd/sys/sys/_lock.h
> +++ b/freebsd/sys/sys/_lock.h
> @@ -32,15 +32,19 @@
>
> #ifndef _SYS__LOCK_H_
> #define _SYS__LOCK_H_
> +#ifdef __rtems__
> +#include <machine/rtems-bsd-mutex.h>
> +#endif /* __rtems__ */
>
> struct lock_object {
> -#ifndef __rtems__
> const char *lo_name; /* Individual lock name. */
> u_int lo_flags;
> u_int lo_data; /* General class specific data. */
> +#ifndef __rtems__
> struct witness *lo_witness; /* Data for witness. */
> -#else /* __rtems__ */
> - unsigned int lo_flags;
> +#endif /* __rtems__ */
> +#ifdef __rtems__
You can use
#else /* __rtems__ */
instead of the endif+ifdef pair here.
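For example, the struct could then read (same content as the patch, just
folding the two conditionals into one):

    struct lock_object {
            const char      *lo_name;       /* Individual lock name. */
            u_int           lo_flags;
            u_int           lo_data;        /* General class specific data. */
    #ifndef __rtems__
            struct witness  *lo_witness;    /* Data for witness. */
    #else /* __rtems__ */
            rtems_bsd_mutex lo_mtx;
    #endif /* __rtems__ */
    };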
> + rtems_bsd_mutex lo_mtx;
> #endif /* __rtems__ */
> };
>
> diff --git a/freebsd/sys/sys/_lockmgr.h b/freebsd/sys/sys/_lockmgr.h
> index 62e50df1..8e9ac276 100644
> --- a/freebsd/sys/sys/_lockmgr.h
> +++ b/freebsd/sys/sys/_lockmgr.h
> @@ -34,7 +34,9 @@
> #define _SYS__LOCKMGR_H_
>
> #ifdef DEBUG_LOCKS
> +#ifndef __rtems__
> #include <sys/_stack.h>
> +#endif /* __rtems__ */
> #endif
>
> struct lock {
> @@ -44,7 +46,11 @@ struct lock {
> int lk_timo;
> int lk_pri;
> #ifdef DEBUG_LOCKS
> +#ifndef __rtems__
> struct stack lk_stack;
> +#else /* __rtems__ */
> + void* lk_stack;
> +#endif /* __rtems__ */
> #endif
> };
>
> diff --git a/freebsd/sys/sys/_mutex.h b/freebsd/sys/sys/_mutex.h
> index 5252aee1..418b62b4 100644
> --- a/freebsd/sys/sys/_mutex.h
> +++ b/freebsd/sys/sys/_mutex.h
> @@ -32,9 +32,6 @@
>
> #ifndef _SYS__MUTEX_H_
> #define _SYS__MUTEX_H_
> -#ifdef __rtems__
> -#include <machine/rtems-bsd-mutex.h>
> -#endif /* __rtems__ */
>
> #include <machine/param.h>
>
> @@ -51,8 +48,6 @@ struct mtx {
> struct lock_object lock_object; /* Common lock properties. */
> #ifndef __rtems__
> volatile uintptr_t mtx_lock; /* Owner and flags. */
> -#else /* __rtems__ */
> - rtems_bsd_mutex mutex;
> #endif /* __rtems__ */
> };
>
> diff --git a/freebsd/sys/sys/_rwlock.h b/freebsd/sys/sys/_rwlock.h
> index 318592d5..6110a494 100644
> --- a/freebsd/sys/sys/_rwlock.h
> +++ b/freebsd/sys/sys/_rwlock.h
> @@ -30,9 +30,6 @@
>
> #ifndef _SYS__RWLOCK_H_
> #define _SYS__RWLOCK_H_
> -#ifdef __rtems__
> -#include <machine/rtems-bsd-mutex.h>
> -#endif /* __rtems__ */
>
> #include <machine/param.h>
>
> @@ -49,8 +46,6 @@ struct rwlock {
> struct lock_object lock_object;
> #ifndef __rtems__
> volatile uintptr_t rw_lock;
> -#else /* __rtems__ */
> - rtems_bsd_mutex mutex;
> #endif /* __rtems__ */
> };
>
> diff --git a/freebsd/sys/sys/_sx.h b/freebsd/sys/sys/_sx.h
> index 328a43a4..d8f86296 100644
> --- a/freebsd/sys/sys/_sx.h
> +++ b/freebsd/sys/sys/_sx.h
> @@ -32,9 +32,6 @@
>
> #ifndef _SYS__SX_H_
> #define _SYS__SX_H_
> -#ifdef __rtems__
> -#include <machine/rtems-bsd-mutex.h>
> -#endif /* __rtems__ */
>
> /*
> * Shared/exclusive lock main structure definition.
> @@ -43,8 +40,6 @@ struct sx {
> struct lock_object lock_object;
> #ifndef __rtems__
> volatile uintptr_t sx_lock;
> -#else /* __rtems__ */
> - rtems_bsd_mutex mutex;
> #endif /* __rtems__ */
> };
>
> diff --git a/rtemsbsd/include/machine/_kernel_lock.h b/rtemsbsd/include/machine/_kernel_lock.h
> index dc4f8255..32a41cb8 100644
> --- a/rtemsbsd/include/machine/_kernel_lock.h
> +++ b/rtemsbsd/include/machine/_kernel_lock.h
> @@ -35,9 +35,9 @@
> #define _MACHINE__KERNEL_LOCK_H_
>
> #include <sys/queue.h>
> -#include <sys/_lock.h>
> #include <sys/ktr_class.h>
>
> +struct lock_object;
> struct lock_list_entry;
> struct thread;
>
> diff --git a/rtemsbsd/include/machine/rtems-bsd-mutex.h b/rtemsbsd/include/machine/rtems-bsd-mutex.h
> index dd728cdc..54873dc3 100644
> --- a/rtemsbsd/include/machine/rtems-bsd-mutex.h
> +++ b/rtemsbsd/include/machine/rtems-bsd-mutex.h
> @@ -40,16 +40,20 @@
> #ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_MUTEX_H_
> #define _RTEMS_BSD_MACHINE_RTEMS_BSD_MUTEX_H_
>
> -#include <rtems/score/thread.h>
> +#include <sys/queue.h>
> +
> #include <rtems/score/threadq.h>
>
> #ifdef __cplusplus
> extern "C" {
> #endif /* __cplusplus */
>
> -typedef struct {
> +typedef struct rtems_bsd_mutex {
> Thread_queue_Control queue;
> int nest_level;
> +#if RTEMS_DEBUG
> + TAILQ_ENTRY(rtems_bsd_mutex) mutex_list;
> +#endif /* RTEMS_DEBUG */
> } rtems_bsd_mutex;
>
> #ifdef __cplusplus
> diff --git a/rtemsbsd/include/machine/rtems-bsd-muteximpl.h b/rtemsbsd/include/machine/rtems-bsd-muteximpl.h
> index b362e524..2e180b97 100644
> --- a/rtemsbsd/include/machine/rtems-bsd-muteximpl.h
> +++ b/rtemsbsd/include/machine/rtems-bsd-muteximpl.h
> @@ -50,6 +50,8 @@
>
> #include <inttypes.h>
>
> +#include <rtems/thread.h>
> +#include <rtems/score/thread.h>
> #include <rtems/score/threadimpl.h>
> #include <rtems/score/threadqimpl.h>
>
> @@ -60,17 +62,48 @@ extern "C" {
> #define RTEMS_BSD_MUTEX_TQ_OPERATIONS \
> &_Thread_queue_Operations_priority_inherit
>
> +#if RTEMS_DEBUG
> +/*
> + * Resource tracking. In GDB you can:
> + *
> + * define mutex-owned
> + * set $m = $arg0
> + * set $c = 0
> + * while $m != 0
> + * set $c = $c + 1
> + * if $m->queue.Queue.owner != 0
> + * printf "%08x %-40s\n", $m->queue.Queue.owner, $m->queue.Queue.name
> + * end
> + * set $m = $m->mutex_list.tqe_next
> + * end
> + * printf "Total: %d\n", $c
> + * end
> + *
> + * (gdb) mutex-owned _bsd_bsd_mutexlist->tqh_first
> + */
> +extern TAILQ_HEAD(bsd_mutex_list, rtems_bsd_mutex) bsd_mutexlist;
> +extern rtems_mutex bsd_mutexlist_lock;
> +#endif /* RTEMS_DEBUG */
> +
> static inline void
> -rtems_bsd_mutex_init(struct lock_object *lock, rtems_bsd_mutex *m,
> - struct lock_class *class, const char *name, const char *type, int flags)
> +rtems_bsd_mutex_init(struct lock_object *lk, struct lock_class *class,
> + const char *name, const char *type, int flags)
> {
> + rtems_bsd_mutex *m = &lk->lo_mtx;
> +
> _Thread_queue_Initialize(&m->queue, name);
> m->nest_level = 0;
>
> - lock_init(lock, class, name, type, flags);
> + lock_init(lk, class, name, type, flags);
> +
> +#if RTEMS_DEBUG
> + rtems_mutex_lock(&bsd_mutexlist_lock);
> + TAILQ_INSERT_TAIL(&bsd_mutexlist, m, mutex_list);
> + rtems_mutex_unlock(&bsd_mutexlist_lock);
> +#endif /* RTEMS_DEBUG */
> }
>
> -void rtems_bsd_mutex_lock_more(struct lock_object *lock, rtems_bsd_mutex *m,
> +void rtems_bsd_mutex_lock_more(struct lock_object *lk,
> Thread_Control *owner, Thread_Control *executing,
> Thread_queue_Context *queue_context);
>
> @@ -117,12 +150,13 @@ rtems_bsd_mutex_set_isr_level(Thread_queue_Context *queue_context,
> }
>
> static inline void
> -rtems_bsd_mutex_lock(struct lock_object *lock, rtems_bsd_mutex *m)
> +rtems_bsd_mutex_lock(struct lock_object *lk)
> {
> ISR_Level isr_level;
> Thread_queue_Context queue_context;
> Thread_Control *executing;
> Thread_Control *owner;
> + rtems_bsd_mutex *m = &lk->lo_mtx;
>
> _Thread_queue_Context_initialize(&queue_context);
> rtems_bsd_mutex_isr_disable(isr_level, &queue_context);
> @@ -137,19 +171,20 @@ rtems_bsd_mutex_lock(struct lock_object *lock, rtems_bsd_mutex *m)
> rtems_bsd_mutex_release(m, isr_level, &queue_context);
> } else {
> rtems_bsd_mutex_set_isr_level(&queue_context, isr_level);
> - rtems_bsd_mutex_lock_more(lock, m, owner, executing,
> + rtems_bsd_mutex_lock_more(lk, owner, executing,
> &queue_context);
> }
> }
>
> static inline int
> -rtems_bsd_mutex_trylock(struct lock_object *lock, rtems_bsd_mutex *m)
> +rtems_bsd_mutex_trylock(struct lock_object *lk)
> {
> int success;
> ISR_Level isr_level;
> Thread_queue_Context queue_context;
> Thread_Control *executing;
> Thread_Control *owner;
> + rtems_bsd_mutex *m = &lk->lo_mtx;
>
> _Thread_queue_Context_initialize(&queue_context);
> rtems_bsd_mutex_isr_disable(isr_level, &queue_context);
> @@ -163,7 +198,7 @@ rtems_bsd_mutex_trylock(struct lock_object *lock, rtems_bsd_mutex *m)
> _Thread_Resource_count_increment(executing);
> success = 1;
> } else if (owner == executing) {
> - if ((lock->lo_flags & LO_RECURSABLE) == 0) {
> + if ((lk->lo_flags & LO_RECURSABLE) == 0) {
> rtems_bsd_mutex_release(m, isr_level, &queue_context);
> panic("mutex trylock: %s: not LO_RECURSABLE\n",
> m->queue.Queue.name);
> @@ -181,13 +216,14 @@ rtems_bsd_mutex_trylock(struct lock_object *lock, rtems_bsd_mutex *m)
> }
>
> static inline void
> -rtems_bsd_mutex_unlock(rtems_bsd_mutex *m)
> +rtems_bsd_mutex_unlock(struct lock_object *lk)
> {
> ISR_Level isr_level;
> Thread_queue_Context queue_context;
> Thread_Control *owner;
> Thread_Control *executing;
> int nest_level;
> + rtems_bsd_mutex *m = &lk->lo_mtx;
>
> _Thread_queue_Context_initialize(&queue_context);
> rtems_bsd_mutex_isr_disable(isr_level, &queue_context);
> @@ -226,45 +262,52 @@ rtems_bsd_mutex_unlock(rtems_bsd_mutex *m)
> }
>
> static inline Thread_Control *
> -rtems_bsd_mutex_owner(const rtems_bsd_mutex *m)
> +rtems_bsd_mutex_owner(struct lock_object *lk)
> {
> -
> + rtems_bsd_mutex *m = &lk->lo_mtx;
> return (m->queue.Queue.owner);
> }
>
> static inline int
> -rtems_bsd_mutex_owned(const rtems_bsd_mutex *m)
> +rtems_bsd_mutex_owned(struct lock_object *lk)
> {
> -
> - return (rtems_bsd_mutex_owner(m) == _Thread_Get_executing());
> + return (rtems_bsd_mutex_owner(lk) == _Thread_Get_executing());
> }
>
> static inline int
> -rtems_bsd_mutex_recursed(const rtems_bsd_mutex *m)
> +rtems_bsd_mutex_recursed(struct lock_object *lk)
> {
> -
> + rtems_bsd_mutex *m = &lk->lo_mtx;
> return (m->nest_level != 0);
> }
>
> static inline const char *
> -rtems_bsd_mutex_name(const rtems_bsd_mutex *m)
> +rtems_bsd_mutex_name(struct lock_object *lk)
> {
> -
> + rtems_bsd_mutex *m = &lk->lo_mtx;
> return (m->queue.Queue.name);
> }
>
> static inline void
> -rtems_bsd_mutex_destroy(struct lock_object *lock, rtems_bsd_mutex *m)
> +rtems_bsd_mutex_destroy(struct lock_object *lk)
> {
> + rtems_bsd_mutex *m = &lk->lo_mtx;
> +
> BSD_ASSERT(m->queue.Queue.heads == NULL);
>
> - if (rtems_bsd_mutex_owned(m)) {
> + if (rtems_bsd_mutex_owned(lk)) {
> m->nest_level = 0;
> - rtems_bsd_mutex_unlock(m);
> + rtems_bsd_mutex_unlock(lk);
> }
>
> +#if RTEMS_DEBUG
> + rtems_mutex_lock(&bsd_mutexlist_lock);
> + TAILQ_REMOVE(&bsd_mutexlist, m, mutex_list);
> + rtems_mutex_unlock(&bsd_mutexlist_lock);
> +#endif /* RTEMS_DEBUG */
> +
> _Thread_queue_Destroy(&m->queue);
> - lock_destroy(lock);
> + lock_destroy(lk);
> }
>
> #ifdef __cplusplus
> diff --git a/rtemsbsd/include/machine/rtems-bsd-thread.h b/rtemsbsd/include/machine/rtems-bsd-thread.h
> index 28286b8e..6b9259c4 100644
> --- a/rtemsbsd/include/machine/rtems-bsd-thread.h
> +++ b/rtemsbsd/include/machine/rtems-bsd-thread.h
> @@ -45,6 +45,7 @@
> #include <sys/proc.h>
>
> #include <rtems.h>
> +#include <rtems/score/thread.h>
>
> extern sbintime_t rtems_bsd_sbt_per_watchdog_tick;
>
> diff --git a/rtemsbsd/rtems/rtems-kernel-epoch.c b/rtemsbsd/rtems/rtems-kernel-epoch.c
> index 642b5854..858aa4ba 100644
> --- a/rtemsbsd/rtems/rtems-kernel-epoch.c
> +++ b/rtemsbsd/rtems/rtems-kernel-epoch.c
> @@ -32,8 +32,8 @@
>
> #include <sys/types.h>
> #include <sys/kernel.h>
> -#include <sys/epoch.h>
> #include <sys/mutex.h>
> +#include <sys/epoch.h>
> #include <sys/sx.h>
> #include <sys/systm.h>
>
> diff --git a/rtemsbsd/rtems/rtems-kernel-jail.c b/rtemsbsd/rtems/rtems-kernel-jail.c
> index e53e4cc2..da538856 100644
> --- a/rtemsbsd/rtems/rtems-kernel-jail.c
> +++ b/rtemsbsd/rtems/rtems-kernel-jail.c
> @@ -97,10 +97,12 @@ struct prison prison0 = {
> * structure.
> */
> .pr_mtx = {
> - .lock_object = { .lo_flags = LO_INITIALIZED },
> - .mutex = {
> - .queue = THREAD_QUEUE_INITIALIZER("jail mutex"),
> - .nest_level = 0
> + .lock_object = {
> + .lo_flags = LO_INITIALIZED,
> + .lo_mtx = {
> + .queue = THREAD_QUEUE_INITIALIZER("jail mutex"),
> + .nest_level = 0
> + }
> }
> },
>
> diff --git a/rtemsbsd/rtems/rtems-kernel-lockmgr.c b/rtemsbsd/rtems/rtems-kernel-lockmgr.c
> new file mode 100644
> index 00000000..36e7a82f
> --- /dev/null
> +++ b/rtemsbsd/rtems/rtems-kernel-lockmgr.c
> @@ -0,0 +1,578 @@
> +/**
> + * @file
> + *
> + * @ingroup rtems_bsd_rtems
> + *
> + * @brief TODO.
> + */
> +
> +/*
> + * Copyright (c) 2020 Chris Johns. All rights reserved.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in the
> + * documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#include <machine/rtems-bsd-kernel-space.h>
> +#include <machine/rtems-bsd-muteximpl.h>
> +#include <machine/rtems-bsd-thread.h>
> +
> +#include <sys/param.h>
> +#include <sys/types.h>
> +#include <sys/systm.h>
> +#include <sys/lock.h>
> +#include <sys/lockmgr.h>
> +#include <sys/mutex.h>
> +#include <sys/proc.h>
> +#include <sys/conf.h>
> +
> +#ifdef DEBUG_LOCKS
> +#define STACK_PRINT(lk) printf("caller: %p\n", (lk)->lk_stack)
> +#define STACK_SAVE(lk) (lk)->lk_stack = __builtin_return_address(0)
> +#define STACK_ZERO(lk) (lk)->lk_stack = NULL
> +#else
> +#define STACK_PRINT(lk)
> +#define STACK_SAVE(lk)
> +#define STACK_ZERO(lk)
> +#endif
> +
> +static void assert_lockmgr(const struct lock_object *lock, int how);
> +static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
> +static uintptr_t unlock_lockmgr(struct lock_object *lock);
> +
> +#define lockmgr_xlocked(lk) \
> + rtems_bsd_mutex_owned(RTEMS_DECONST(struct lock_object*, &lk->lock_object))
> +#define lockmgr_disowned(lk) \
> + !rtems_bsd_mutex_owned(RTEMS_DECONST(struct lock_object*, &lk->lock_object))
> +
> +struct lock_class lock_class_lockmgr = {
> + .lc_name = "lockmgr",
> + .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
> + .lc_assert = assert_lockmgr,
> +#ifdef DDB
> + .lc_ddb_show = db_show_lockmgr,
> +#endif
> + .lc_lock = lock_lockmgr,
> + .lc_unlock = unlock_lockmgr,
> +#ifdef KDTRACE_HOOKS
> + .lc_owner = owner_lockmgr,
> +#endif
> +};
> +
> +static void
> +assert_lockmgr(const struct lock_object *lock, int what)
> +{
> + panic("lockmgr locks do not support assertions");
> +}
> +
> +static void
> +lock_lockmgr(struct lock_object *lock, uintptr_t how)
> +{
> + panic("lockmgr locks do not support sleep interlocking");
> +}
> +
> +static uintptr_t
> +unlock_lockmgr(struct lock_object *lock)
> +{
> + panic("lockmgr locks do not support sleep interlocking");
> +}
> +
Should there be different messages for the two cases here?
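For example (just a thought), the messages could name the lock_class
operation that was attempted:

    static void
    lock_lockmgr(struct lock_object *lock, uintptr_t how)
    {
            panic("lockmgr locks do not support sleep interlocking (lc_lock)");
    }

    static uintptr_t
    unlock_lockmgr(struct lock_object *lock)
    {
            panic("lockmgr locks do not support sleep interlocking (lc_unlock)");
    }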
> +static struct thread *
> +lockmgr_xholder(const struct lock *lk)
> +{
> + uintptr_t x;
> + x = lk->lk_lock;
> + if ((x & LK_SHARE))
> + return NULL;
> + return rtems_bsd_get_thread(lk->lock_object.lo_mtx.queue.Queue.owner);
> +}
> +
> +static void
> +lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
Is there a good reason to use ilk here instead of lk as elsewhere?
> +{
> + if (flags & LK_INTERLOCK) {
> + struct lock_class *class = LOCK_CLASS(ilk);
> + class->lc_unlock(ilk);
> + }
> +}
> +
> +#ifdef RTEMS_BSD_LOCKMGR_TRACE
> +static void
> +lockmgr_trace(const char* label, const char dir, const struct lock *lk)
> +{
> + printf("bsd: lck: %s %c lk=%p (%s) lk_lock=%08x rec=%d mtx=%c/%d\n",
> + label, dir, lk, lk->lock_object.lo_mtx.queue.Queue.name,
> + lk->lk_lock, lk->lk_recurse,
> + lk->lock_object.lo_mtx.queue.Queue.owner != NULL ? 'L' : 'U',
> + lk->lock_object.lo_mtx.nest_level);
> +}
> +#else
> +#define lockmgr_trace(lm, d, lk)
> +#endif
> +
> +static int
> +lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
> + const char *file, int line)
> +{
> + uintptr_t x;
> + int error = 0;
> + lockmgr_trace("slock", 'I', lk);
> + rtems_bsd_mutex_lock(&lk->lock_object);
> + x = lk->lk_lock;
> + atomic_store_rel_ptr(&lk->lk_lock, x + LK_ONE_SHARER);
> + if (rtems_bsd_mutex_recursed(&lk->lock_object))
> + lk->lk_recurse++;
> + else
> + lk->lk_recurse = 0;
> + lockmgr_trace("slock", 'O', lk);
> + LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
> + lockmgr_exit(flags, ilk, 0);
> + return error;
error is always 0 here, so the shared-lock path can never fail?
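If the shared path is ever meant to be able to fail (for example when
LK_NOWAIT is passed), it could mirror the LK_NOWAIT handling in
lockmgr_xlock_hard below; a rough, untested sketch reusing the function's
existing locals:

    if ((flags & LK_NOWAIT) != 0) {
            if (!rtems_bsd_mutex_trylock(&lk->lock_object))
                    error = EBUSY;
    } else {
            rtems_bsd_mutex_lock(&lk->lock_object);
    }
    if (error == 0) {
            /* existing shared-count and recursion bookkeeping */
            x = lk->lk_lock;
            atomic_store_rel_ptr(&lk->lk_lock, x + LK_ONE_SHARER);
            if (rtems_bsd_mutex_recursed(&lk->lock_object))
                    lk->lk_recurse++;
            else
                    lk->lk_recurse = 0;
    }
    lockmgr_exit(flags, ilk, 0);
    return error;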
> +}
> +
> +static int
> +lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
> + const char *file, int line)
> +{
> + int error = 0;
> + lockmgr_trace("xlock", 'I', lk);
> + if ((flags & LK_NOWAIT) != 0) {
> + if (!rtems_bsd_mutex_trylock(&lk->lock_object))
> + error = EBUSY;
> + } else {
> + rtems_bsd_mutex_lock(&lk->lock_object);
> + }
> + if (error == 0) {
> + atomic_store_rel_ptr(&lk->lk_lock, 0);
> + if (rtems_bsd_mutex_recursed(&lk->lock_object))
> + lk->lk_recurse++;
> + else
> + lk->lk_recurse = 0;
> + lockmgr_trace("xlock", 'O', lk);
> + }
> + lockmgr_exit(flags, ilk, 0);
> + return error;
> +}
> +
> +static int
> +lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
> + const char *file, int line)
> +{
> + uintptr_t x, v;
> + int error = 0;
> + LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file, line);
> + lockmgr_trace("xupgrade", 'I', lk);
> + v = lk->lk_lock;
> + x = v & ~LK_SHARE;
> + atomic_store_rel_ptr(&lk->lk_lock, x);
I think this function is meant to upgrade a shared (waiters/spinner) lock to a
non-shared (spinner) lock? I'm not confident about the correctness here. It
looks to me like there can be a race condition if multiple waiters attempt to
upgrade in parallel: one waiter is preempted after it loads `v = lk->lk_lock`,
another waiter then loads and updates `lk_lock`, and the first waiter's later
store clobbers that update. Beyond that, nothing seems to prevent two
different waiters from upgrading the lock in sequence either. I could be
wrong (of course).
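If concurrent updates of lk_lock are possible here, a compare-and-swap loop
would at least close the window between the load and the store; a rough,
untested sketch (assuming atomic_cmpset_ptr is available in this build, and
ignoring the LK_UPGRADE vs LK_TRYUPGRADE distinction):

    for (;;) {
            v = lk->lk_lock;
            /* Only a single shared holder should be able to upgrade. */
            if (LK_SHARERS(v) > 1) {
                    error = EBUSY;
                    break;
            }
            x = v & ~LK_SHARE;
            /* Retry if lk_lock changed between the load and the store. */
            if (atomic_cmpset_ptr(&lk->lk_lock, v, x))
                    break;
    }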
> + lockmgr_trace("xupgrade", 'O', lk);
> + lockmgr_exit(flags, ilk, 0);
> + return error;
> +}
> +
> +static int
> +lockmgr_downgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
> + const char *file, int line)
> +{
> + uintptr_t x;
> + LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
> + lockmgr_trace("xdowngrade", 'I', lk);
> + x = lk->lk_lock;
> + x &= LK_ALL_WAITERS;
> + atomic_store_rel_ptr(&lk->lk_lock, x);
> + lockmgr_trace("xdowngrade", 'O', lk);
> + return 0;
> +}
> +
> +int
> +lockmgr_lock_fast_path(struct lock *lk, u_int flags,
> + struct lock_object *ilk, const char *file, int line)
> +{
> + uintptr_t x, tid;
> + u_int op;
> + bool locked;
> +
> + if (__predict_false(panicstr != NULL))
> + return (0);
> +
> + op = flags & LK_TYPE_MASK;
> + locked = false;
> + switch (op) {
> + case LK_SHARED:
> + if (!__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
> + return (lockmgr_slock_hard(lk, flags, ilk, file, line));
> + /* fall through */
> + case LK_EXCLUSIVE:
> + return (lockmgr_xlock_hard(lk, flags, ilk, file, line));
> + break;
> + case LK_UPGRADE:
> + case LK_TRYUPGRADE:
> + return (lockmgr_upgrade(lk, flags, ilk, file, line));
> + break;
> + case LK_DOWNGRADE:
> + return (lockmgr_downgrade(lk, flags, ilk, file, line));
> + break;
> + default:
> + break;
> + }
> +
> + panic("unsupported lockmgr op: %d\n", op);
> +}
> +
> +static int
> +lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
> + const char *file, int line)
> +{
> + uintptr_t v;
> + int error = 0;
> + LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
> + lockmgr_trace("sunlock", 'I', lk);
> + x = lk->lk_lock;
> + if (x == LK_UNLOCKED)
> + panic("sunlock not locked");
> + if (rtems_bsd_mutex_recursed(&lk->lock_object))
> + lk->lk_recurse--;
> + atomic_store_rel_ptr(&lk->lk_lock, x - LK_ONE_SHARER);
> + rtems_bsd_mutex_unlock(&lk->lock_object);
> + lockmgr_trace("sunlock", 'O', lk);
> + lockmgr_exit(flags, ilk, 0);
> + return error;
> +}
> +
> +static int
> +lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
> + const char *file, int line)
> +{
> + int error = 0;
> + LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
> + lockmgr_trace("xunlock", 'I', lk);
> + if (rtems_bsd_mutex_recursed(&lk->lock_object))
> + lk->lk_recurse--;
> + if (lk->lk_recurse == 0) {
> + uintptr_t v, x;
> + x = lk->lk_lock;
> + if (x != 0)
> + x -= LK_ONE_SHARER;
> + v = x | LK_SHARE;
> + atomic_store_rel_ptr(&lk->lk_lock, v);
> + }
> + rtems_bsd_mutex_unlock(&lk->lock_object);
> + lockmgr_trace("xunlock", 'O', lk);
> + lockmgr_exit(flags, ilk, 0);
> + return error;
> +}
> +
> +int
> +lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
> +{
> + struct lock_class *class;
> + uintptr_t x, tid;
> + const char *file;
> + int line;
> +
> + if (__predict_false(panicstr != NULL))
> + return (0);
> +
> + file = __FILE__;
> + line = __LINE__;
> +
> + x = lk->lk_lock;
> + if (__predict_true(x & LK_SHARE) != 0) {
> + return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
> + } else {
> + return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
> + }
> +}
> +
> +int
> +lockstatus(const struct lock *lk)
> +{
> + uintptr_t v, x;
> + int ret;
> +
> + ret = LK_SHARED;
> + x = lk->lk_lock;
> +
> + if ((x & LK_SHARE) == 0) {
> + v = rtems_bsd_mutex_owned(RTEMS_DECONST(struct lock_object*, &lk->lock_object));
> + if (v)
> + ret = LK_EXCLUSIVE;
> + else
> + ret = LK_EXCLOTHER;
> + } else if (x == LK_UNLOCKED) {
> + ret = 0;
> + }
There's no final else {} here. If (x & LK_SHARE) != 0 and x != LK_UNLOCKED,
ret falls through with its initial value LK_SHARED; is that the intended
result for a held shared lock, or would it be clearer to initialize ret = 0
and set LK_SHARED explicitly?
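Something along these lines would make the intent explicit (a sketch only,
keeping the current return values):

    ret = 0;
    x = lk->lk_lock;
    if ((x & LK_SHARE) == 0) {
            v = rtems_bsd_mutex_owned(RTEMS_DECONST(struct lock_object*,
                &lk->lock_object));
            ret = v ? LK_EXCLUSIVE : LK_EXCLOTHER;
    } else if (x != LK_UNLOCKED) {
            ret = LK_SHARED;
    }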
> +
> + lockmgr_trace("status", 'O', lk);
> +
> + return (ret);
> +}
> +
> +void
> +lockallowrecurse(struct lock *lk)
> +{
> + lk->lock_object.lo_flags |= LO_RECURSABLE;
> +}
> +
> +void
> +lockallowshare(struct lock *lk)
> +{
> + lk->lock_object.lo_flags &= ~LK_NOSHARE;
> +}
> +
> +void
> +lockdisablerecurse(struct lock *lk)
> +{
> + lk->lock_object.lo_flags &= ~LO_RECURSABLE;
> +}
> +
> +void
> +lockdisableshare(struct lock *lk)
> +{
> + lk->lock_object.lo_flags |= LK_NOSHARE;
> +}
> +
> +void
> +lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
> +{
> + int iflags;
> +
> + MPASS((flags & ~LK_INIT_MASK) == 0);
> + ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
> + ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
> + &lk->lk_lock));
> +
> + iflags = LO_SLEEPABLE | LO_UPGRADABLE;
> + if (flags & LK_CANRECURSE)
> + iflags |= LO_RECURSABLE;
> + if ((flags & LK_NODUP) == 0)
> + iflags |= LO_DUPOK;
> + if (flags & LK_NOPROFILE)
> + iflags |= LO_NOPROFILE;
> + if ((flags & LK_NOWITNESS) == 0)
> + iflags |= LO_WITNESS;
> + if (flags & LK_QUIET)
> + iflags |= LO_QUIET;
> + if (flags & LK_IS_VNODE)
> + iflags |= LO_IS_VNODE;
> + iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
> +
> + rtems_bsd_mutex_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
> +
> + lk->lk_lock = LK_UNLOCKED;
> + lk->lk_recurse = 0;
> + lk->lk_exslpfail = 0;
> + lk->lk_timo = timo;
> + lk->lk_pri = pri;
> + STACK_ZERO(lk);
> +}
> +
> +void
> +lockdestroy(struct lock *lk)
> +{
> + KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
> + KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
> + KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
> + rtems_bsd_mutex_destroy(&lk->lock_object);
> +}
> +
> +void
> +lockmgr_printinfo(const struct lock *lk)
> +{
> + struct thread *td;
> + uintptr_t x;
> +
> + if (lk->lk_lock == LK_UNLOCKED)
> + printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
> + else if (lk->lk_lock & LK_SHARE)
> + printf("lock type %s: SHARED (count %ju)\n",
> + lk->lock_object.lo_name,
> + (uintmax_t)LK_SHARERS(lk->lk_lock));
> + else {
> + td = lockmgr_xholder(lk);
> + if (td == NULL)
> + printf("lock type %s: not owned\n",
> + lk->lock_object.lo_name);
> + else
> + printf("lock type %s: EXCL by thread %p "
> + "(pid %d, rtems, tid %d)\n", lk->lock_object.lo_name,
> + td, td->td_proc->p_pid,
> + td->td_tid);
> + }
> +
> + x = lk->lk_lock;
> + if (x & LK_EXCLUSIVE_WAITERS)
> + printf(" with exclusive waiters pending\n");
> + if (x & LK_SHARED_WAITERS)
> + printf(" with shared waiters pending\n");
> + if (x & LK_EXCLUSIVE_SPINNERS)
> + printf(" with exclusive spinners pending\n");
> +
> + STACK_PRINT(lk);
> +}
> +
> +int
> +__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
> + const char *wmesg, int prio, int timo, const char *file, int line)
> +{
> + struct lock_class *class;
> + uintptr_t x;
> + u_int op = (flags & LK_TYPE_MASK);
> + int error = 0;
> +
> + if (panicstr != NULL)
> + return (0);
> +
> + class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
> +
> + if (lk->lock_object.lo_flags & LK_NOSHARE) {
> + switch (op) {
> + case LK_SHARED:
> + op = LK_EXCLUSIVE;
> + break;
> + case LK_UPGRADE:
> + case LK_TRYUPGRADE:
> + case LK_DOWNGRADE:
> + if (flags & LK_INTERLOCK)
> + class->lc_unlock(ilk);
> + return (0);
> + }
> + }
> +
> + switch (op) {
> + case LK_SHARED:
> + return (lockmgr_slock_hard(lk, flags, ilk, file, line));
> + break;
> + case LK_UPGRADE:
> + case LK_TRYUPGRADE:
> + return (lockmgr_upgrade(lk, flags, ilk, file, line));
> + break;
> + case LK_EXCLUSIVE:
> + return (lockmgr_xlock_hard(lk, flags, ilk, file, line));
> + break;
> + case LK_DOWNGRADE:
> + error = lockmgr_downgrade(lk, flags, ilk, file, line);
> + break;
> + case LK_RELEASE:
> + lockmgr_trace("release", '-', lk);
> + x = lk->lk_lock;
> + if (__predict_true(x & LK_SHARE) != 0) {
> + return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
> + } else {
> + return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
> + }
> + break;
> + case LK_DRAIN:
> + break;
> + default:
> + if (flags & LK_INTERLOCK)
> + class->lc_unlock(ilk);
> + panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
> + }
> +
> + if (flags & LK_INTERLOCK)
> + class->lc_unlock(ilk);
> +
> + return (error);
> +}
> +
> +void
> +_lockmgr_disown(struct lock *lk, const char *file, int line)
> +{
> +}
> +
> +#ifdef INVARIANT_SUPPORT
> +
> +#ifndef INVARIANTS
> +#undef _lockmgr_assert
> +#endif
> +
> +void
> +_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
> +{
> + int slocked = 0;
> +
> + if (panicstr != NULL)
> + return;
> + switch (what) {
> + case KA_SLOCKED:
> + case KA_SLOCKED | KA_NOTRECURSED:
> + case KA_SLOCKED | KA_RECURSED:
> + slocked = 1;
> + case KA_LOCKED:
> + case KA_LOCKED | KA_NOTRECURSED:
> + case KA_LOCKED | KA_RECURSED:
> +#ifdef WITNESS
> +
> + /*
> + * We cannot trust WITNESS if the lock is held in exclusive
> + * mode and a call to lockmgr_disown() happened.
> + * Workaround this skipping the check if the lock is held in
> + * exclusive mode even for the KA_LOCKED case.
> + */
> + if (slocked || (lk->lk_lock & LK_SHARE)) {
> + witness_assert(&lk->lock_object, what, file, line);
> + break;
> + }
> +#endif
> + if (lk->lk_lock == LK_UNLOCKED ||
> + ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
> + (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
> + panic("Lock %s not %slocked @ %s:%d\n",
> + lk->lock_object.lo_name, slocked ? "share" : "",
> + file, line);
> +
> + if ((lk->lk_lock & LK_SHARE) == 0) {
> + if (lockmgr_recursed(lk)) {
> + if (what & KA_NOTRECURSED)
> + panic("Lock %s recursed @ %s:%d\n",
> + lk->lock_object.lo_name, file,
> + line);
> + } else if (what & KA_RECURSED)
> + panic("Lock %s not recursed @ %s:%d\n",
> + lk->lock_object.lo_name, file, line);
> + }
> + break;
> + case KA_XLOCKED:
> + case KA_XLOCKED | KA_NOTRECURSED:
> + case KA_XLOCKED | KA_RECURSED:
> + if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
> + panic("Lock %s not exclusively locked @ %s:%d\n",
> + lk->lock_object.lo_name, file, line);
> + if (lockmgr_recursed(lk)) {
> + if (what & KA_NOTRECURSED)
> + panic("Lock %s recursed @ %s:%d\n",
> + lk->lock_object.lo_name, file, line);
> + } else if (what & KA_RECURSED)
> + panic("Lock %s not recursed @ %s:%d\n",
> + lk->lock_object.lo_name, file, line);
> + break;
> + case KA_UNLOCKED:
> + if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
> + panic("Lock %s exclusively locked @ %s:%d\n",
> + lk->lock_object.lo_name, file, line);
> + break;
> + default:
> + panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
> + line);
> + }
> +}
> +#endif
> diff --git a/rtemsbsd/rtems/rtems-kernel-mutex.c b/rtemsbsd/rtems/rtems-kernel-mutex.c
> index 47f36832..ef73c3dd 100644
> --- a/rtemsbsd/rtems/rtems-kernel-mutex.c
> +++ b/rtemsbsd/rtems/rtems-kernel-mutex.c
> @@ -48,6 +48,11 @@
> #include <sys/proc.h>
> #include <sys/conf.h>
>
> +#if RTEMS_DEBUG
> +struct bsd_mutex_list bsd_mutexlist = TAILQ_HEAD_INITIALIZER(bsd_mutexlist);
> +rtems_mutex bsd_mutexlist_lock = RTEMS_MUTEX_INITIALIZER("mmutexlist");
> +#endif /* RTEMS_DEBUG */
> +
> static void assert_mtx(const struct lock_object *lock, int what);
> static void lock_mtx(struct lock_object *lock, uintptr_t how);
> static uintptr_t unlock_mtx(struct lock_object *lock);
> @@ -110,26 +115,26 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
> if (opts & MTX_RECURSE)
> flags |= LO_RECURSABLE;
>
> - rtems_bsd_mutex_init(&m->lock_object, &m->mutex, class, name, type,
> + rtems_bsd_mutex_init(&m->lock_object, class, name, type,
> flags);
> }
>
> void
> _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
> {
> - rtems_bsd_mutex_lock(&m->lock_object, &m->mutex);
> + rtems_bsd_mutex_lock(&m->lock_object);
> }
>
> int
> mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
> {
> - return (rtems_bsd_mutex_trylock(&m->lock_object, &m->mutex));
> + return (rtems_bsd_mutex_trylock(&m->lock_object));
> }
>
> void
> _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
> {
> - rtems_bsd_mutex_unlock(&m->mutex);
> + rtems_bsd_mutex_unlock(&m->lock_object);
> }
>
> /*
> @@ -139,7 +144,7 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
> void
> _mtx_assert(struct mtx *m, int what, const char *file, int line)
> {
> - const char *name = rtems_bsd_mutex_name(&m->mutex);
> + const char *name = rtems_bsd_mutex_name(&m->lock_object);
>
> switch (what) {
> case MA_OWNED:
> @@ -168,12 +173,12 @@ _mtx_assert(struct mtx *m, int what, const char *file, int line)
>
> int mtx_owned(struct mtx *m)
> {
> - return (rtems_bsd_mutex_owned(&m->mutex));
> + return (rtems_bsd_mutex_owned(&m->lock_object));
> }
>
> int mtx_recursed(struct mtx *m)
> {
> - return (rtems_bsd_mutex_recursed(&m->mutex));
> + return (rtems_bsd_mutex_recursed(&m->lock_object));
> }
>
> void
> @@ -188,7 +193,7 @@ void
> mtx_destroy(struct mtx *m)
> {
>
> - rtems_bsd_mutex_destroy(&m->lock_object, &m->mutex);
> + rtems_bsd_mutex_destroy(&m->lock_object);
> }
>
> void
> diff --git a/rtemsbsd/rtems/rtems-kernel-muteximpl.c b/rtemsbsd/rtems/rtems-kernel-muteximpl.c
> index 8a832b4e..a57d1bc8 100644
> --- a/rtemsbsd/rtems/rtems-kernel-muteximpl.c
> +++ b/rtemsbsd/rtems/rtems-kernel-muteximpl.c
> @@ -43,12 +43,13 @@
> #include <rtems/score/schedulerimpl.h>
>
> void
> -rtems_bsd_mutex_lock_more(struct lock_object *lock, rtems_bsd_mutex *m,
> +rtems_bsd_mutex_lock_more(struct lock_object *lk,
> Thread_Control *owner, Thread_Control *executing,
> Thread_queue_Context *queue_context)
> {
> + rtems_bsd_mutex *m = &lk->lo_mtx;
> if (owner == executing) {
> - if ((lock->lo_flags & LO_RECURSABLE) == 0) {
> + if ((lk->lo_flags & LO_RECURSABLE) == 0) {
> _Thread_queue_Release(&m->queue, queue_context);
> panic("mutex lock: %s: not LO_RECURSABLE\n",
> m->queue.Queue.name);
> diff --git a/rtemsbsd/rtems/rtems-kernel-rwlock.c b/rtemsbsd/rtems/rtems-kernel-rwlock.c
> index c204c04f..a4e98edd 100644
> --- a/rtemsbsd/rtems/rtems-kernel-rwlock.c
> +++ b/rtemsbsd/rtems/rtems-kernel-rwlock.c
> @@ -70,9 +70,9 @@ struct lock_class lock_class_rw = {
> .lc_unlock = unlock_rw,
> };
>
> -#define rw_wowner(rw) rtems_bsd_mutex_owner(&(rw)->mutex)
> +#define rw_wowner(rw) rtems_bsd_mutex_owner(&(rw)->lock_object)
>
> -#define rw_recursed(rw) rtems_bsd_mutex_recursed(&(rw)->mutex)
> +#define rw_recursed(rw) rtems_bsd_mutex_recursed(&(rw)->lock_object)
>
> void
> assert_rw(const struct lock_object *lock, int what)
> @@ -101,11 +101,9 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
> {
> int flags;
>
> - flags = LO_UPGRADABLE;
> - if (opts & RW_RECURSE)
> - flags |= LO_RECURSABLE;
> + flags = LO_UPGRADABLE | LO_RECURSABLE;
>
> - rtems_bsd_mutex_init(&rw->lock_object, &rw->mutex, &lock_class_rw,
> + rtems_bsd_mutex_init(&rw->lock_object, &lock_class_rw,
> name, NULL, flags);
> }
>
> @@ -113,7 +111,7 @@ void
> rw_destroy(struct rwlock *rw)
> {
>
> - rtems_bsd_mutex_destroy(&rw->lock_object, &rw->mutex);
> + rtems_bsd_mutex_destroy(&rw->lock_object);
> }
>
> void
> @@ -128,43 +126,43 @@ rw_sysinit(void *arg)
> int
> rw_wowned(struct rwlock *rw)
> {
> - return (rtems_bsd_mutex_owned(&rw->mutex));
> + return (rtems_bsd_mutex_owned(&rw->lock_object));
> }
>
> void
> _rw_wlock(struct rwlock *rw, const char *file, int line)
> {
> - rtems_bsd_mutex_lock(&rw->lock_object, &rw->mutex);
> + rtems_bsd_mutex_lock(&rw->lock_object);
> }
>
> int
> _rw_try_wlock(struct rwlock *rw, const char *file, int line)
> {
> - return (rtems_bsd_mutex_trylock(&rw->lock_object, &rw->mutex));
> + return (rtems_bsd_mutex_trylock(&rw->lock_object));
> }
>
> void
> _rw_wunlock(struct rwlock *rw, const char *file, int line)
> {
> - rtems_bsd_mutex_unlock(&rw->mutex);
> + rtems_bsd_mutex_unlock(&rw->lock_object);
> }
>
> void
> _rw_rlock(struct rwlock *rw, const char *file, int line)
> {
> - rtems_bsd_mutex_lock(&rw->lock_object, &rw->mutex);
> + rtems_bsd_mutex_lock(&rw->lock_object);
> }
>
> int
> _rw_try_rlock(struct rwlock *rw, const char *file, int line)
> {
> - return (rtems_bsd_mutex_trylock(&rw->lock_object, &rw->mutex));
> + return (rtems_bsd_mutex_trylock(&rw->lock_object));
> }
>
> void
> _rw_runlock(struct rwlock *rw, const char *file, int line)
> {
> - rtems_bsd_mutex_unlock(&rw->mutex);
> + rtems_bsd_mutex_unlock(&rw->lock_object);
> }
>
> int
> @@ -188,7 +186,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
> void
> _rw_assert(const struct rwlock *rw, int what, const char *file, int line)
> {
> - const char *name = rtems_bsd_mutex_name(&rw->mutex);
> + const char *name = rtems_bsd_mutex_name(&rw->lock_object);
>
> switch (what) {
> case RA_LOCKED:
> diff --git a/rtemsbsd/rtems/rtems-kernel-sx.c b/rtemsbsd/rtems/rtems-kernel-sx.c
> index 4f8bea4d..827b3397 100644
> --- a/rtemsbsd/rtems/rtems-kernel-sx.c
> +++ b/rtemsbsd/rtems/rtems-kernel-sx.c
> @@ -59,9 +59,9 @@ struct lock_class lock_class_sx = {
> .lc_unlock = unlock_sx,
> };
>
> -#define sx_xholder(sx) rtems_bsd_mutex_owner(&(sx)->mutex)
> +#define sx_xholder(sx) rtems_bsd_mutex_owner(&(sx)->lock_object)
>
> -#define sx_recursed(sx) rtems_bsd_mutex_recursed(&(sx)->mutex)
> +#define sx_recursed(sx) rtems_bsd_mutex_recursed(&(sx)->lock_object)
>
> void
> assert_sx(const struct lock_object *lock, int what)
> @@ -90,7 +90,7 @@ sx_sysinit(void *arg)
> {
> struct sx_args *sargs = arg;
>
> - sx_init(sargs->sa_sx, sargs->sa_desc);
> + sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
> }
>
> void
> @@ -102,7 +102,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
> if (opts & SX_RECURSE)
> flags |= LO_RECURSABLE;
>
> - rtems_bsd_mutex_init(&sx->lock_object, &sx->mutex, &lock_class_sx,
> + rtems_bsd_mutex_init(&sx->lock_object, &lock_class_sx,
> description, NULL, flags);
> }
>
> @@ -110,14 +110,14 @@ void
> sx_destroy(struct sx *sx)
> {
>
> - rtems_bsd_mutex_destroy(&sx->lock_object, &sx->mutex);
> + rtems_bsd_mutex_destroy(&sx->lock_object);
> }
>
> int
> _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
> {
>
> - rtems_bsd_mutex_lock(&sx->lock_object, &sx->mutex);
> + rtems_bsd_mutex_lock(&sx->lock_object);
> return (0);
> }
>
> @@ -125,14 +125,14 @@ int
> sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
> {
>
> - return (rtems_bsd_mutex_trylock(&sx->lock_object, &sx->mutex));
> + return (rtems_bsd_mutex_trylock(&sx->lock_object));
> }
>
> void
> _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
> {
>
> - rtems_bsd_mutex_unlock(&sx->mutex);
> + rtems_bsd_mutex_unlock(&sx->lock_object);
> }
>
> int
> @@ -142,6 +142,13 @@ sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
> return (1);
> }
>
> +int
> +sx_try_upgrade_(struct sx *sx, const char *file, int line)
> +{
> +
> + return (1);
> +}
> +
> void
> sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
> {
> @@ -149,6 +156,13 @@ sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
> /* Do nothing */
> }
>
> +int
> +sx_try_downgrade_(struct sx *sx, const char *file, int line)
> +{
> +
> + return (1);
> +}
> +
> #ifdef INVARIANT_SUPPORT
> /*
> * In the non-WITNESS case, sx_assert() can only detect that at least
> @@ -158,7 +172,7 @@ sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
> void
> _sx_assert(const struct sx *sx, int what, const char *file, int line)
> {
> - const char *name = rtems_bsd_mutex_name(&sx->mutex);
> + const char *name = rtems_bsd_mutex_name(&sx->lock_object);
>
> switch (what) {
> case SA_SLOCKED:
> @@ -205,5 +219,5 @@ _sx_assert(const struct sx *sx, int what, const char *file, int line)
> int
> sx_xlocked(struct sx *sx)
> {
> - return (rtems_bsd_mutex_owned(&sx->mutex));
> + return (rtems_bsd_mutex_owned(&sx->lock_object));
> }
> --
> 2.24.1
>