[rtems-libbsd commit] SLEEPQUEUE(9): Port to RTEMS

Sebastian Huber sebh at rtems.org
Tue Mar 24 14:28:56 UTC 2015


Module:    rtems-libbsd
Branch:    master
Commit:    8475e7aa0a46ea1ef96c03968a80530f0c44c0ef
Changeset: http://git.rtems.org/rtems-libbsd/commit/?id=8475e7aa0a46ea1ef96c03968a80530f0c44c0ef

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue Mar 24 10:02:45 2015 +0100

SLEEPQUEUE(9): Port to RTEMS

---

 Makefile                                           |   1 +
 freebsd-to-rtems.py                                |   2 +
 freebsd/sys/kern/subr_sleepqueue.c                 | 279 +++++++++++++++++++++
 freebsd/sys/sys/proc.h                             |  27 +-
 libbsd.txt                                         |   2 +
 rtemsbsd/include/machine/rtems-bsd-thread.h        |   9 -
 .../rtems/bsd/local/opt_sleepqueue_profiling.h     |   0
 rtemsbsd/include/sys/sleepqueue.h                  |   1 -
 rtemsbsd/rtems/rtems-bsd-thread.c                  |  16 +-
 9 files changed, 313 insertions(+), 24 deletions(-)

diff --git a/Makefile b/Makefile
index e23e596..608c881 100644
--- a/Makefile
+++ b/Makefile
@@ -188,6 +188,7 @@ LIB_C_FILES += freebsd/sys/kern/subr_lock.c
 LIB_C_FILES += freebsd/sys/kern/subr_module.c
 LIB_C_FILES += freebsd/sys/kern/subr_rman.c
 LIB_C_FILES += freebsd/sys/kern/subr_sbuf.c
+LIB_C_FILES += freebsd/sys/kern/subr_sleepqueue.c
 LIB_C_FILES += freebsd/sys/kern/subr_taskqueue.c
 LIB_C_FILES += freebsd/sys/kern/subr_uio.c
 LIB_C_FILES += freebsd/sys/kern/subr_unit.c
diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py
index 24055e8..a424770 100755
--- a/freebsd-to-rtems.py
+++ b/freebsd-to-rtems.py
@@ -834,6 +834,7 @@ base.addKernelSpaceHeaderFiles(
 		'sys/sys/sigio.h',
 		'sys/sys/_sigset.h',
 		'sys/sys/smp.h',
+		'sys/sys/sleepqueue.h',
 		'sys/sys/_sockaddr_storage.h',
 		'sys/sys/sockbuf.h',
 		'sys/sys/socket.h',
@@ -897,6 +898,7 @@ base.addKernelSpaceSourceFiles(
 		'sys/kern/subr_module.c',
 		'sys/kern/subr_rman.c',
 		'sys/kern/subr_sbuf.c',
+		'sys/kern/subr_sleepqueue.c',
 		'sys/kern/subr_taskqueue.c',
 		'sys/kern/subr_uio.c',
 		'sys/kern/subr_unit.c',
diff --git a/freebsd/sys/kern/subr_sleepqueue.c b/freebsd/sys/kern/subr_sleepqueue.c
index 3774e66..00740ae 100644
--- a/freebsd/sys/kern/subr_sleepqueue.c
+++ b/freebsd/sys/kern/subr_sleepqueue.c
@@ -2,6 +2,7 @@
 
 /*-
  * Copyright (c) 2004 John Baldwin <jhb at FreeBSD.org>
+ * Copyright (c) 2015 embedded brains GmbH <rtems at embedded-brains.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,6 +86,10 @@ __FBSDID("$FreeBSD$");
 #ifdef DDB
 #include <ddb/ddb.h>
 #endif
+#ifdef __rtems__
+#include <machine/rtems-bsd-thread.h>
+#include <rtems/score/threadimpl.h>
+#endif /* __rtems__ */
 
 /*
  * Constants for the hash table of sleep queue chains.  These constants are
@@ -155,9 +160,11 @@ static uma_zone_t sleepq_zone;
 /*
  * Prototypes for non-exported routines.
  */
+#ifndef __rtems__
 static int	sleepq_catch_signals(void *wchan, int pri);
 static int	sleepq_check_signals(void);
 static int	sleepq_check_timeout(void);
+#endif /* __rtems__ */
 #ifdef INVARIANTS
 static void	sleepq_dtor(void *mem, int size, void *arg);
 #endif
@@ -165,7 +172,11 @@ static int	sleepq_init(void *mem, int size, int flags);
 static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
 		    int pri);
 static void	sleepq_switch(void *wchan, int pri);
+#ifndef __rtems__
 static void	sleepq_timeout(void *arg);
+#else /* __rtems__ */
+static void	sleepq_timeout(Objects_Id id, void *arg);
+#endif /* __rtems__ */
 
 SDT_PROBE_DECLARE(sched, , , sleep);
 SDT_PROBE_DECLARE(sched, , , wakeup);
@@ -206,7 +217,9 @@ init_sleepqueues(void)
 	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
 #endif
 	
+#ifndef __rtems__
 	thread0.td_sleepqueue = sleepq_alloc();
+#endif /* __rtems__ */
 }
 
 /*
@@ -286,6 +299,11 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
 	struct sleepqueue_chain *sc;
 	struct sleepqueue *sq;
 	struct thread *td;
+#ifdef __rtems__
+	ISR_lock_Context lock_context;
+	Thread_Control *executing;
+	struct thread *succ;
+#endif /* __rtems__ */
 
 	td = curthread;
 	sc = SC_LOOKUP(wchan);
@@ -341,17 +359,40 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
 		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
 	}
 	thread_lock(td);
+#ifndef __rtems__
 	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
+#else /* __rtems__ */
+	/* FIXME: This is broken with clustered scheduling */
+	succ = NULL;
+	TAILQ_FOREACH(succ, &sq->sq_blocked[queue], td_slpq) {
+		if (td->td_thread->current_priority <
+		    succ->td_thread->current_priority)
+			break;
+	}
+	if (succ == NULL)
+		TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
+	else
+		TAILQ_INSERT_BEFORE(succ, td, td_slpq);
+#endif /* __rtems__ */
 	sq->sq_blockedcnt[queue]++;
+#ifdef __rtems__
+	executing = td->td_thread;
+	_Objects_ISR_disable_and_acquire(&executing->Object, &lock_context);
+	td->td_sq_state = TD_SQ_TIRED;
+#endif /* __rtems__ */
 	td->td_sleepqueue = NULL;
 	td->td_sqqueue = queue;
 	td->td_wchan = wchan;
 	td->td_wmesg = wmesg;
+#ifndef __rtems__
 	if (flags & SLEEPQ_INTERRUPTIBLE) {
 		td->td_flags |= TDF_SINTR;
 		td->td_flags &= ~TDF_SLEEPABORT;
 	}
 	thread_unlock(td);
+#else /* __rtems__ */
+	_Objects_Release_and_ISR_enable(&executing->Object, &lock_context);
+#endif /* __rtems__ */
 }
 
 /*
@@ -361,6 +402,7 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
 void
 sleepq_set_timeout(void *wchan, int timo)
 {
+#ifndef __rtems__
 	struct sleepqueue_chain *sc;
 	struct thread *td;
 
@@ -371,6 +413,17 @@ sleepq_set_timeout(void *wchan, int timo)
 	MPASS(td->td_sleepqueue == NULL);
 	MPASS(wchan != NULL);
 	callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
+#else /* __rtems__ */
+	Thread_Control *executing;
+
+	_Thread_Disable_dispatch();
+	executing = _Thread_Executing;
+	BSD_ASSERT(executing->Timer.state == WATCHDOG_INACTIVE);
+	_Watchdog_Initialize(&executing->Timer, sleepq_timeout,
+	    0, executing);
+	_Watchdog_Insert_ticks(&executing->Timer, (Watchdog_Interval)timo);
+	_Thread_Enable_dispatch();
+#endif /* __rtems__ */
 }
 
 /*
@@ -389,6 +442,7 @@ sleepq_sleepcnt(void *wchan, int queue)
 	return (sq->sq_blockedcnt[queue]);
 }
 
+#ifndef __rtems__
 /*
  * Marks the pending sleep of the current thread as interruptible and
  * makes an initial check for pending signals before putting a thread
@@ -483,6 +537,7 @@ out:
 	MPASS(td->td_lock != &sc->sc_lock);
 	return (ret);
 }
+#endif /* __rtems__ */
 
 /*
  * Switches to another thread if we are still asleep on a sleep queue.
@@ -491,6 +546,7 @@ out:
 static void
 sleepq_switch(void *wchan, int pri)
 {
+#ifndef __rtems__
 	struct sleepqueue_chain *sc;
 	struct sleepqueue *sq;
 	struct thread *td;
@@ -542,6 +598,106 @@ sleepq_switch(void *wchan, int pri)
 	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
 	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
+#else /* __rtems__ */
+	Thread_Control *executing;
+	ISR_lock_Context lock_context;
+	struct thread *td;
+	bool block;
+	bool remove;
+
+	sleepq_release(wchan);
+
+	executing = _Thread_Acquire_executing(&lock_context);
+	td = rtems_bsd_get_thread(executing);
+	BSD_ASSERT(td != NULL);
+
+	block = false;
+	remove = false;
+	switch (td->td_sq_state) {
+	case TD_SQ_TIRED:
+		BSD_ASSERT(td->td_wchan == wchan);
+		td->td_sq_state = TD_SQ_SLEEPY;
+		block = true;
+		break;
+	case TD_SQ_NIGHTMARE:
+		BSD_ASSERT(td->td_wchan == wchan);
+		td->td_sq_state = TD_SQ_PANIC;
+		remove = true;
+		break;
+	default:
+		BSD_ASSERT(td->td_wchan == NULL);
+		BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
+		break;
+	}
+
+	if (block) {
+		Per_CPU_Control *cpu_self;
+		bool unblock;
+
+		cpu_self = _Objects_Release_and_thread_dispatch_disable(
+		    &executing->Object, &lock_context);
+
+		_Giant_Acquire(cpu_self);
+		_Thread_Set_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
+		_Giant_Release(cpu_self);
+
+		_Objects_ISR_disable_and_acquire(&executing->Object,
+		    &lock_context);
+
+		unblock = false;
+		switch (td->td_sq_state) {
+		case TD_SQ_NIGHTMARE:
+			BSD_ASSERT(td->td_wchan == wchan);
+			td->td_sq_state = TD_SQ_PANIC;
+			unblock = true;
+			remove = true;
+			break;
+		case TD_SQ_WAKEUP:
+			BSD_ASSERT(td->td_wchan == NULL);
+			unblock = true;
+			break;
+		default:
+			BSD_ASSERT(td->td_wchan == wchan);
+			BSD_ASSERT(td->td_sq_state == TD_SQ_SLEEPY);
+			td->td_sq_state = TD_SQ_SLEEPING;
+			break;
+		}
+
+		_Objects_Release_and_ISR_enable(&executing->Object,
+		    &lock_context);
+
+		if (unblock) {
+			_Giant_Acquire(cpu_self);
+			_Watchdog_Remove(&executing->Timer);
+			_Thread_Clear_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
+			_Giant_Release(cpu_self);
+		}
+
+		_Thread_Dispatch_enable(cpu_self);
+
+		_Objects_ISR_disable_and_acquire(&executing->Object,
+		    &lock_context);
+
+		switch (td->td_sq_state) {
+		case TD_SQ_NIGHTMARE:
+			BSD_ASSERT(td->td_wchan == wchan);
+			td->td_sq_state = TD_SQ_PANIC;
+			remove = true;
+			break;
+		default:
+			BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP ||
+			    td->td_sq_state == TD_SQ_PANIC);
+			break;
+		}
+	}
+
+	_Objects_Release_and_ISR_enable(&executing->Object,
+	    &lock_context);
+
+	if (remove) {
+		sleepq_remove(td, wchan);
+	}
+#endif /* __rtems__ */
 }
 
 /*
@@ -553,6 +709,7 @@ sleepq_check_timeout(void)
 	struct thread *td;
 
 	td = curthread;
+#ifndef __rtems__
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 
 	/*
@@ -581,8 +738,12 @@ sleepq_check_timeout(void)
 		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
 	}
 	return (0);
+#else /* __rtems__ */
+	return (td->td_sq_state);
+#endif /* __rtems__ */
 }
 
+#ifndef __rtems__
 /*
  * Check to see if we were awoken by a signal.
  */
@@ -605,6 +766,7 @@ sleepq_check_signals(void)
 
 	return (0);
 }
+#endif /* __rtems__ */
 
 /*
  * Block the current thread until it is awakened from its sleep queue.
@@ -612,15 +774,20 @@ sleepq_check_signals(void)
 void
 sleepq_wait(void *wchan, int pri)
 {
+#ifndef __rtems__
 	struct thread *td;
 
 	td = curthread;
 	MPASS(!(td->td_flags & TDF_SINTR));
 	thread_lock(td);
+#endif /* __rtems__ */
 	sleepq_switch(wchan, pri);
+#ifndef __rtems__
 	thread_unlock(td);
+#endif /* __rtems__ */
 }
 
+#ifndef __rtems__
 /*
  * Block the current thread until it is awakened from its sleep queue
  * or it is interrupted by a signal.
@@ -638,6 +805,7 @@ sleepq_wait_sig(void *wchan, int pri)
 		return (rcatch);
 	return (rval);
 }
+#endif /* __rtems__ */
 
 /*
  * Block the current thread until it is awakened from its sleep queue
@@ -646,19 +814,26 @@ sleepq_wait_sig(void *wchan, int pri)
 int
 sleepq_timedwait(void *wchan, int pri)
 {
+#ifndef __rtems__
 	struct thread *td;
+#endif /* __rtems__ */
 	int rval;
 
+#ifndef __rtems__
 	td = curthread;
 	MPASS(!(td->td_flags & TDF_SINTR));
 	thread_lock(td);
+#endif /* __rtems__ */
 	sleepq_switch(wchan, pri);
 	rval = sleepq_check_timeout();
+#ifndef __rtems__
 	thread_unlock(td);
+#endif /* __rtems__ */
 
 	return (rval);
 }
 
+#ifndef __rtems__
 /*
  * Block the current thread until it is awakened from its sleep queue,
  * it is interrupted by a signal, or it times out waiting to be awakened.
@@ -678,6 +853,7 @@ sleepq_timedwait_sig(void *wchan, int pri)
 		return (rvals);
 	return (rvalt);
 }
+#endif /* __rtems__ */
 
 /*
  * Returns the type of sleepqueue given a waitchannel.
@@ -709,6 +885,13 @@ static int
 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
 {
 	struct sleepqueue_chain *sc;
+#ifdef __rtems__
+	Thread_Control *thread;
+	ISR_lock_Context lock_context;
+	bool unblock;
+
+	BSD_ASSERT(sq != NULL);
+#endif /* __rtems__ */
 
 	MPASS(td != NULL);
 	MPASS(sq->sq_wchan != NULL);
@@ -740,9 +923,15 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
 	} else
 		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
 	LIST_REMOVE(td->td_sleepqueue, sq_hash);
+#ifdef __rtems__
+	(void)sc;
+	thread = td->td_thread;
+	_Objects_ISR_disable_and_acquire(&thread->Object, &lock_context);
+#endif /* __rtems__ */
 
 	td->td_wmesg = NULL;
 	td->td_wchan = NULL;
+#ifndef __rtems__
 	td->td_flags &= ~TDF_SINTR;
 
 	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
@@ -764,6 +953,39 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
 		TD_CLR_SLEEPING(td);
 		return (setrunnable(td));
 	}
+#else /* __rtems__ */
+	unblock = _Watchdog_Is_active(&thread->Timer);
+	switch (td->td_sq_state) {
+	case TD_SQ_SLEEPING:
+		unblock = true;
+		/* FALLTHROUGH */
+	case TD_SQ_TIRED:
+	case TD_SQ_SLEEPY:
+	case TD_SQ_NIGHTMARE:
+		td->td_sq_state = TD_SQ_WAKEUP;
+		break;
+	default:
+		BSD_ASSERT(td->td_sq_state == TD_SQ_PANIC);
+		break;
+	}
+
+	if (unblock) {
+		Per_CPU_Control *cpu_self;
+
+		cpu_self = _Objects_Release_and_thread_dispatch_disable(
+		    &thread->Object, &lock_context);
+		_Giant_Acquire(cpu_self);
+
+		_Watchdog_Remove(&thread->Timer);
+		_Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);
+
+		_Giant_Release(cpu_self);
+		_Thread_Dispatch_enable(cpu_self);
+	} else {
+		_Objects_Release_and_ISR_enable(&thread->Object,
+		    &lock_context);
+	}
+#endif /* __rtems__ */
 	return (0);
 }
 
@@ -811,7 +1033,11 @@ int
 sleepq_signal(void *wchan, int flags, int pri, int queue)
 {
 	struct sleepqueue *sq;
+#ifndef __rtems__
 	struct thread *td, *besttd;
+#else /* __rtems__ */
+	struct thread *besttd;
+#endif /* __rtems__ */
 	int wakeup_swapper;
 
 	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
@@ -823,6 +1049,7 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
 	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
 
+#ifndef __rtems__
 	/*
 	 * Find the highest priority thread on the queue.  If there is a
 	 * tie, use the thread that first appears in the queue as it has
@@ -834,6 +1061,9 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
 		if (besttd == NULL || td->td_priority < besttd->td_priority)
 			besttd = td;
 	}
+#else /* __rtems__ */
+	besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
+#endif /* __rtems__ */
 	MPASS(besttd != NULL);
 	thread_lock(besttd);
 	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
@@ -871,6 +1101,7 @@ sleepq_broadcast(void *wchan, int flags, int pri, int queue)
 	return (wakeup_swapper);
 }
 
+#ifndef __rtems__
 /*
  * Time sleeping threads out.  When the timeout expires, the thread is
  * removed from the sleep queue and made runnable if it is still asleep.
@@ -940,6 +1171,52 @@ sleepq_timeout(void *arg)
 	if (wakeup_swapper)
 		kick_proc0();
 }
+#else /* __rtems__ */
+static void
+sleepq_timeout(Objects_Id id, void *arg)
+{
+	Thread_Control *thread;
+	struct thread *td;
+	ISR_lock_Context lock_context;
+	bool unblock;
+
+	thread = arg;
+	td = rtems_bsd_get_thread(thread);
+	BSD_ASSERT(td != NULL);
+
+	_Objects_ISR_disable_and_acquire(&thread->Object, &lock_context);
+
+	unblock = false;
+	switch (td->td_sq_state) {
+	case TD_SQ_SLEEPING:
+		unblock = true;
+		/* Fall through */
+	case TD_SQ_TIRED:
+	case TD_SQ_SLEEPY:
+		td->td_sq_state = TD_SQ_NIGHTMARE;
+		break;
+	default:
+		BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
+		break;
+	}
+
+	if (unblock) {
+		Per_CPU_Control *cpu_self;
+
+		cpu_self = _Objects_Release_and_thread_dispatch_disable(
+		    &thread->Object, &lock_context);
+		_Giant_Acquire(cpu_self);
+
+		_Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);
+
+		_Giant_Release(cpu_self);
+		_Thread_Dispatch_enable(cpu_self);
+	} else {
+		_Objects_Release_and_ISR_enable(&thread->Object,
+		    &lock_context);
+	}
+}
+#endif /* __rtems__ */
 
 /*
  * Resumes a specific thread from the sleep queue associated with a specific
@@ -980,6 +1257,7 @@ sleepq_remove(struct thread *td, void *wchan)
 		kick_proc0();
 }
 
+#ifndef __rtems__
 /*
  * Abort a thread as if an interrupt had occurred.  Only abort
  * interruptible waits (unfortunately it isn't safe to abort others).
@@ -1021,6 +1299,7 @@ sleepq_abort(struct thread *td, int intrval)
 	/* Thread is asleep on sleep queue sq, so wake it up. */
 	return (sleepq_resume_thread(sq, td, 0));
 }
+#endif /* __rtems__ */
 
 #ifdef SLEEPQUEUE_PROFILING
 #define	SLEEPQ_PROF_LOCATIONS	1024
diff --git a/freebsd/sys/sys/proc.h b/freebsd/sys/sys/proc.h
index cab2eac..95b1b9c 100644
--- a/freebsd/sys/sys/proc.h
+++ b/freebsd/sys/sys/proc.h
@@ -195,6 +195,16 @@ struct rusage_ext {
 	uint64_t	rux_su;         /* (c) Previous sys time in usec. */
 	uint64_t	rux_tu;         /* (c) Previous total time in usec. */
 };
+#ifdef __rtems__
+enum thread_sq_states {
+	TD_SQ_WAKEUP,
+	TD_SQ_PANIC = EWOULDBLOCK,
+	TD_SQ_TIRED,
+	TD_SQ_SLEEPY,
+	TD_SQ_SLEEPING,
+	TD_SQ_NIGHTMARE
+};
+#endif /* __rtems__ */
 
 /*
  * Kernel runnable context (thread).
@@ -212,7 +222,9 @@ struct thread {
 	struct proc	*td_proc;	/* (*) Associated process. */
 	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
 	TAILQ_ENTRY(thread) td_runq;	/* (t) Run queue. */
+#endif /* __rtems__ */
 	TAILQ_ENTRY(thread) td_slpq;	/* (t) Sleep queue. */
+#ifndef __rtems__
 	TAILQ_ENTRY(thread) td_lockq;	/* (t) Lock queue. */
 	LIST_ENTRY(thread) td_hash;	/* (d) Hash chain. */
 	struct cpuset	*td_cpuset;	/* (t) CPU affinity mask. */
@@ -233,11 +245,14 @@ struct thread {
 	int		td_inhibitors;	/* (t) Why can not run. */
 	int		td_pflags;	/* (k) Private thread (TDP_*) flags. */
 	int		td_dupfd;	/* (k) Ret value from fdopen. XXX */
-	int		td_sqqueue;	/* (t) Sleepqueue queue blocked on. */
 #endif /* __rtems__ */
+#ifdef __rtems__
+	enum thread_sq_states td_sq_state;
+#endif /* __rtems__ */
+	int		td_sqqueue;	/* (t) Sleepqueue queue blocked on. */
 	void		*td_wchan;	/* (t) Sleep address. */
-#ifndef __rtems__
 	const char	*td_wmesg;	/* (t) Reason for sleep. */
+#ifndef __rtems__
 	u_char		td_lastcpu;	/* (t) Last cpu we were on. */
 	u_char		td_oncpu;	/* (t) Which cpu we are on. */
 	volatile u_char td_owepreempt;  /* (k*) Preempt on last critical_exit */
@@ -337,12 +352,16 @@ struct thread {
 struct mtx *thread_lock_block(struct thread *);
 void thread_lock_unblock(struct thread *, struct mtx *);
 void thread_lock_set(struct thread *, struct mtx *);
+#ifndef __rtems__
 #define	THREAD_LOCK_ASSERT(td, type)					\
 do {									\
 	struct mtx *__m = (td)->td_lock;				\
 	if (__m != &blocked_lock)					\
 		mtx_assert(__m, (type));				\
 } while (0)
+#else /* __rtems__ */
+#define	THREAD_LOCK_ASSERT(td, type)
+#endif /* __rtems__ */
 
 #ifdef INVARIANTS
 #define	THREAD_LOCKPTR_ASSERT(td, lock)					\
@@ -889,7 +908,11 @@ void	fork_exit(void (*)(void *, struct trapframe *), void *,
 void	fork_return(struct thread *, struct trapframe *);
 int	inferior(struct proc *p);
 void	kern_yield(int);
+#ifndef __rtems__
 void 	kick_proc0(void);
+#else /* __rtems__ */
+#define	kick_proc0()
+#endif /* __rtems__ */
 int	leavepgrp(struct proc *p);
 int	maybe_preempt(struct thread *td);
 void	maybe_yield(void);
diff --git a/libbsd.txt b/libbsd.txt
index f965667..aa865b4 100644
--- a/libbsd.txt
+++ b/libbsd.txt
@@ -316,6 +316,8 @@ rtems_mdns_gethostname().
 
 == Issues and TODO
 
+* Priority queues are broken with clustered scheduling.
+
 * Per-CPU data should be enabled once the new stack is ready for SMP.
 
 * Per-CPU NETISR(9) should be enabled once the new stack is ready for SMP.
diff --git a/rtemsbsd/include/machine/rtems-bsd-thread.h b/rtemsbsd/include/machine/rtems-bsd-thread.h
index 2f99ad1..9a2caee 100644
--- a/rtemsbsd/include/machine/rtems-bsd-thread.h
+++ b/rtemsbsd/include/machine/rtems-bsd-thread.h
@@ -43,20 +43,11 @@
 #include <rtems/bsd/sys/param.h>
 #include <rtems/bsd/sys/types.h>
 #include <sys/proc.h>
-#include <sys/queue.h>
 
-#include <rtems/score/threadq.h>
 #include <rtems.h>
 
 #define BSD_TASK_NAME rtems_build_name('_', 'B', 'S', 'D')
 
-struct sleepqueue {
-	Thread_queue_Control sq_blocked;
-	LIST_ENTRY(sleepqueue) sq_hash;
-	LIST_HEAD(, sleepqueue) sq_free;
-	void *sq_wchan;
-};
-
 struct thread *
 rtems_bsd_get_thread(const Thread_Control *thread);
 
diff --git a/rtemsbsd/include/rtems/bsd/local/opt_sleepqueue_profiling.h b/rtemsbsd/include/rtems/bsd/local/opt_sleepqueue_profiling.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/include/sys/sleepqueue.h b/rtemsbsd/include/sys/sleepqueue.h
deleted file mode 100644
index 936ffd8..0000000
--- a/rtemsbsd/include/sys/sleepqueue.h
+++ /dev/null
@@ -1 +0,0 @@
-/* EMPTY */
diff --git a/rtemsbsd/rtems/rtems-bsd-thread.c b/rtemsbsd/rtems/rtems-bsd-thread.c
index ef94188..77057ba 100644
--- a/rtemsbsd/rtems/rtems-bsd-thread.c
+++ b/rtemsbsd/rtems/rtems-bsd-thread.c
@@ -49,6 +49,7 @@
 #include <sys/kthread.h>
 #include <sys/malloc.h>
 #include <sys/selinfo.h>
+#include <sys/sleepqueue.h>
 
 #include <rtems/bsd/bsd.h>
 
@@ -100,23 +101,14 @@ struct thread *
 rtems_bsd_thread_create(Thread_Control *thread, int wait)
 {
 	struct thread *td = malloc(sizeof(*td), M_TEMP, M_ZERO | wait);
-	struct sleepqueue *sq = malloc(sizeof(*sq), M_TEMP, wait);
+	struct sleepqueue *sq = sleepq_alloc();
 
 	if (td != NULL && sq != NULL) {
 		td->td_thread = thread;
 		td->td_sleepqueue = sq;
-
-		LIST_INIT(&sq->sq_free);
-
-		_Thread_queue_Initialize(
-			&sq->sq_blocked,
-			THREAD_QUEUE_DISCIPLINE_PRIORITY,
-			STATES_WAITING_FOR_BSD_WAKEUP,
-			EWOULDBLOCK
-		);
 	} else {
 		free(td, M_TEMP);
-		free(sq, M_TEMP);
+		sleepq_free(sq);
 		td = NULL;
 	}
 
@@ -185,7 +177,7 @@ rtems_bsd_extension_thread_delete(
 
 	if (td != NULL) {
 		seltdfini(td);
-		free(td->td_sleepqueue, M_TEMP);
+		sleepq_free(td->td_sleepqueue);
 		free(td, M_TEMP);
 	}
 }



More information about the vc mailing list