[rtems-libbsd commit] Provide SWI(9) and TIMEOUT(9)

Joel Sherrill joel.sherrill at OARcorp.com
Thu May 10 14:55:17 UTC 2012


Thanks for committing all this Sebastian.



On 05/10/2012 09:28 AM, Sebastian Huber wrote:
> Module:    rtems-libbsd
> Branch:    master
> Commit:    ee6b343cbaa434d8c31a4d71dac5316b4404da35
> Changeset: http://git.rtems.org/rtems-libbsd/commit/?id=ee6b343cbaa434d8c31a4d71dac5316b4404da35
>
> Author:    Christian Mauderer<christian.mauderer at embedded-brains.de>
> Date:      Thu May 10 15:06:10 2012 +0200
>
> Provide SWI(9) and TIMEOUT(9)
>
> ---
>
>   Makefile                                     |    3 +-
>   freebsd-to-rtems.py                          |    4 +-
>   freebsd/kern/kern_intr.c                     |   87 ++-
>   freebsd/kern/kern_timeout.c                  |  927 ++++++++++++++++++++++++++
>   freebsd/sys/callout.h                        |   11 -
>   freebsd/sys/mutex.h                          |    5 +
>   freebsd/sys/sleepqueue.h                     |    1 +
>   rtemsbsd/freebsd/machine/rtems-bsd-symbols.h |    5 -
>   rtemsbsd/src/rtems-bsd-callout.c             |  140 ----
>   rtemsbsd/src/rtems-bsd-shell.c               |   21 -
>   rtemsbsd/src/rtems-bsd-timeout.c             |  129 ----
>   testsuite/swi01/Makefile                     |   29 +
>   testsuite/swi01/init.c                       |   76 +++
>   testsuite/swi01/swi_test.c                   |  208 ++++++
>   testsuite/swi01/swi_test.h                   |   45 ++
>   testsuite/timeout01/Makefile                 |   29 +
>   testsuite/timeout01/init.c                   |   81 +++
>   testsuite/timeout01/timeout_helper.c         |  142 ++++
>   testsuite/timeout01/timeout_helper.h         |   46 ++
>   testsuite/timeout01/timeout_test.c           |  282 ++++++++
>   testsuite/timeout01/timeout_test.h           |   45 ++
>   21 files changed, 1994 insertions(+), 322 deletions(-)
>
> diff --git a/Makefile b/Makefile
> index 5338592..f93e38b 100644
> --- a/Makefile
> +++ b/Makefile
> @@ -36,7 +36,6 @@ C_FILES += rtemsbsd/src/rtems-bsd-generic.c
>   C_FILES += rtemsbsd/src/rtems-bsd-panic.c
>   C_FILES += rtemsbsd/src/rtems-bsd-synch.c
>   C_FILES += rtemsbsd/src/rtems-bsd-signal.c
> -C_FILES += rtemsbsd/src/rtems-bsd-callout.c
>   C_FILES += rtemsbsd/src/rtems-bsd-init.c
>   C_FILES += rtemsbsd/src/rtems-bsd-init-with-irq.c
>   C_FILES += rtemsbsd/src/rtems-bsd-assert.c
> @@ -52,7 +51,6 @@ C_FILES += rtemsbsd/src/rtems-bsd-sysctl.c
>   C_FILES += rtemsbsd/src/rtems-bsd-sysctlbyname.c
>   C_FILES += rtemsbsd/src/rtems-bsd-sysctlnametomib.c
>   C_FILES += rtemsbsd/src/rtems-bsd-taskqueue.c
> -C_FILES += rtemsbsd/src/rtems-bsd-timeout.c
>   C_FILES += rtemsbsd/src/rtems-bsd-timesupport.c
>   C_FILES += rtemsbsd/src/rtems-bsd-newproc.c
>   C_FILES += rtemsbsd/src/rtems-bsd-vm_glue.c
> @@ -322,6 +320,7 @@ C_FILES += freebsd/local/pcib_if.c
>   C_FILES += freebsd/kern/init_main.c
>   C_FILES += freebsd/kern/kern_linker.c
>   C_FILES += freebsd/kern/kern_mib.c
> +C_FILES += freebsd/kern/kern_timeout.c
>   C_FILES += freebsd/kern/kern_mbuf.c
>   C_FILES += freebsd/kern/kern_module.c
>   C_FILES += freebsd/kern/kern_sysctl.c
> diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py
> index 43597b4..ef31a87 100755
> --- a/freebsd-to-rtems.py
> +++ b/freebsd-to-rtems.py
> @@ -529,7 +529,6 @@ rtems.addRTEMSSourceFiles(
>                  'src/rtems-bsd-panic.c',
>                  'src/rtems-bsd-synch.c',
>                  'src/rtems-bsd-signal.c',
> -               'src/rtems-bsd-callout.c',
>                  'src/rtems-bsd-init.c',
>                  'src/rtems-bsd-init-with-irq.c',
>                  'src/rtems-bsd-assert.c',
> @@ -547,7 +546,6 @@ rtems.addRTEMSSourceFiles(
>                  'src/rtems-bsd-sysctlbyname.c',
>                  'src/rtems-bsd-sysctlnametomib.c',
>                  'src/rtems-bsd-taskqueue.c',
> -               'src/rtems-bsd-timeout.c',
>                  'src/rtems-bsd-timesupport.c',
>                  'src/rtems-bsd-newproc.c',
>                  'src/rtems-bsd-vm_glue.c',
> @@ -575,6 +573,7 @@ rtems.addEmptyHeaderFiles(
>                  'sys/exec.h',
>                  'sys/fail.h',
>                  'sys/limits.h',
> +               'sys/sleepqueue.h',
>                  'sys/namei.h',
>                  'sys/_pthreadtypes.h',
>                  #'sys/resourcevar.h',
> @@ -1119,6 +1118,7 @@ devUsbBase.addSourceFiles(
>                  'kern/init_main.c',
>                  'kern/kern_linker.c',
>                  'kern/kern_mib.c',
> +               'kern/kern_timeout.c',
>                  'kern/kern_mbuf.c',
>                  'kern/kern_module.c',
>                  'kern/kern_sysctl.c',
> diff --git a/freebsd/kern/kern_intr.c b/freebsd/kern/kern_intr.c
> index f93a72c..982d5e8 100644
> --- a/freebsd/kern/kern_intr.c
> +++ b/freebsd/kern/kern_intr.c
> @@ -55,19 +55,22 @@ __FBSDID("$FreeBSD$");
>   #include<freebsd/sys/sysctl.h>
>   #include<freebsd/sys/syslog.h>
>   #include<freebsd/sys/unistd.h>
> -#ifndef __rtems__
>   #include<freebsd/sys/vmmeter.h>
> -#endif /* __rtems__ */
>   #include<freebsd/machine/atomic.h>
>   #include<freebsd/machine/cpu.h>
>   #ifndef __rtems__
>   #include<freebsd/machine/md_var.h>
>   #include<freebsd/machine/stdarg.h>
> +#else /* __rtems__ */
> +  #ifdef INTR_FILTER
> +    #error INTR_FILTER is currently not supported with RTEMS
> +  #endif
> +  #define RTEMSBSD_SWI_WAKEUP_EVENT RTEMS_EVENT_31
> +#endif /* __rtems__ */
>   #ifdef DDB
>   #include<freebsd/ddb/ddb.h>
>   #include<freebsd/ddb/db_sym.h>
>   #endif
> -#endif /* __rtems__ */
>
>   /*
>    * Describe an interrupt thread.  There is one of these per interrupt event.
> @@ -88,24 +91,27 @@ struct      intr_entropy {
>   };
>
>   struct intr_event *clk_intr_event;
> +#ifndef __rtems__
>   struct intr_event *tty_intr_event;
>   void   *vm_ih;
> +#endif /* __rtems__ */
>   struct proc *intrproc;
>
>   static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
>
>   static int intr_storm_threshold = 1000;
> +#ifndef __rtems__
>   TUNABLE_INT("hw.intr_storm_threshold",&intr_storm_threshold);
>   SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
>       &intr_storm_threshold, 0,
>       "Number of consecutive interrupts before storm protection is enabled");
> +#endif /* __rtems__ */
>   static TAILQ_HEAD(, intr_event) event_list =
>       TAILQ_HEAD_INITIALIZER(event_list);
>   static struct mtx event_lock;
>   MTX_SYSINIT(intr_event_list,&event_lock, "intr event list", MTX_DEF);
>
>   static void    intr_event_update(struct intr_event *ie);
> -#ifndef __rtems__
>   #ifdef INTR_FILTER
>   static int     intr_event_schedule_thread(struct intr_event *ie,
>                      struct intr_thread *ithd);
> @@ -117,7 +123,9 @@ static struct intr_thread *ithread_create(const char *name,
>   static int     intr_event_schedule_thread(struct intr_event *ie);
>   static struct intr_thread *ithread_create(const char *name);
>   #endif
> +#ifndef __rtems__
>   static void    ithread_destroy(struct intr_thread *ithread);
> +#endif /* __rtems__ */
>   static void    ithread_execute_handlers(struct proc *p,
>                      struct intr_event *ie);
>   #ifdef INTR_FILTER
> @@ -125,7 +133,6 @@ static void priv_ithread_execute_handler(struct proc *p,
>                      struct intr_handler *ih);
>   #endif
>   static void    ithread_loop(void *);
> -#endif /* __rtems__ */
>   static void    ithread_update(struct intr_thread *ithd);
>   #ifndef __rtems__
>   static void    start_softintr(void *);
> @@ -172,6 +179,7 @@ intr_priority(enum intr_type flags)
>          return pri;
>   }
>
> +#endif /* __rtems__ */
>   /*
>    * Update an ithread based on the associated intr_event.
>    */
> @@ -194,10 +202,13 @@ ithread_update(struct intr_thread *ithd)
>          /* Update name and priority. */
>          strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
>          thread_lock(td);
> +#ifndef __rtems__
>          sched_prio(td, pri);
> +#else /* __rtems__ */
> +#warning TODO: set thread priority
> +#endif /* __rtems__ */
>          thread_unlock(td);
>   }
> -#endif /* __rtems__ */
>
>   /*
>    * Regenerate the full name of an interrupt event and update its priority.
> @@ -257,7 +268,6 @@ intr_event_update(struct intr_event *ie)
>                  ithread_update(ie->ie_thread);
>          CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
>   }
> -#ifndef __rtems__
>
>   int
>   intr_event_create(struct intr_event **event, void *source, int flags, int irq,
> @@ -296,6 +306,7 @@ intr_event_create(struct intr_event **event, void *source, int flags, int irq,
>          return (0);
>   }
>
> +#ifndef __rtems__
>   /*
>    * Bind an interrupt event to the specified CPU.  Note that not all
>    * platforms support binding an interrupt to a CPU.  For those
> @@ -453,6 +464,7 @@ intr_event_destroy(struct intr_event *ie)
>          return (0);
>   }
>
> +#endif /* __rtems__ */
>   #ifndef INTR_FILTER
>   static struct intr_thread *
>   ithread_create(const char *name)
> @@ -469,15 +481,20 @@ ithread_create(const char *name)
>          if (error)
>                  panic("kproc_create() failed with %d", error);
>          thread_lock(td);
> +#ifndef __rtems__
>          sched_class(td, PRI_ITHD);
>          TD_SET_IWAIT(td);
> +#endif /* __rtems__ */
>          thread_unlock(td);
> +#ifndef __rtems__
>          td->td_pflags |= TDP_ITHREAD;
> +#endif /* __rtems__ */
>          ithd->it_thread = td;
>          CTR2(KTR_INTR, "%s: created %s", __func__, name);
>          return (ithd);
>   }
>   #else
> +#ifndef __rtems__
>   static struct intr_thread *
>   ithread_create(const char *name, struct intr_handler *ih)
>   {
> @@ -501,7 +518,9 @@ ithread_create(const char *name, struct intr_handler *ih)
>          CTR2(KTR_INTR, "%s: created %s", __func__, name);
>          return (ithd);
>   }
> +#endif /* __rtems__ */
>   #endif
> +#ifndef __rtems__
>
>   static void
>   ithread_destroy(struct intr_thread *ithread)
> @@ -518,8 +537,8 @@ ithread_destroy(struct intr_thread *ithread)
>          }
>          thread_unlock(td);
>   }
> -#endif /* __rtems__ */
>
> +#endif /* __rtems__ */
>   #ifndef INTR_FILTER
>   int
>   intr_event_add_handler(struct intr_event *ie, const char *name,
> @@ -594,6 +613,7 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
>          return (0);
>   }
>   #else
> +#ifndef __rtems__
>   int
>   intr_event_add_handler(struct intr_event *ie, const char *name,
>       driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
> @@ -675,6 +695,7 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
>                  *cookiep = ih;
>          return (0);
>   }
> +#endif /* __rtems__ */
>   #endif
>
>   #ifndef __rtems__
> @@ -753,7 +774,9 @@ intr_handler_source(void *cookie)
>          return (ie->ie_source);
>   }
>
> +#endif /* __rtems__ */
>   #ifndef INTR_FILTER
> +#ifndef __rtems__
>   int
>   intr_event_remove_handler(void *cookie)
>   {
> @@ -843,6 +866,7 @@ ok:
>          return (0);
>   }
>
> +#endif /* __rtems__ */
>   static int
>   intr_event_schedule_thread(struct intr_event *ie)
>   {
> @@ -886,6 +910,7 @@ intr_event_schedule_thread(struct intr_event *ie)
>           */
>          it->it_need = 1;
>          thread_lock(td);
> +#ifndef __rtems__
>          if (TD_AWAITING_INTR(td)) {
>                  CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
>                      td->td_name);
> @@ -895,11 +920,19 @@ intr_event_schedule_thread(struct intr_event *ie)
>                  CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
>                      __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
>          }
> +#else /* __rtems__ */
> +       /* Send event to wake the thread up.
> +        * TODO: eventually replace event by a better mechanism
> +        */
> +       rtems_status_code sc = rtems_event_send(td->td_id, RTEMSBSD_SWI_WAKEUP_EVENT);
> +       BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
> +#endif /* __rtems__ */
>          thread_unlock(td);
>
>          return (0);
>   }
>   #else
> +#ifndef __rtems__
>   int
>   intr_event_remove_handler(void *cookie)
>   {
> @@ -1053,8 +1086,8 @@ intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
>
>          return (0);
>   }
> -#endif
>   #endif /* __rtems__ */
> +#endif
>
>   /*
>    * Allow interrupt event binding for software interrupt handlers -- a no-op,
> @@ -1099,19 +1132,20 @@ swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
>              (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
>          if (error)
>                  return (error);
> +#ifndef __rtems__
>          if (pri == SWI_CLOCK) {
>                  struct proc *p;
>                  p = ie->ie_thread->it_thread->td_proc;
>                  PROC_LOCK(p);
> -#ifndef __rtems__
>                  p->p_flag |= P_NOLOAD;
> -#endif /* __rtems__ */
>                  PROC_UNLOCK(p);
>          }
> +#else /* __rtems__ */
> +       // Do _not_ ignore the thread in the load average
> +#endif /* __rtems__ */
>          return (0);
>   }
>
> -#ifndef __rtems__
>   /*
>    * Schedule a software interrupt thread.
>    */
> @@ -1133,7 +1167,9 @@ swi_sched(void *cookie, int flags)
>          atomic_store_rel_int(&ih->ih_need, 1);
>
>          if (!(flags&  SWI_DELAY)) {
> +#ifndef __rtems__
>                  PCPU_INC(cnt.v_soft);
> +#endif /* __rtems__ */
>   #ifdef INTR_FILTER
>                  error = intr_event_schedule_thread(ie, ie->ie_thread);
>   #else
> @@ -1143,6 +1179,7 @@ swi_sched(void *cookie, int flags)
>          }
>   }
>
> +#ifndef __rtems__
>   /*
>    * Remove a software interrupt handler.  Currently this code does not
>    * remove the associated interrupt event if it becomes empty.  Calling code
> @@ -1189,6 +1226,7 @@ priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
>   }
>   #endif
>
> +#endif /* __rtems__ */
>   /*
>    * This is a public function for use by drivers that mux interrupt
>    * handlers for child devices from their interrupt handler.
> @@ -1245,12 +1283,17 @@ static void
>   ithread_execute_handlers(struct proc *p, struct intr_event *ie)
>   {
>
> +#ifndef __rtems__
>          /* Interrupt handlers should not sleep. */
>          if (!(ie->ie_flags&  IE_SOFT))
>                  THREAD_NO_SLEEPING();
>          intr_event_execute_handlers(p, ie);
>          if (!(ie->ie_flags&  IE_SOFT))
>                  THREAD_SLEEPING_OK();
> +#else /* __rtems__ */
> +       /* We only have soft-threads, so the two queries are not necessary. */
> +       intr_event_execute_handlers(p, ie);
> +#endif /* __rtems__ */
>
>          /*
>           * Interrupt storm handling:
> @@ -1264,12 +1307,14 @@ ithread_execute_handlers(struct proc *p, struct intr_event *ie)
>           */
>          if (intr_storm_threshold != 0&&  ie->ie_count>= intr_storm_threshold&&
>              !(ie->ie_flags&  IE_SOFT)) {
> +#ifndef __rtems__
>                  /* Report the message only once every second. */
>                  if (ppsratecheck(&ie->ie_warntm,&ie->ie_warncnt, 1)) {
>                          printf(
>          "interrupt storm detected on \"%s\"; throttling interrupt source\n",
>                              ie->ie_name);
>                  }
> +#endif /* __rtems__ */
>                  pause("istorm", 1);
>          } else
>                  ie->ie_count++;
> @@ -1342,13 +1387,27 @@ ithread_loop(void *arg)
>                   */
>                  thread_lock(td);
>                  if (!ithd->it_need&&  !(ithd->it_flags&  IT_DEAD)) {
> +#ifndef __rtems__
>                          TD_SET_IWAIT(td);
>                          ie->ie_count = 0;
>                          mi_switch(SW_VOL | SWT_IWAIT, NULL);
> +#else /* __rtems__ */
> +                       /* wait for wakeup event
> +                        * TODO: eventually replace event by a better mechanism
> +                        */
> +                       rtems_event_set event_out;
> +                       rtems_status_code sc = rtems_event_receive(
> +                               RTEMSBSD_SWI_WAKEUP_EVENT,
> +                               RTEMS_WAIT | RTEMS_EVENT_ALL,
> +                               RTEMS_NO_TIMEOUT,
> +&event_out);
> +                       BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
> +#endif /* __rtems__ */
>                  }
>                  thread_unlock(td);
>          }
>   }
> +#ifndef __rtems__
>
>   /*
>    * Main interrupt handling body.
> @@ -1444,7 +1503,9 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
>          td->td_intr_nesting_level--;
>          return (0);
>   }
> +#endif /* __rtems__ */
>   #else
> +#ifndef __rtems__
>   /*
>    * This is the main code for interrupt threads.
>    */
> @@ -1647,7 +1708,9 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
>          td->td_intr_nesting_level--;
>          return (0);
>   }
> +#endif /* __rtems__ */
>   #endif
> +#ifndef __rtems__
>
>   #ifdef DDB
>   /*
> diff --git a/freebsd/kern/kern_timeout.c b/freebsd/kern/kern_timeout.c
> new file mode 100644
> index 0000000..536ca3f
> --- /dev/null
> +++ b/freebsd/kern/kern_timeout.c
> @@ -0,0 +1,927 @@
> +#include<freebsd/machine/rtems-bsd-config.h>
> +
> +/*-
> + * Copyright (c) 1982, 1986, 1991, 1993
> + *     The Regents of the University of California.  All rights reserved.
> + * (c) UNIX System Laboratories, Inc.
> + * All or some portions of this file are derived from material licensed
> + * to the University of California by American Telephone and Telegraph
> + * Co. or Unix System Laboratories, Inc. and are reproduced herein with
> + * the permission of UNIX System Laboratories, Inc.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + * 4. Neither the name of the University nor the names of its contributors
> + *    may be used to endorse or promote products derived from this software
> + *    without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + *
> + *     From: @(#)kern_clock.c  8.5 (Berkeley) 1/21/94
> + */
> +
> +#include<freebsd/sys/cdefs.h>
> +__FBSDID("$FreeBSD$");
> +
> +#include<freebsd/local/opt_kdtrace.h>
> +
> +#include<freebsd/sys/param.h>
> +#include<freebsd/sys/systm.h>
> +#include<freebsd/sys/bus.h>
> +#include<freebsd/sys/callout.h>
> +#include<freebsd/sys/condvar.h>
> +#include<freebsd/sys/interrupt.h>
> +#include<freebsd/sys/kernel.h>
> +#include<freebsd/sys/ktr.h>
> +#include<freebsd/sys/lock.h>
> +#include<freebsd/sys/malloc.h>
> +#include<freebsd/sys/mutex.h>
> +#include<freebsd/sys/proc.h>
> +#include<freebsd/sys/sdt.h>
> +#include<freebsd/sys/sleepqueue.h>
> +#include<freebsd/sys/sysctl.h>
> +#include<freebsd/sys/smp.h>
> +
> +#ifdef __rtems__
> +int ncallout = 16;
> +#endif /* __rtems__ */
> +SDT_PROVIDER_DEFINE(callout_execute);
> +SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start);
> +SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
> +    "struct callout *");
> +SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end);
> +SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
> +    "struct callout *");
> +
> +static int avg_depth;
> +SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD,&avg_depth, 0,
> +    "Average number of items examined per softclock call. Units = 1/1000");
> +static int avg_gcalls;
> +SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD,&avg_gcalls, 0,
> +    "Average number of Giant callouts made per softclock call. Units = 1/1000");
> +static int avg_lockcalls;
> +SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD,&avg_lockcalls, 0,
> +    "Average number of lock callouts made per softclock call. Units = 1/1000");
> +static int avg_mpcalls;
> +SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD,&avg_mpcalls, 0,
> +    "Average number of MP callouts made per softclock call. Units = 1/1000");
> +/*
> + * TODO:
> + *     allocate more timeout table slots when table overflows.
> + */
> +int callwheelsize, callwheelbits, callwheelmask;
> +
> +/*
> + * There is one struct callout_cpu per cpu, holding all relevant
> + * state for the callout processing thread on the individual CPU.
> + * In particular:
> + *     cc_ticks is incremented once per tick in callout_cpu().
> + *     It tracks the global 'ticks' but in a way that the individual
> + *     threads should not worry about races in the order in which
> + *     hardclock() and hardclock_cpu() run on the various CPUs.
> + *     cc_softclock is advanced in callout_cpu() to point to the
> + *     first entry in cc_callwheel that may need handling. In turn,
> + *     a softclock() is scheduled so it can serve the various entries i
> + *     such that cc_softclock<= i<= cc_ticks .
> + *     XXX maybe cc_softclock and cc_ticks should be volatile ?
> + *
> + *     cc_ticks is also used in callout_reset_cpu() to determine
> + *     when the callout should be served.
> + */
> +struct callout_cpu {
> +       struct mtx              cc_lock;
> +       struct callout          *cc_callout;
> +       struct callout_tailq    *cc_callwheel;
> +       struct callout_list     cc_callfree;
> +       struct callout          *cc_next;
> +       struct callout          *cc_curr;
> +       void                    *cc_cookie;
> +       int                     cc_ticks;
> +       int                     cc_softticks;
> +       int                     cc_cancel;
> +       int                     cc_waiting;
> +};
> +
> +#ifdef SMP
> +struct callout_cpu cc_cpu[MAXCPU];
> +#define        CC_CPU(cpu)     (&cc_cpu[(cpu)])
> +#define        CC_SELF()       CC_CPU(PCPU_GET(cpuid))
> +#else
> +struct callout_cpu cc_cpu;
> +#define        CC_CPU(cpu)&cc_cpu
> +#define        CC_SELF()&cc_cpu
> +#endif
> +#define        CC_LOCK(cc)     mtx_lock_spin(&(cc)->cc_lock)
> +#define        CC_UNLOCK(cc)   mtx_unlock_spin(&(cc)->cc_lock)
> +
> +static int timeout_cpu;
> +
> +MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
> +
> +/**
> + * Locked by cc_lock:
> + *   cc_curr         - If a callout is in progress, it is curr_callout.
> + *                     If curr_callout is non-NULL, threads waiting in
> + *                     callout_drain() will be woken up as soon as the
> + *                     relevant callout completes.
> + *   cc_cancel       - Changing to 1 with both callout_lock and c_lock held
> + *                     guarantees that the current callout will not run.
> + *                     The softclock() function sets this to 0 before it
> + *                     drops callout_lock to acquire c_lock, and it calls
> + *                     the handler only if curr_cancelled is still 0 after
> + *                     c_lock is successfully acquired.
> + *   cc_waiting      - If a thread is waiting in callout_drain(), then
> + *                     callout_wait is nonzero.  Set only when
> + *                     curr_callout is non-NULL.
> + */
> +
> +/*
> + * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
> + *
> + *     This code is called very early in the kernel initialization sequence,
> + *     and may be called more than once.
> + */
> +caddr_t
> +kern_timeout_callwheel_alloc(caddr_t v)
> +{
> +       struct callout_cpu *cc;
> +
> +       timeout_cpu = PCPU_GET(cpuid);
> +       cc = CC_CPU(timeout_cpu);
> +       /*
> +        * Calculate callout wheel size
> +        */
> +       for (callwheelsize = 1, callwheelbits = 0;
> +            callwheelsize<  ncallout;
> +            callwheelsize<<= 1, ++callwheelbits)
> +               ;
> +       callwheelmask = callwheelsize - 1;
> +
> +       cc->cc_callout = (struct callout *)v;
> +       v = (caddr_t)(cc->cc_callout + ncallout);
> +       cc->cc_callwheel = (struct callout_tailq *)v;
> +       v = (caddr_t)(cc->cc_callwheel + callwheelsize);
> +       return(v);
> +}
> +
> +static void
> +callout_cpu_init(struct callout_cpu *cc)
> +{
> +       struct callout *c;
> +       int i;
> +
> +       mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
> +       SLIST_INIT(&cc->cc_callfree);
> +       for (i = 0; i<  callwheelsize; i++) {
> +               TAILQ_INIT(&cc->cc_callwheel[i]);
> +       }
> +       if (cc->cc_callout == NULL)
> +               return;
> +       for (i = 0; i<  ncallout; i++) {
> +               c =&cc->cc_callout[i];
> +               callout_init(c, 0);
> +               c->c_flags = CALLOUT_LOCAL_ALLOC;
> +               SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
> +       }
> +}
> +
> +/*
> + * kern_timeout_callwheel_init() - initialize previously reserved callwheel
> + *                                space.
> + *
> + *     This code is called just once, after the space reserved for the
> + *     callout wheel has been finalized.
> + */
> +void
> +kern_timeout_callwheel_init(void)
> +{
> +       callout_cpu_init(CC_CPU(timeout_cpu));
> +}
> +
> +/*
> + * Start standard softclock thread.
> + */
> +void    *softclock_ih;
> +
> +static void
> +start_softclock(void *dummy)
> +{
> +       struct callout_cpu *cc;
> +#ifdef SMP
> +       int cpu;
> +#endif
> +
> +       cc = CC_CPU(timeout_cpu);
> +       if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
> +           INTR_MPSAFE,&softclock_ih))
> +               panic("died while creating standard software ithreads");
> +       cc->cc_cookie = softclock_ih;
> +#ifdef SMP
> +       for (cpu = 0; cpu<= mp_maxid; cpu++) {
> +               if (cpu == timeout_cpu)
> +                       continue;
> +               if (CPU_ABSENT(cpu))
> +                       continue;
> +               cc = CC_CPU(cpu);
> +               if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
> +                   INTR_MPSAFE,&cc->cc_cookie))
> +                       panic("died while creating standard software ithreads");
> +               cc->cc_callout = NULL;  /* Only cpu0 handles timeout(). */
> +               cc->cc_callwheel = malloc(
> +                   sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
> +                   M_WAITOK);
> +               callout_cpu_init(cc);
> +       }
> +#endif
> +}
> +
> +SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
> +
> +void
> +callout_tick(void)
> +{
> +       struct callout_cpu *cc;
> +       int need_softclock;
> +       int bucket;
> +
> +       /*
> +        * Process callouts at a very low cpu priority, so we don't keep the
> +        * relatively high clock interrupt priority any longer than necessary.
> +        */
> +       need_softclock = 0;
> +       cc = CC_SELF();
> +       mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
> +       cc->cc_ticks++;
> +       for (; (cc->cc_softticks - cc->cc_ticks)<= 0; cc->cc_softticks++) {
> +               bucket = cc->cc_softticks&  callwheelmask;
> +               if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
> +                       need_softclock = 1;
> +                       break;
> +               }
> +       }
> +       mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
> +       /*
> +        * swi_sched acquires the thread lock, so we don't want to call it
> +        * with cc_lock held; incorrect locking order.
> +        */
> +       if (need_softclock)
> +               swi_sched(cc->cc_cookie, 0);
> +}
> +
> +static struct callout_cpu *
> +callout_lock(struct callout *c)
> +{
> +       struct callout_cpu *cc;
> +       int cpu;
> +
> +       for (;;) {
> +               cpu = c->c_cpu;
> +               cc = CC_CPU(cpu);
> +               CC_LOCK(cc);
> +               if (cpu == c->c_cpu)
> +                       break;
> +               CC_UNLOCK(cc);
> +       }
> +       return (cc);
> +}
> +
> +/*
> + * The callout mechanism is based on the work of Adam M. Costello and
> + * George Varghese, published in a technical report entitled "Redesigning
> + * the BSD Callout and Timer Facilities" and modified slightly for inclusion
> + * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
> + * used in this implementation was published by G. Varghese and T. Lauck in
> + * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
> + * the Efficient Implementation of a Timer Facility" in the Proceedings of
> + * the 11th ACM Annual Symposium on Operating Systems Principles,
> + * Austin, Texas Nov 1987.
> + */
> +
> +/*
> + * Software (low priority) clock interrupt.
> + * Run periodic events from timeout queue.
> + */
> +void
> +softclock(void *arg)
> +{
> +       struct callout_cpu *cc;
> +       struct callout *c;
> +       struct callout_tailq *bucket;
> +       int curticks;
> +       int steps;      /* #steps since we last allowed interrupts */
> +       int depth;
> +       int mpcalls;
> +       int lockcalls;
> +       int gcalls;
> +#ifdef DIAGNOSTIC
> +       struct bintime bt1, bt2;
> +       struct timespec ts2;
> +       static uint64_t maxdt = 36893488147419102LL;    /* 2 msec */
> +       static timeout_t *lastfunc;
> +#endif
> +
> +#ifndef MAX_SOFTCLOCK_STEPS
> +#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
> +#endif /* MAX_SOFTCLOCK_STEPS */
> +
> +       mpcalls = 0;
> +       lockcalls = 0;
> +       gcalls = 0;
> +       depth = 0;
> +       steps = 0;
> +       cc = (struct callout_cpu *)arg;
> +       CC_LOCK(cc);
> +       while (cc->cc_softticks - 1 != cc->cc_ticks) {
> +               /*
> +                * cc_softticks may be modified by hard clock, so cache
> +                * it while we work on a given bucket.
> +                */
> +               curticks = cc->cc_softticks;
> +               cc->cc_softticks++;
> +               bucket =&cc->cc_callwheel[curticks&  callwheelmask];
> +               c = TAILQ_FIRST(bucket);
> +               while (c) {
> +                       depth++;
> +                       if (c->c_time != curticks) {
> +                               c = TAILQ_NEXT(c, c_links.tqe);
> +                               ++steps;
> +                               if (steps>= MAX_SOFTCLOCK_STEPS) {
> +                                       cc->cc_next = c;
> +                                       /* Give interrupts a chance. */
> +                                       CC_UNLOCK(cc);
> +                                       ;       /* nothing */
> +                                       CC_LOCK(cc);
> +                                       c = cc->cc_next;
> +                                       steps = 0;
> +                               }
> +                       } else {
> +                               void (*c_func)(void *);
> +                               void *c_arg;
> +                               struct lock_class *class;
> +                               struct lock_object *c_lock;
> +                               int c_flags, sharedlock;
> +
> +                               cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
> +                               TAILQ_REMOVE(bucket, c, c_links.tqe);
> +                               class = (c->c_lock != NULL) ?
> +                                   LOCK_CLASS(c->c_lock) : NULL;
> +                               sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
> +                                   0 : 1;
> +                               c_lock = c->c_lock;
> +                               c_func = c->c_func;
> +                               c_arg = c->c_arg;
> +                               c_flags = c->c_flags;
> +                               if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
> +                                       c->c_flags = CALLOUT_LOCAL_ALLOC;
> +                               } else {
> +                                       c->c_flags =
> +                                           (c->c_flags & ~CALLOUT_PENDING);
> +                               }
> +                               cc->cc_curr = c;
> +                               cc->cc_cancel = 0;
> +                               CC_UNLOCK(cc);
> +                               if (c_lock != NULL) {
> +                                       class->lc_lock(c_lock, sharedlock);
> +                                       /*
> +                                        * The callout may have been cancelled
> +                                        * while we switched locks.
> +                                        */
> +                                       if (cc->cc_cancel) {
> +                                               class->lc_unlock(c_lock);
> +                                               goto skip;
> +                                       }
> +                                       /* The callout cannot be stopped now. */
> +                                       cc->cc_cancel = 1;
> +
> +                                       if (c_lock == &Giant.lock_object) {
> +                                               gcalls++;
> +                                               CTR3(KTR_CALLOUT,
> +                                                   "callout %p func %p arg %p",
> +                                                   c, c_func, c_arg);
> +                                       } else {
> +                                               lockcalls++;
> +                                               CTR3(KTR_CALLOUT, "callout lock"
> +                                                   " %p func %p arg %p",
> +                                                   c, c_func, c_arg);
> +                                       }
> +                               } else {
> +                                       mpcalls++;
> +                                       CTR3(KTR_CALLOUT,
> +                                           "callout mpsafe %p func %p arg %p",
> +                                           c, c_func, c_arg);
> +                               }
> +#ifdef DIAGNOSTIC
> +                               binuptime(&bt1);
> +#endif
> +#ifndef __rtems__
> +                               THREAD_NO_SLEEPING();
> +                               SDT_PROBE(callout_execute, kernel, ,
> +                                   callout_start, c, 0, 0, 0, 0);
> +#endif /* __rtems__ */
> +                               c_func(c_arg);
> +#ifndef __rtems__
> +                               SDT_PROBE(callout_execute, kernel, ,
> +                                   callout_end, c, 0, 0, 0, 0);
> +                               THREAD_SLEEPING_OK();
> +#endif /* __rtems__ */
> +#ifdef DIAGNOSTIC
> +                               binuptime(&bt2);
> +                               bintime_sub(&bt2, &bt1);
> +                               if (bt2.frac > maxdt) {
> +                                       if (lastfunc != c_func ||
> +                                           bt2.frac > maxdt * 2) {
> +                                               bintime2timespec(&bt2, &ts2);
> +                                               printf(
> +                       "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
> +                                                   c_func, c_arg,
> +                                                   (intmax_t)ts2.tv_sec,
> +                                                   ts2.tv_nsec);
> +                                       }
> +                                       maxdt = bt2.frac;
> +                                       lastfunc = c_func;
> +                               }
> +#endif
> +                               CTR1(KTR_CALLOUT, "callout %p finished", c);
> +                               if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
> +                                       class->lc_unlock(c_lock);
> +                       skip:
> +                               CC_LOCK(cc);
> +                               /*
> +                                * If the current callout is locally
> +                                * allocated (from timeout(9))
> +                                * then put it on the freelist.
> +                                *
> +                                * Note: we need to check the cached
> +                                * copy of c_flags because if it was not
> +                                * local, then it's not safe to deref the
> +                                * callout pointer.
> +                                */
> +                               if (c_flags & CALLOUT_LOCAL_ALLOC) {
> +                                       KASSERT(c->c_flags ==
> +                                           CALLOUT_LOCAL_ALLOC,
> +                                           ("corrupted callout"));
> +                                       c->c_func = NULL;
> +                                       SLIST_INSERT_HEAD(&cc->cc_callfree, c,
> +                                           c_links.sle);
> +                               }
> +                               cc->cc_curr = NULL;
> +                               if (cc->cc_waiting) {
> +                                       /*
> +                                        * There is someone waiting
> +                                        * for the callout to complete.
> +                                        */
> +                                       cc->cc_waiting = 0;
> +                                       CC_UNLOCK(cc);
> +                                       wakeup(&cc->cc_waiting);
> +                                       CC_LOCK(cc);
> +                               }
> +                               steps = 0;
> +                               c = cc->cc_next;
> +                       }
> +               }
> +       }
> +       avg_depth += (depth * 1000 - avg_depth) >> 8;
> +       avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
> +       avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
> +       avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
> +       cc->cc_next = NULL;
> +       CC_UNLOCK(cc);
> +}
> +
> +/*
> + * timeout --
> + *     Execute a function after a specified length of time.
> + *
> + * untimeout --
> + *     Cancel previous timeout function call.
> + *
> + * callout_handle_init --
> + *     Initialize a handle so that using it with untimeout is benign.
> + *
> + *     See AT&T BCI Driver Reference Manual for specification.  This
> + *     implementation differs from that one in that although an
> + *     identification value is returned from timeout, the original
> + *     arguments to timeout as well as the identifier are used to
> + *     identify entries for untimeout.
> + */
> +struct callout_handle
> +timeout(ftn, arg, to_ticks)
> +       timeout_t *ftn;
> +       void *arg;
> +       int to_ticks;
> +{
> +       struct callout_cpu *cc;
> +       struct callout *new;
> +       struct callout_handle handle;
> +
> +       cc = CC_CPU(timeout_cpu);
> +       CC_LOCK(cc);
> +       /* Fill in the next free callout structure. */
> +       new = SLIST_FIRST(&cc->cc_callfree);
> +       if (new == NULL)
> +               /* XXX Attempt to malloc first */
> +               panic("timeout table full");
> +       SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
> +       callout_reset(new, to_ticks, ftn, arg);
> +       handle.callout = new;
> +       CC_UNLOCK(cc);
> +
> +       return (handle);
> +}
> +
> +void
> +untimeout(ftn, arg, handle)
> +       timeout_t *ftn;
> +       void *arg;
> +       struct callout_handle handle;
> +{
> +       struct callout_cpu *cc;
> +
> +       /*
> +        * Check for a handle that was initialized
> +        * by callout_handle_init, but never used
> +        * for a real timeout.
> +        */
> +       if (handle.callout == NULL)
> +               return;
> +
> +       cc = callout_lock(handle.callout);
> +       if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
> +               callout_stop(handle.callout);
> +       CC_UNLOCK(cc);
> +}
> +
> +void
> +callout_handle_init(struct callout_handle *handle)
> +{
> +       handle->callout = NULL;
> +}
> +
> +/*
> + * New interface; clients allocate their own callout structures.
> + *
> + * callout_reset() - establish or change a timeout
> + * callout_stop() - disestablish a timeout
> + * callout_init() - initialize a callout structure so that it can
> + *     safely be passed to callout_reset() and callout_stop()
> + *
> + * <sys/callout.h> defines three convenience macros:
> + *
> + * callout_active() - returns truth if callout has not been stopped,
> + *     drained, or deactivated since the last time the callout was
> + *     reset.
> + * callout_pending() - returns truth if callout is still waiting for timeout
> + * callout_deactivate() - marks the callout as having been serviced
> + */
> +int
> +callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
> +    void *arg, int cpu)
> +{
> +       struct callout_cpu *cc;
> +       int cancelled = 0;
> +
> +       /*
> +        * Don't allow migration of pre-allocated callouts lest they
> +        * become unbalanced.
> +        */
> +       if (c->c_flags & CALLOUT_LOCAL_ALLOC)
> +               cpu = c->c_cpu;
> +retry:
> +       cc = callout_lock(c);
> +       if (cc->cc_curr == c) {
> +               /*
> +                * We're being asked to reschedule a callout which is
> +                * currently in progress.  If there is a lock then we
> +                * can cancel the callout if it has not really started.
> +                */
> +               if (c->c_lock != NULL && !cc->cc_cancel)
> +                       cancelled = cc->cc_cancel = 1;
> +               if (cc->cc_waiting) {
> +                       /*
> +                        * Someone has called callout_drain to kill this
> +                        * callout.  Don't reschedule.
> +                        */
> +                       CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
> +                           cancelled ? "cancelled" : "failed to cancel",
> +                           c, c->c_func, c->c_arg);
> +                       CC_UNLOCK(cc);
> +                       return (cancelled);
> +               }
> +       }
> +       if (c->c_flags & CALLOUT_PENDING) {
> +               if (cc->cc_next == c) {
> +                       cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
> +               }
> +               TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
> +                   c_links.tqe);
> +
> +               cancelled = 1;
> +               c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
> +       }
> +       /*
> +        * If the lock must migrate we have to check the state again as
> +        * we can't hold both the new and old locks simultaneously.
> +        */
> +       if (c->c_cpu != cpu) {
> +               c->c_cpu = cpu;
> +               CC_UNLOCK(cc);
> +               goto retry;
> +       }
> +
> +       if (to_ticks <= 0)
> +               to_ticks = 1;
> +
> +       c->c_arg = arg;
> +       c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
> +       c->c_func = ftn;
> +       c->c_time = cc->cc_ticks + to_ticks;
> +       TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
> +                         c, c_links.tqe);
> +       CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
> +           cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
> +       CC_UNLOCK(cc);
> +
> +       return (cancelled);
> +}
> +
> +/*
> + * Common idioms that can be optimized in the future.
> + */
> +int
> +callout_schedule_on(struct callout *c, int to_ticks, int cpu)
> +{
> +       return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
> +}
> +
> +int
> +callout_schedule(struct callout *c, int to_ticks)
> +{
> +       return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
> +}
> +
> +int
> +_callout_stop_safe(c, safe)
> +       struct  callout *c;
> +       int     safe;
> +{
> +       struct callout_cpu *cc;
> +       struct lock_class *class;
> +#ifndef __rtems__
> +       int use_lock, sq_locked;
> +#else /* __rtems__ */
> +       int use_lock;
> +#endif /* __rtems__ */
> +
> +       /*
> +        * Some old subsystems don't hold Giant while running a callout_stop(),
> +        * so just discard this check for the moment.
> +        */
> +       if (!safe && c->c_lock != NULL) {
> +               if (c->c_lock == &Giant.lock_object)
> +                       use_lock = mtx_owned(&Giant);
> +               else {
> +                       use_lock = 1;
> +                       class = LOCK_CLASS(c->c_lock);
> +                       class->lc_assert(c->c_lock, LA_XLOCKED);
> +               }
> +       } else
> +               use_lock = 0;
> +
> +#ifndef __rtems__
> +       sq_locked = 0;
> +again:
> +#endif /* __rtems__ */
> +       cc = callout_lock(c);
> +       /*
> +        * If the callout isn't pending, it's not on the queue, so
> +        * don't attempt to remove it from the queue.  We can try to
> +        * stop it by other means however.
> +        */
> +       if (!(c->c_flags & CALLOUT_PENDING)) {
> +               c->c_flags &= ~CALLOUT_ACTIVE;
> +
> +               /*
> +                * If it wasn't on the queue and it isn't the current
> +                * callout, then we can't stop it, so just bail.
> +                */
> +               if (cc->cc_curr != c) {
> +                       CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
> +                           c, c->c_func, c->c_arg);
> +                       CC_UNLOCK(cc);
> +#ifndef __rtems__
> +                       if (sq_locked)
> +                               sleepq_release(&cc->cc_waiting);
> +#endif /* __rtems__ */
> +                       return (0);
> +               }
> +
> +               if (safe) {
> +                       /*
> +                        * The current callout is running (or just
> +                        * about to run) and blocking is allowed, so
> +                        * just wait for the current invocation to
> +                        * finish.
> +                        */
> +                       while (cc->cc_curr == c) {
> +#ifndef __rtems__
> +
> +                               /*
> +                                * Use direct calls to sleepqueue interface
> +                                * instead of cv/msleep in order to avoid
> +                                * a LOR between cc_lock and sleepqueue
> +                                * chain spinlocks.  This piece of code
> +                                * emulates a msleep_spin() call actually.
> +                                *
> +                                * If we already have the sleepqueue chain
> +                                * locked, then we can safely block.  If we
> +                                * don't already have it locked, however,
> +                                * we have to drop the cc_lock to lock
> +                                * it.  This opens several races, so we
> +                                * restart at the beginning once we have
> +                                * both locks.  If nothing has changed, then
> +                                * we will end up back here with sq_locked
> +                                * set.
> +                                */
> +                               if (!sq_locked) {
> +                                       CC_UNLOCK(cc);
> +                                       sleepq_lock(&cc->cc_waiting);
> +                                       sq_locked = 1;
> +                                       goto again;
> +                               }
> +                               cc->cc_waiting = 1;
> +                               DROP_GIANT();
> +                               CC_UNLOCK(cc);
> +                               sleepq_add(&cc->cc_waiting,
> +                                   &cc->cc_lock.lock_object, "codrain",
> +                                   SLEEPQ_SLEEP, 0);
> +                               sleepq_wait(&cc->cc_waiting, 0);
> +                               sq_locked = 0;
> +
> +                               /* Reacquire locks previously released. */
> +                               PICKUP_GIANT();
> +                               CC_LOCK(cc);
> +#else /* __rtems__ */
> +                               BSD_ASSERT(0);
> +#endif /* __rtems__ */
> +                       }
> +               } else if (use_lock && !cc->cc_cancel) {
> +                       /*
> +                        * The current callout is waiting for its
> +                        * lock which we hold.  Cancel the callout
> +                        * and return.  After our caller drops the
> +                        * lock, the callout will be skipped in
> +                        * softclock().
> +                        */
> +                       cc->cc_cancel = 1;
> +                       CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
> +                           c, c->c_func, c->c_arg);
> +                       CC_UNLOCK(cc);
> +                       KASSERT(!sq_locked, ("sleepqueue chain locked"));
> +                       return (1);
> +               }
> +               CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
> +                   c, c->c_func, c->c_arg);
> +               CC_UNLOCK(cc);
> +               KASSERT(!sq_locked, ("sleepqueue chain still locked"));
> +               return (0);
> +       }
> +#ifndef __rtems__
> +       if (sq_locked)
> +               sleepq_release(&cc->cc_waiting);
> +#endif /* __rtems__ */
> +
> +       c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
> +
> +       if (cc->cc_next == c) {
> +               cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
> +       }
> +       TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
> +           c_links.tqe);
> +
> +       CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
> +           c, c->c_func, c->c_arg);
> +
> +       if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
> +               c->c_func = NULL;
> +               SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
> +       }
> +       CC_UNLOCK(cc);
> +       return (1);
> +}
> +
> +void
> +callout_init(c, mpsafe)
> +       struct  callout *c;
> +       int mpsafe;
> +{
> +       bzero(c, sizeof *c);
> +       if (mpsafe) {
> +               c->c_lock = NULL;
> +               c->c_flags = CALLOUT_RETURNUNLOCKED;
> +       } else {
> +               c->c_lock = &Giant.lock_object;
> +               c->c_flags = 0;
> +       }
> +       c->c_cpu = timeout_cpu;
> +}
> +
> +void
> +_callout_init_lock(c, lock, flags)
> +       struct  callout *c;
> +       struct  lock_object *lock;
> +       int flags;
> +{
> +       bzero(c, sizeof *c);
> +       c->c_lock = lock;
> +       KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
> +           ("callout_init_lock: bad flags %d", flags));
> +       KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
> +           ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
> +       KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
> +           (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
> +           __func__));
> +       c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
> +       c->c_cpu = timeout_cpu;
> +}
> +
> +#ifdef APM_FIXUP_CALLTODO
> +/*
> + * Adjust the kernel calltodo timeout list.  This routine is used after
> + * an APM resume to recalculate the calltodo timer list values with the
> + * number of hz's we have been sleeping.  The next hardclock() will detect
> + * that there are fired timers and run softclock() to execute them.
> + *
> + * Please note, I have not done an exhaustive analysis of what code this
> + * might break.  I am motivated to have my select()'s and alarm()'s that
> + * have expired during suspend firing upon resume so that the applications
> + * which set the timer can do the maintanence the timer was for as close
> + * as possible to the originally intended time.  Testing this code for a
> + * week showed that resuming from a suspend resulted in 22 to 25 timers
> + * firing, which seemed independant on whether the suspend was 2 hours or
> + * 2 days.  Your milage may vary.   - Ken Key <key at cs.utk.edu>
> + */
> +void
> +adjust_timeout_calltodo(time_change)
> +    struct timeval *time_change;
> +{
> +       register struct callout *p;
> +       unsigned long delta_ticks;
> +
> +       /*
> +        * How many ticks were we asleep?
> +        * (stolen from tvtohz()).
> +        */
> +
> +       /* Don't do anything */
> +       if (time_change->tv_sec < 0)
> +               return;
> +       else if (time_change->tv_sec <= LONG_MAX / 1000000)
> +               delta_ticks = (time_change->tv_sec * 1000000 +
> +                              time_change->tv_usec + (tick - 1)) / tick + 1;
> +       else if (time_change->tv_sec <= LONG_MAX / hz)
> +               delta_ticks = time_change->tv_sec * hz +
> +                             (time_change->tv_usec + (tick - 1)) / tick + 1;
> +       else
> +               delta_ticks = LONG_MAX;
> +
> +       if (delta_ticks > INT_MAX)
> +               delta_ticks = INT_MAX;
> +
> +       /*
> +        * Now rip through the timer calltodo list looking for timers
> +        * to expire.
> +        */
> +
> +       /* don't collide with softclock() */
> +       CC_LOCK(cc);
> +       for (p = calltodo.c_next; p != NULL; p = p->c_next) {
> +               p->c_time -= delta_ticks;
> +
> +               /* Break if the timer had more time on it than delta_ticks */
> +               if (p->c_time > 0)
> +                       break;
> +
> +               /* take back the ticks the timer didn't use (p->c_time <= 0) */
> +               delta_ticks = -p->c_time;
> +       }
> +       CC_UNLOCK(cc);
> +
> +       return;
> +}
> +#endif /* APM_FIXUP_CALLTODO */
> diff --git a/freebsd/sys/callout.h b/freebsd/sys/callout.h
> index ed4e20e..4686a4d 100644
> --- a/freebsd/sys/callout.h
> +++ b/freebsd/sys/callout.h
> @@ -46,23 +46,16 @@ SLIST_HEAD(callout_list, callout);
>   TAILQ_HEAD(callout_tailq, callout);
>
>   struct callout {
> -#ifndef __rtems__
>          union {
>                  SLIST_ENTRY(callout) sle;
>                  TAILQ_ENTRY(callout) tqe;
>          } c_links;
>          int     c_time;                         /* ticks to the event */
> -#else /* __rtems__ */
> -       rtems_chain_node c_node;
> -       rtems_id c_id;
> -#endif /* __rtems__ */
>          void    *c_arg;                         /* function argument */
>          void    (*c_func)(void *);              /* function to call */
>          struct lock_object *c_lock;             /* lock to handle */
>          int     c_flags;                        /* state of this entry */
> -#ifndef __rtems__
>          volatile int c_cpu;                     /* CPU we're scheduled on */
> -#endif /* __rtems__ */
>   };
>
>   #define        CALLOUT_LOCAL_ALLOC     0x0001 /* was allocated from callfree */
> @@ -92,12 +85,8 @@ void _callout_init_lock(struct callout *, struct lock_object *, int);
>             NULL, (flags))
>   #define        callout_pending(c)      ((c)->c_flags & CALLOUT_PENDING)
>   int    callout_reset_on(struct callout *, int, void (*)(void *), void *, int);
> -#ifndef __rtems__
>   #define        callout_reset(c, on_tick, fn, arg)                              \
>       callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
> -#else /* __rtems__ */
> -int callout_reset(struct callout *, int, void (*)(void *), void *);
> -#endif /* __rtems__ */
>   #define        callout_reset_curcpu(c, on_tick, fn, arg)                       \
>       callout_reset_on((c), (on_tick), (fn), (arg), PCPU_GET(cpuid))
>   int    callout_schedule(struct callout *, int);
> diff --git a/freebsd/sys/mutex.h b/freebsd/sys/mutex.h
> index 434a1ea..0e8e173 100644
> --- a/freebsd/sys/mutex.h
> +++ b/freebsd/sys/mutex.h
> @@ -122,10 +122,15 @@ void      _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
>   int    _mtx_trylock(struct mtx *m, int opts, const char *file, int line);
>   void   _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
>   void   _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
> +#ifndef __rtems__
>   void   _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
>               int line);
>   void   _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
>               int line);
> +#else /* __rtems__ */
> +#define _mtx_lock_spin_flags _mtx_lock_flags
> +#define _mtx_unlock_spin_flags _mtx_unlock_flags
> +#endif /* __rtems__ */
>   #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
>   void   _mtx_assert(struct mtx *m, int what, const char *file, int line);
>   #endif
> diff --git a/freebsd/sys/sleepqueue.h b/freebsd/sys/sleepqueue.h
> new file mode 100644
> index 0000000..936ffd8
> --- /dev/null
> +++ b/freebsd/sys/sleepqueue.h
> @@ -0,0 +1 @@
> +/* EMPTY */
> diff --git a/rtemsbsd/freebsd/machine/rtems-bsd-symbols.h b/rtemsbsd/freebsd/machine/rtems-bsd-symbols.h
> index 0fdfda5..c58259d 100644
> --- a/rtemsbsd/freebsd/machine/rtems-bsd-symbols.h
> +++ b/rtemsbsd/freebsd/machine/rtems-bsd-symbols.h
> @@ -136,11 +136,6 @@
>   #define bus_teardown_intr_method_default _bsd_bus_teardown_intr_method_default
>   #define bus_write_ivar_desc _bsd_bus_write_ivar_desc
>   #define bus_write_ivar_method_default _bsd_bus_write_ivar_method_default
> -#define callout_init _bsd_callout_init
> -#define _callout_init_lock _bsd__callout_init_lock
> -#define callout_reset _bsd_callout_reset
> -#define callout_schedule _bsd_callout_schedule
> -#define _callout_stop_safe _bsd__callout_stop_safe
>   #define cam_fetch_status_entry _bsd_cam_fetch_status_entry
>   #define cam_quirkmatch _bsd_cam_quirkmatch
>   #define cam_sim_alloc _bsd_cam_sim_alloc
> diff --git a/rtemsbsd/src/rtems-bsd-callout.c b/rtemsbsd/src/rtems-bsd-callout.c
> deleted file mode 100644
> index 25fd410..0000000
> --- a/rtemsbsd/src/rtems-bsd-callout.c
> +++ /dev/null
> @@ -1,140 +0,0 @@
> -/**
> - * @file
> - *
> - * @ingroup rtems_bsd_rtems
> - *
> - * @brief TODO.
> - */
> -
> -/*
> - * Copyright (c) 2009, 2010 embedded brains GmbH.
> - * All rights reserved.
> - *
> - *  embedded brains GmbH
> - *  Obere Lagerstr. 30
> - *  82178 Puchheim
> - *  Germany
> - * <rtems at embedded-brains.de>
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions
> - * are met:
> - * 1. Redistributions of source code must retain the above copyright
> - *    notice, this list of conditions and the following disclaimer.
> - * 2. Redistributions in binary form must reproduce the above copyright
> - *    notice, this list of conditions and the following disclaimer in the
> - *    documentation and/or other materials provided with the distribution.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> - * SUCH DAMAGE.
> - */
> -
> -#include <freebsd/machine/rtems-bsd-config.h>
> -
> -#include <freebsd/sys/param.h>
> -#include <freebsd/sys/types.h>
> -#include <freebsd/sys/systm.h>
> -#include <freebsd/sys/callout.h>
> -#include <freebsd/sys/lock.h>
> -#include <freebsd/sys/mutex.h>
> -
> -RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_callout_chain);
> -
> -static void
> -rtems_bsd_callout_dispatch(rtems_id id, void *arg)
> -{
> -       rtems_status_code sc = RTEMS_SUCCESSFUL;
> -       struct callout *c = arg;
> -
> -       if (c->c_lock != NULL) {
> -               sc = rtems_semaphore_obtain(c->c_lock->lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> -               BSD_ASSERT_SC(sc);
> -       }
> -
> -       if (c->c_func != NULL) {
> -               (*c->c_func)(c->c_arg);
> -       }
> -
> -       if (c->c_lock != NULL && (c->c_flags & CALLOUT_RETURNUNLOCKED) == 0) {
> -               sc = rtems_semaphore_release(c->c_lock->lo_id);
> -               BSD_ASSERT_SC(sc);
> -       }
> -}
> -
> -void
> -callout_init(struct callout *c, int mpsafe)
> -{
> -       _callout_init_lock(c, mpsafe ? NULL : &Giant.lock_object, mpsafe ? CALLOUT_RETURNUNLOCKED : 0);
> -}
> -
> -void
> -_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
> -{
> -       rtems_status_code sc = RTEMS_SUCCESSFUL;
> -       rtems_id id = RTEMS_ID_NONE;
> -
> -       sc = rtems_timer_create(rtems_build_name('_', 'T', 'M', 'R'), &id);
> -       BSD_ASSERT_SC(sc);
> -
> -       c->c_id = id;
> -       c->c_lock = lock;
> -       c->c_flags = flags;
> -       c->c_func = NULL;
> -       c->c_arg = NULL;
> -
> -       rtems_chain_append(&rtems_bsd_callout_chain, &c->c_node);
> -}
> -
> -int
> -callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
> -{
> -       /* FIXME: Integer conversions */
> -
> -       rtems_status_code sc = RTEMS_SUCCESSFUL;
> -
> -       if (to_ticks <= 0) {
> -               to_ticks = 1;
> -       }
> -
> -       c->c_func = ftn;
> -       c->c_arg = arg;
> -
> -       sc = rtems_timer_server_fire_after(c->c_id, (rtems_interval) to_ticks, rtems_bsd_callout_dispatch, c);
> -       BSD_ASSERT_SC(sc);
> -
> -       return 0;
> -}
> -
> -int
> -callout_schedule(struct callout *c, int to_ticks)
> -{
> -       return callout_reset(c, to_ticks, c->c_func, c->c_arg);
> -}
> -
> -int
> -_callout_stop_safe(struct callout *c, int safe)
> -{
> -       rtems_status_code sc = RTEMS_SUCCESSFUL;
> -
> -       if (!safe) {
> -               sc = rtems_timer_cancel(c->c_id);
> -               BSD_ASSERT_SC(sc);
> -       } else {
> -               sc = rtems_timer_delete(c->c_id);
> -               BSD_ASSERT_SC(sc);
> -
> -               c->c_id = RTEMS_ID_NONE;
> -               rtems_chain_extract(&c->c_node);
> -       }
> -
> -       return 0;
> -}
> diff --git a/rtemsbsd/src/rtems-bsd-shell.c b/rtemsbsd/src/rtems-bsd-shell.c
> index 786b712..f80e937 100644
> --- a/rtemsbsd/src/rtems-bsd-shell.c
> +++ b/rtemsbsd/src/rtems-bsd-shell.c
> @@ -52,23 +52,6 @@
>   #include<rtems/shell.h>
>
>   static void
> -rtems_bsd_dump_callout(void)
> -{
> -       rtems_chain_control *chain = &rtems_bsd_callout_chain;
> -       rtems_chain_node *node = rtems_chain_first(chain);
> -
> -       printf("callout dump:\n");
> -
> -       while (!rtems_chain_is_tail(chain, node)) {
> -               struct callout *c = (struct callout *) node;
> -
> -               printf("\t%08x\n", c->c_id);
> -
> -               node = rtems_chain_next(node);
> -       }
> -}
> -
> -static void
>   rtems_bsd_dump_mtx(void)
>   {
>          rtems_chain_control *chain =&rtems_bsd_mtx_chain;
> @@ -169,10 +152,6 @@ rtems_bsd_info(int argc, char **argv)
>                          rtems_bsd_dump_thread();
>                          usage = false;
>                  }
> -               if (CMP("callout")) {
> -                       rtems_bsd_dump_callout();
> -                       usage = false;
> -               }
>          }
>
>          if (usage) {
> diff --git a/rtemsbsd/src/rtems-bsd-timeout.c b/rtemsbsd/src/rtems-bsd-timeout.c
> deleted file mode 100644
> index 0055c42..0000000
> --- a/rtemsbsd/src/rtems-bsd-timeout.c
> +++ /dev/null
> @@ -1,129 +0,0 @@
> -/**
> - * @file
> - *
> - * @ingroup rtems_bsd_rtems
> - *
> - * @brief TODO.
> - */
> -
> -/*
> - * COPYRIGHT (c) 2012.
> - * On-Line Applications Research Corporation (OAR).
> - * All rights reserved.
> - *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions
> - * are met:
> - * 1. Redistributions of source code must retain the above copyright
> - *    notice, this list of conditions and the following disclaimer.
> - * 2. Redistributions in binary form must reproduce the above copyright
> - *    notice, this list of conditions and the following disclaimer in the
> - *    documentation and/or other materials provided with the distribution.
> - *
> - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> - * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> - * SUCH DAMAGE.
> - */
> -
> -#include<freebsd/machine/rtems-bsd-config.h>
> -#include<freebsd/sys/cdefs.h>
> -__FBSDID("$FreeBSD$");
> -
> -#include<freebsd/sys/param.h>
> -#include<freebsd/sys/systm.h>
> -#include<freebsd/sys/bus.h>
> -#include<freebsd/sys/callout.h>
> -#include<freebsd/sys/condvar.h>
> -#include<freebsd/sys/interrupt.h>
> -#include<freebsd/sys/kernel.h>
> -#include<freebsd/sys/ktr.h>
> -#include<freebsd/sys/lock.h>
> -#include<freebsd/sys/malloc.h>
> -#include<freebsd/sys/mutex.h>
> -#include<freebsd/sys/proc.h>
> -#include<freebsd/sys/sdt.h>
> -
> -static int timeout_cpu;
> -/*
> - * There is one struct callout_cpu per cpu, holding all relevant
> - * state for the callout processing thread on the individual CPU.
> - * In particular:
> - *     cc_ticks is incremented once per tick in callout_cpu().
> - *     It tracks the global 'ticks' but in a way that the individual
> - *     threads should not worry about races in the order in which
> - *     hardclock() and hardclock_cpu() run on the various CPUs.
> - *     cc_softclock is advanced in callout_cpu() to point to the
> - *     first entry in cc_callwheel that may need handling. In turn,
> - *     a softclock() is scheduled so it can serve the various entries i
> - *     such that cc_softclock<= i<= cc_ticks .
> - *     XXX maybe cc_softclock and cc_ticks should be volatile ?
> - *
> - *     cc_ticks is also used in callout_reset_cpu() to determine
> - *     when the callout should be served.
> - */
> -struct callout_cpu {
> -       struct mtx              cc_lock;
> -       struct callout          *cc_callout;
> -       struct callout_tailq    *cc_callwheel;
> -       struct callout_list     cc_callfree;
> -       struct callout          *cc_next;
> -       struct callout          *cc_curr;
> -       void                    *cc_cookie;
> -       int                     cc_ticks;
> -       int                     cc_softticks;
> -       int                     cc_cancel;
> -       int                     cc_waiting;
> -};
> -
> -/*
> - * timeout --
> - *     Execute a function after a specified length of time.
> - *
> - * untimeout --
> - *     Cancel previous timeout function call.
> - *
> - * callout_handle_init --
> - *     Initialize a handle so that using it with untimeout is benign.
> - *
> - *     See AT&T BCI Driver Reference Manual for specification.  This
> - *     implementation differs from that one in that although an
> - *     identification value is returned from timeout, the original
> - *     arguments to timeout as well as the identifier are used to
> - *     identify entries for untimeout.
> - */
> -
> -struct callout_handle
> -timeout(ftn, arg, to_ticks)
> -       timeout_t *ftn;
> -       void *arg;
> -       int to_ticks;
> -{
> -       struct callout_cpu *cc;
> -       struct callout *new;
> -       struct callout_handle handle;
> -
> -#if 0
> -       cc = CC_CPU(timeout_cpu);
> -       CC_LOCK(cc);
> -       /* Fill in the next free callout structure. */
> -       new = SLIST_FIRST(&cc->cc_callfree);
> -       if (new == NULL)
> -               /* XXX Attempt to malloc first */
> -               panic("timeout table full");
> -       SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
> -       callout_reset(new, to_ticks, ftn, arg);
> -       handle.callout = new;
> -       CC_UNLOCK(cc);
> -#endif
> -       return (handle);
> -}
> -
> -
> diff --git a/testsuite/swi01/Makefile b/testsuite/swi01/Makefile
> new file mode 100644
> index 0000000..dd7c688
> --- /dev/null
> +++ b/testsuite/swi01/Makefile
> @@ -0,0 +1,29 @@
> +include ../../config.inc
> +
> +include $(RTEMS_MAKEFILE_PATH)/Makefile.inc
> +include $(RTEMS_CUSTOM)
> +include $(PROJECT_ROOT)/make/leaf.cfg
> +
> +APP_PIECES = init swi_test
> +
> +APP_O_FILES = $(APP_PIECES:%=%.o)
> +APP_DEP_FILES = $(APP_PIECES:%=%.dep)
> +
> +APP = app.exe
> +
> +DEPFLAGS = -MT $@ -MD -MP -MF $*.dep
> +AM_CPPFLAGS += -I $(INSTALL_BASE)/include -I.
> +
> +CFLAGS += $(DEPFLAGS) $(GCCFLAGS) $(AM_CPPFLAGS) -Wno-unused -Wl,-Map,app.map
> +
> +LINK_LIBS += $(INSTALL_BASE)/libbsd.a
> +
> +all: $(APP)
> +
> +$(APP): $(APP_O_FILES)
> +       $(CC) $(CFLAGS) $^ $(LINK_LIBS) -o $(APP)
> +
> +clean:
> +       rm -f app.map $(APP) $(APP_O_FILES) $(APP_DEP_FILES)
> +
> +-include $(APP_DEP_FILES)
> diff --git a/testsuite/swi01/init.c b/testsuite/swi01/init.c
> new file mode 100644
> index 0000000..9bc13dc
> --- /dev/null
> +++ b/testsuite/swi01/init.c
> @@ -0,0 +1,76 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#include<stdlib.h>
> +#include<assert.h>
> +
> +#include<rtems.h>
> +
> +#include<freebsd/bsd.h>
> +
> +#include "swi_test.h"
> +
> +static void Init(rtems_task_argument arg)
> +{
> +       rtems_status_code sc;
> +
> +       puts("\n\n*** TEST SOFTWARE INTERRUPT 1 ***");
> +
> +       sc = rtems_bsd_initialize();
> +       assert(sc == RTEMS_SUCCESSFUL);
> +
> +       swi_test();
> +
> +       puts("*** END OF TEST SOFTWARE INTERRUPT 1 ***");
> +
> +       exit(0);
> +}
> +
> +#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
> +#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
> +
> +#define CONFIGURE_USE_IMFS_AS_BASE_FILESYSTEM
> +
> +#define CONFIGURE_FILESYSTEM_IMFS
> +
> +#define CONFIGURE_LIBIO_MAXIMUM_FILE_DESCRIPTORS 32
> +
> +#define CONFIGURE_UNLIMITED_OBJECTS
> +
> +#define CONFIGURE_UNIFIED_WORK_AREAS
> +
> +#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
> +
> +#define CONFIGURE_STACK_CHECKER_ENABLED
> +
> +#define CONFIGURE_INIT
> +
> +#include<rtems/confdefs.h>
> diff --git a/testsuite/swi01/swi_test.c b/testsuite/swi01/swi_test.c
> new file mode 100644
> index 0000000..99ba94f
> --- /dev/null
> +++ b/testsuite/swi01/swi_test.c
> @@ -0,0 +1,208 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#include<assert.h>
> +
> +#include<stdio.h>
> +#include<unistd.h>
> +
> +#include<freebsd/machine/rtems-bsd-config.h>
> +
> +#include<freebsd/sys/types.h>
> +#include<freebsd/sys/systm.h>
> +
> +#include<freebsd/sys/param.h>
> +#include<freebsd/sys/bus.h>
> +#include<freebsd/sys/interrupt.h>
> +
> +#define SWI_TEST_THREAD_PRIO (0)
> +
> +// Time to wait for swi-test-handler
> +#define SWI_SLEEP_TIME (100)
> +
> +enum arg {
> +       HANDLER_NOT_VISITED,
> +       HANDLER_VISITED,
> +};
> +
> +// The swi handler function, that will be called by all tests.
> +void swi_test_handler(void *arg)
> +{
> +       enum arg* argument = arg;
> +
> +       printf("This is swi_test_handler.\n");
> +
> +       *argument = HANDLER_VISITED;
> +}
> +
> +void swi_test_normal_handler()
> +{
> +       struct intr_event *test_intr_event = NULL;
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       void *test_ih = NULL;
> +       int retval = 0;
> +
> +       printf("== Create thread and install a functional handler.\n");
> +
> +       retval = swi_add(&test_intr_event, "swi_test", swi_test_handler,&argument,
> +               SWI_TEST_THREAD_PRIO, 0,&test_ih);
> +       assert(retval == 0);
> +
> +       swi_sched(test_ih, 0);
> +
> +       usleep(SWI_SLEEP_TIME);
> +
> +       assert(argument == HANDLER_VISITED);
> +}
> +
> +void swi_test_exclusive_handler()
> +{
> +       struct intr_event *test_intr_event = NULL;
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       void *test_ih = NULL;
> +       int retval = 0;
> +
> +       printf("== Create a thread with an exclusive handler.\n");
> +
> +       retval = swi_add(&test_intr_event, "swi_test", swi_test_handler,&argument,
> +               SWI_TEST_THREAD_PRIO, INTR_EXCL,&test_ih);
> +       assert(retval == 0);
> +
> +       swi_sched(test_ih, 0);
> +
> +       usleep(SWI_SLEEP_TIME);
> +
> +       assert(argument == HANDLER_VISITED);
> +}
> +
> +void swi_test_error_number_of_processes_exceeded()
> +{
> +       // [EAGAIN] The system-imposed limit on the total number of processes
> +       // under execution would be exceeded.  The limit is given by the
> +       // sysctl(3) MIB variable KERN_MAXPROC.
> +#warning TODO: write test case
> +}
> +
> +void swi_test_error_intr_entropy_set()
> +{
> +       struct intr_event *test_intr_event = NULL;
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       void *test_ih = NULL;
> +       int retval = 0;
> +
> +       printf("== Set the INTR_ENTROPY flag.\n");
> +
> +       retval = swi_add(&test_intr_event, "swi_test", swi_test_handler,&argument,
> +               SWI_TEST_THREAD_PRIO, INTR_ENTROPY,&test_ih);
> +       assert(retval == EINVAL);
> +
> +       usleep(SWI_SLEEP_TIME);
> +
> +       assert(argument == HANDLER_NOT_VISITED);
> +}
> +
> +void swi_test_error_point_to_hardware_interrupt_thread()
> +{
> +       //[EINVAL] The ithdp argument points to a hardware interrupt thread.
> +#warning TODO: write test case
> +}
> +
> +void swi_test_error_name_null()
> +{
> +       struct intr_event *test_intr_event = NULL;
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       void *test_ih = NULL;
> +       int retval = 0;
> +
> +       printf("== Set name to NULL.\n");
> +
> +       retval = swi_add(&test_intr_event, NULL, swi_test_handler,&argument,
> +               SWI_TEST_THREAD_PRIO, 0,&test_ih);
> +       assert(retval == EINVAL);
> +
> +       usleep(SWI_SLEEP_TIME);
> +
> +       assert(argument == HANDLER_NOT_VISITED);
> +}
> +
> +void swi_test_error_handler_null()
> +{
> +       struct intr_event *test_intr_event = NULL;
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       void *test_ih = NULL;
> +       int retval = 0;
> +
> +       printf("== Set handler to NULL.\n");
> +
> +       retval = swi_add(&test_intr_event, "swi_test", NULL,&argument,
> +               SWI_TEST_THREAD_PRIO, 0,&test_ih);
> +       assert(retval == EINVAL);
> +
> +       usleep(SWI_SLEEP_TIME);
> +
> +       assert(argument == HANDLER_NOT_VISITED);
> +}
> +
> +void swi_test_error_has_allready_exclusive()
> +{
> +       struct intr_event *test_intr_event = NULL;
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       void *test_ih1 = NULL;
> +       void *test_ih2 = NULL;
> +       int retval = 0;
> +
> +       printf("== Create a thread with an exclusive handler and try to add another handler.\n");
> +
> +       retval = swi_add(&test_intr_event, "swi_test1", swi_test_handler,&argument,
> +               SWI_TEST_THREAD_PRIO, INTR_EXCL,&test_ih1);
> +       assert(retval == 0);
> +
> +       retval = swi_add(&test_intr_event, "swi_test2", swi_test_handler,&argument,
> +               SWI_TEST_THREAD_PRIO, 0,&test_ih2);
> +       assert(retval == EINVAL);
> +
> +       usleep(SWI_SLEEP_TIME);
> +
> +       assert(argument == HANDLER_NOT_VISITED);
> +}
> +
> +void swi_test(void)
> +{
> +       swi_test_normal_handler();
> +       swi_test_exclusive_handler();
> +       swi_test_error_number_of_processes_exceeded();
> +       swi_test_error_intr_entropy_set();
> +       swi_test_error_point_to_hardware_interrupt_thread();
> +       swi_test_error_name_null();
> +       swi_test_error_handler_null();
> +       swi_test_error_has_allready_exclusive();
> +}
> +
> diff --git a/testsuite/swi01/swi_test.h b/testsuite/swi01/swi_test.h
> new file mode 100644
> index 0000000..e7708b6
> --- /dev/null
> +++ b/testsuite/swi01/swi_test.h
> @@ -0,0 +1,45 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#ifndef SWI_TEST_H
> +#define SWI_TEST_H
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif /* __cplusplus */
> +
> +void swi_test(void);
> +
> +#ifdef __cplusplus
> +}
> +#endif /* __cplusplus */
> +
> +#endif /* SWI_TEST_H */
> diff --git a/testsuite/timeout01/Makefile b/testsuite/timeout01/Makefile
> new file mode 100644
> index 0000000..6e38da1
> --- /dev/null
> +++ b/testsuite/timeout01/Makefile
> @@ -0,0 +1,29 @@
> +include ../../config.inc
> +
> +include $(RTEMS_MAKEFILE_PATH)/Makefile.inc
> +include $(RTEMS_CUSTOM)
> +include $(PROJECT_ROOT)/make/leaf.cfg
> +
> +APP_PIECES = init timeout_test timeout_helper
> +
> +APP_O_FILES = $(APP_PIECES:%=%.o)
> +APP_DEP_FILES = $(APP_PIECES:%=%.dep)
> +
> +APP = app.exe
> +
> +DEPFLAGS = -MT $@ -MD -MP -MF $*.dep
> +AM_CPPFLAGS += -I $(INSTALL_BASE)/include -I.
> +
> +CFLAGS += $(DEPFLAGS) $(GCCFLAGS) $(AM_CPPFLAGS) -Wno-unused -Wl,-Map,app.map
> +
> +LINK_LIBS += $(INSTALL_BASE)/libbsd.a
> +
> +all: $(APP)
> +
> +$(APP): $(APP_O_FILES)
> +       $(CC) $(CFLAGS) $^ $(LINK_LIBS) -o $(APP)
> +
> +clean:
> +       rm -f app.map $(APP) $(APP_O_FILES) $(APP_DEP_FILES)
> +
> +-include $(APP_DEP_FILES)
> diff --git a/testsuite/timeout01/init.c b/testsuite/timeout01/init.c
> new file mode 100644
> index 0000000..f1bd529
> --- /dev/null
> +++ b/testsuite/timeout01/init.c
> @@ -0,0 +1,81 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#include<stdlib.h>
> +#include<stdio.h>
> +#include<assert.h>
> +
> +#include<rtems.h>
> +
> +#include<freebsd/bsd.h>
> +
> +#include "timeout_test.h"
> +#include "timeout_helper.h"
> +
> +static void Init(rtems_task_argument arg)
> +{
> +       rtems_status_code sc;
> +
> +       puts("\n\n*** TEST TIMEOUT 1 ***");
> +
> +       sc = rtems_bsd_initialize();
> +       assert(sc == RTEMS_SUCCESSFUL);
> +
> +       timeout_table_init();
> +       callout_tick_task_init();
> +
> +       timeout_test();
> +
> +       puts("*** END OF TEST TIMEOUT 1 ***");
> +
> +       exit(0);
> +}
> +
> +#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
> +#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
> +
> +#define CONFIGURE_USE_IMFS_AS_BASE_FILESYSTEM
> +
> +#define CONFIGURE_FILESYSTEM_IMFS
> +
> +#define CONFIGURE_LIBIO_MAXIMUM_FILE_DESCRIPTORS 32
> +
> +#define CONFIGURE_UNLIMITED_OBJECTS
> +
> +#define CONFIGURE_UNIFIED_WORK_AREAS
> +
> +#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
> +
> +#define CONFIGURE_STACK_CHECKER_ENABLED
> +
> +#define CONFIGURE_INIT
> +
> +#include<rtems/confdefs.h>
> diff --git a/testsuite/timeout01/timeout_helper.c b/testsuite/timeout01/timeout_helper.c
> new file mode 100644
> index 0000000..88d8510
> --- /dev/null
> +++ b/testsuite/timeout01/timeout_helper.c
> @@ -0,0 +1,142 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#include<freebsd/machine/rtems-bsd-config.h>
> +
> +#include<freebsd/sys/param.h>
> +#include<freebsd/sys/systm.h>
> +
> +#include<assert.h>
> +#include<malloc.h>
> +
> +#include "timeout_helper.h"
> +
> +void timeout_table_init()
> +{
> +       size_t size = 0;
> +       caddr_t v = 0;
> +       void* firstaddr = 0;
> +
> +       /* calculates how much memory is needed */
> +       v = kern_timeout_callwheel_alloc(v);
> +
> +       /* allocate memory */
> +       size = (size_t)v;
> +       firstaddr = malloc(round_page(size));
> +       assert(firstaddr != NULL);
> +
> +       /* now set correct addresses for callwheel */
> +       v = (caddr_t) firstaddr;
> +       v = kern_timeout_callwheel_alloc(v);
> +
> +       assert((size_t)((void *)v - firstaddr) == size);
> +
> +       /* Initialize the callouts we just allocated. */
> +       kern_timeout_callwheel_init();
> +}
> +
> +#define CALLOUT_TICK_TASK_PRIO         (PRIORITY_DEFAULT_MAXIMUM - 1)
> +#define CALLOUT_TICK_TASK_STACK_SIZE   (1024)
> +#define CALLOUT_TICK_TASK_NAME         rtems_build_name('C', 'O', 'U', 'T')
> +#define CALLOUT_TICKS_PER_CALLOUT_TICK 1
> +
> +static const rtems_event_set TRIGGER_EVENT = RTEMS_EVENT_13;
> +
> +static void callout_tick_task_trigger(rtems_id timer, void *arg)
> +{
> +       rtems_status_code status;
> +       rtems_id *task_id = arg;
> +
> +       status = rtems_event_send(*task_id, TRIGGER_EVENT);
> +       assert(status == RTEMS_SUCCESSFUL);
> +
> +       status = rtems_timer_reset(timer);
> +       assert(status == RTEMS_SUCCESSFUL);
> +}
> +
> +rtems_task callout_tick_task(rtems_task_argument arg)
> +{
> +       rtems_name name;
> +       rtems_id timer;
> +       rtems_status_code status;
> +       const rtems_interval ticks_per_ms = CALLOUT_TICKS_PER_CALLOUT_TICK;
> +       rtems_id self_id = rtems_task_self();
> +
> +       name = CALLOUT_TICK_TASK_NAME;
> +
> +       status = rtems_timer_create( name,&timer );
> +       assert(status == RTEMS_SUCCESSFUL);
> +
> +       status = rtems_timer_fire_after( timer, ticks_per_ms, callout_tick_task_trigger,&self_id );
> +       assert(status == RTEMS_SUCCESSFUL);
> +
> +       while ( 1 ) {
> +               rtems_event_set event;
> +
> +               status = rtems_event_receive(
> +                               TRIGGER_EVENT,
> +                               RTEMS_EVENT_ALL | RTEMS_WAIT,
> +                               RTEMS_NO_TIMEOUT,
> +&event
> +               );
> +               assert(status == RTEMS_SUCCESSFUL);
> +               assert(event == TRIGGER_EVENT);
> +
> +               callout_tick();
> +       }
> +}
> +
> +
> +void callout_tick_task_init(void)
> +{
> +       static bool initialized = false;
> +       rtems_status_code sc = RTEMS_SUCCESSFUL;
> +
> +       if (!initialized) {
> +               rtems_id id = RTEMS_ID_NONE;
> +
> +               initialized = true;
> +
> +               sc = rtems_task_create(
> +                       CALLOUT_TICK_TASK_NAME,
> +                       CALLOUT_TICK_TASK_PRIO,
> +                       CALLOUT_TICK_TASK_STACK_SIZE,
> +                       RTEMS_DEFAULT_MODES,
> +                       RTEMS_DEFAULT_ATTRIBUTES,
> +&id
> +               );
> +               assert(sc == RTEMS_SUCCESSFUL);
> +
> +               sc = rtems_task_start(id, callout_tick_task, 0);
> +               assert(sc == RTEMS_SUCCESSFUL);
> +       }
> +}
> +
> diff --git a/testsuite/timeout01/timeout_helper.h b/testsuite/timeout01/timeout_helper.h
> new file mode 100644
> index 0000000..7e5acce
> --- /dev/null
> +++ b/testsuite/timeout01/timeout_helper.h
> @@ -0,0 +1,46 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#ifndef TIMEOUT_HELPER_H
> +#define TIMEOUT_HELPER_H
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif /* __cplusplus */
> +
> +void timeout_table_init(void);
> +void callout_tick_task_init(void);
> +
> +#ifdef __cplusplus
> +}
> +#endif /* __cplusplus */
> +
> +#endif /* TIMEOUT_HELPER_H */
> diff --git a/testsuite/timeout01/timeout_test.c b/testsuite/timeout01/timeout_test.c
> new file mode 100644
> index 0000000..b89feb8
> --- /dev/null
> +++ b/testsuite/timeout01/timeout_test.c
> @@ -0,0 +1,282 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#include<assert.h>
> +
> +#include<stdio.h>
> +#include<unistd.h>
> +
> +#include<freebsd/machine/rtems-bsd-config.h>
> +
> +#include<freebsd/sys/types.h>
> +#include<freebsd/sys/systm.h>
> +
> +#include<freebsd/sys/param.h>
> +#include<freebsd/sys/lock.h>
> +#include<freebsd/sys/mutex.h>
> +#include<freebsd/sys/rwlock.h>
> +
> +#define TIMEOUT_MILLISECONDS   (100)
> +
> +// test after TEST_NOT_FIRED_MS, if handler has not been executed
> +#define TEST_NOT_FIRED_MS      (TIMEOUT_MILLISECONDS * 80 / 100)
> +// test TEST_FIRED_MS after TEST_NOT_FIRED_MS, if handler has been executed
> +#define TEST_FIRED_MS          (TIMEOUT_MILLISECONDS * 40 / 100)
> +// delay handler by this time with a mutex
> +#define TEST_DELAY_MS          (TIMEOUT_MILLISECONDS)
> +
> +enum arg {
> +       HANDLER_NOT_VISITED,
> +       HANDLER_VISITED,
> +};
> +
> +static void timeout_handler(void *arg)
> +{
> +       enum arg* argument = arg;
> +
> +       printf("This is the timeout_handler.\n");
> +
> +       *argument = HANDLER_VISITED;
> +}
> +
> +void timeout_test_timeout()
> +{
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       struct callout_handle handle;
> +
> +       printf("== Start a timeout and test if handler has been called.\n");
> +
> +       callout_handle_init(&handle);
> +
> +       handle = timeout(timeout_handler,&argument, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS));
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +
> +       usleep(TEST_FIRED_MS * 1000);
> +       assert(argument == HANDLER_VISITED);
> +}
> +
> +void timeout_test_cancel_timeout()
> +{
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       struct callout_handle handle;
> +
> +       printf("== Start a timeout and cancel it.\n");
> +
> +       callout_handle_init(&handle);
> +
> +       handle = timeout(timeout_handler,&argument, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS));
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +
> +       untimeout(timeout_handler,&argument, handle);
> +
> +       usleep(TEST_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +}
> +
> +void timeout_test_callout(int mpsave)
> +{
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       struct callout callout;
> +       int retval = 0;
> +       printf("== Start a callout and test if handler has been called. mpsave=%d\n", mpsave);
> +
> +       callout_init(&callout, mpsave);
> +
> +       retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler,&argument);
> +       assert(retval == 0);
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +
> +       usleep(TEST_FIRED_MS * 1000);
> +       assert(argument == HANDLER_VISITED);
> +
> +       callout_deactivate(&callout);
> +}
> +
> +void timeout_test_cancel_callout(int mpsave, bool use_drain)
> +{
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       struct callout callout;
> +       int retval = 0;
> +       printf("== Start a callout and cancel it with %s. mpsave=%d\n", use_drain ? "drain" : "stop", mpsave);
> +
> +       callout_init(&callout, mpsave);
> +
> +       retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler,&argument);
> +       assert(retval == 0);
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +
> +       if(!use_drain)
> +       {
> +               retval = callout_stop(&callout);
> +       }
> +       else
> +       {
> +               retval = callout_drain(&callout);
> +       }
> +       assert(retval != 0);
> +
> +       usleep(TEST_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +
> +       callout_deactivate(&callout);
> +}
> +
> +void timeout_test_callout_reschedule(int mpsave, bool use_reset)
> +{
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       struct callout callout;
> +       int retval = 0;
> +       printf("== Start a callout and reschedule it after some time with %s. mpsave=%d\n", use_reset ? "reset" : "schedule", mpsave);
> +
> +       callout_init(&callout, mpsave);
> +
> +       retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler,&argument);
> +       assert(retval == 0);
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +
> +       if(!use_reset)
> +       {
> +               retval = callout_schedule(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS));
> +       }
> +       else
> +       {
> +               retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler,&argument);
> +       }
> +       assert(retval != 0);
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +
> +       usleep(TEST_FIRED_MS * 1000);
> +       assert(argument == HANDLER_VISITED);
> +
> +       callout_deactivate(&callout);
> +}
> +
> +void timeout_test_callout_mutex(bool delay_with_lock)
> +{
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       struct callout callout;
> +       struct mtx mtx;
> +       int retval = 0;
> +       printf("== Start a callout with a mutex%s\n", delay_with_lock ? " and delay execution by locking it." : ".");
> +
> +       mtx_init(&mtx, "callouttest", NULL, MTX_DEF);
> +       callout_init_mtx(&callout,&mtx, 0);
> +
> +       retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler,&argument);
> +       assert(retval == 0);
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +
> +       if(delay_with_lock)
> +       {
> +               retval = mtx_trylock(&mtx);
> +               assert(retval != 0);
> +
> +               usleep(TEST_DELAY_MS * 1000);
> +               assert(argument == HANDLER_NOT_VISITED);
> +
> +               mtx_unlock(&mtx);
> +       }
> +
> +       usleep(TEST_FIRED_MS * 1000);
> +       assert(argument == HANDLER_VISITED);
> +
> +       callout_deactivate(&callout);
> +
> +       mtx_destroy(&mtx);
> +}
> +
> +void timeout_test_callout_rwlock(bool delay_with_lock)
> +{
> +       enum arg argument = HANDLER_NOT_VISITED;
> +       struct callout callout;
> +       struct rwlock rw;
> +       int retval = 0;
> +       printf("== Start a callout with a rwlock%s\n", delay_with_lock ? " and delay execution by locking it." : ".");
> +
> +       rw_init(&rw, "callouttest");
> +       callout_init_rw(&callout,&rw, 0);
> +
> +       retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler,&argument);
> +       assert(retval == 0);
> +
> +       usleep(TEST_NOT_FIRED_MS * 1000);
> +       assert(argument == HANDLER_NOT_VISITED);
> +
> +       if(delay_with_lock)
> +       {
> +               retval = rw_try_wlock(&rw);
> +               assert(retval != 0);
> +
> +               usleep(TEST_DELAY_MS * 1000);
> +               assert(argument == HANDLER_NOT_VISITED);
> +
> +               rw_wunlock(&rw);
> +       }
> +
> +       usleep(TEST_FIRED_MS * 1000);
> +       assert(argument == HANDLER_VISITED);
> +
> +       callout_deactivate(&callout);
> +}
> +
> +void timeout_test(void)
> +{
> +       int mpsave = 0;
> +
> +       timeout_test_timeout();
> +       timeout_test_cancel_timeout();
> +
> +       for(mpsave = 0; mpsave<=1; mpsave++)
> +       {
> +               timeout_test_callout(mpsave);
> +               timeout_test_cancel_callout(mpsave, false);
> +               timeout_test_cancel_callout(mpsave, true);
> +               timeout_test_callout_reschedule(mpsave, false);
> +               timeout_test_callout_reschedule(mpsave, true);
> +       }
> +
> +       timeout_test_callout_mutex(false);
> +       timeout_test_callout_mutex(true);
> +       timeout_test_callout_rwlock(false);
> +       timeout_test_callout_rwlock(true);
> +}
> diff --git a/testsuite/timeout01/timeout_test.h b/testsuite/timeout01/timeout_test.h
> new file mode 100644
> index 0000000..25d4cb2
> --- /dev/null
> +++ b/testsuite/timeout01/timeout_test.h
> @@ -0,0 +1,45 @@
> +/*
> + * Copyright (c) 2012 embedded brains GmbH.  All rights reserved.
> + *
> + *  embedded brains GmbH
> + *  Obere Lagerstr. 30
> + *  82178 Puchheim
> + *  Germany
> + *<rtems at embedded-brains.de>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + *    notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + *    notice, this list of conditions and the following disclaimer in the
> + *    documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + */
> +
> +#ifndef TIMEOUT_TEST_H
> +#define TIMEOUT_TEST_H
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif /* __cplusplus */
> +
> +void timeout_test(void);
> +
> +#ifdef __cplusplus
> +}
> +#endif /* __cplusplus */
> +
> +#endif /* TIMEOUT_TEST_H */
>
> _______________________________________________
> rtems-vc mailing list
> rtems-vc at rtems.org
> http://www.rtems.org/mailman/listinfo/rtems-vc


-- 
Joel Sherrill, Ph.D.             Director of Research & Development
joel.sherrill at OARcorp.com        On-Line Applications Research
Ask me about RTEMS: a free RTOS  Huntsville AL 35805
     Support Available             (256) 722-9985





More information about the devel mailing list