[PATCH v2] rtems: Add rtems_interrupt_server_create()
Gedare Bloom
gedare at rtems.org
Sat Aug 1 05:05:09 UTC 2020
On Fri, Jul 31, 2020 at 5:53 PM Chris Johns <chrisj at rtems.org> wrote:
>
> OK for the 5 branch.
>
> Will there be an update to the configuration section in the Classic API manual?
>
https://devel.rtems.org/ticket/3269
Seb: "It is on my TODO list to document it in the RTEMS Classic API Guide."
> On 1/8/20 2:10 am, Sebastian Huber wrote:
> > Add rtems_interrupt_server_destroy().
> >
> > Before this patch, the only way to create interrupt servers was
> > rtems_interrupt_server_initialize(). This function creates the default
> > interrupt server and in SMP configurations additional interrupt servers
> > for the additional processors. The interrupt server is heavily used by
> > libbsd. This includes the epoch-based reclamation, which performs
> > time-consuming resource and memory deallocation work. This does not work
> > well with time-critical services, for example a UART over SPI or I2C. One
> > approach to address this problem is to allow the application to create
> > custom interrupt servers with the right priority and task properties.
> > The interrupt server API accounted for this; however, it was not
> > implemented before this patch.
> >
> > Closes #4033.
> > ---
> > bsps/shared/irq/irq-server.c | 365 ++++++++++++++++++---------
> > cpukit/include/rtems/irq-extension.h | 143 ++++++++++-
> > 2 files changed, 378 insertions(+), 130 deletions(-)
> >
> > diff --git a/bsps/shared/irq/irq-server.c b/bsps/shared/irq/irq-server.c
> > index 93e2d144d8..eb5b0b3998 100644
> > --- a/bsps/shared/irq/irq-server.c
> > +++ b/bsps/shared/irq/irq-server.c
> > @@ -7,13 +7,7 @@
> > */
> >
> > /*
> > - * Copyright (c) 2009, 2019 embedded brains GmbH. All rights reserved.
> > - *
> > - * embedded brains GmbH
> > - * Dornierstr. 4
> > - * 82178 Puchheim
> > - * Germany
> > - * <rtems at embedded-brains.de>
> > + * Copyright (C) 2009, 2020 embedded brains GmbH (http://www.embedded-brains.de)
> > *
> > * The license and distribution terms for this file may be
> > * found in the file LICENSE in this distribution or at
> > @@ -21,6 +15,7 @@
> > */
> >
> > #include <stdlib.h>
> > +#include <string.h>
> >
> > #include <rtems.h>
> > #include <rtems/chain.h>
> > @@ -30,54 +25,43 @@
> >
> > #define BSP_INTERRUPT_SERVER_MANAGEMENT_VECTOR (BSP_INTERRUPT_VECTOR_MAX + 1)
> >
> > -typedef struct {
> > - RTEMS_INTERRUPT_LOCK_MEMBER(lock);
> > - rtems_chain_control entries;
> > - rtems_id server;
> > - unsigned errors;
> > -} bsp_interrupt_server_context;
> > +static rtems_interrupt_server_control bsp_interrupt_server_default;
> >
> > -#if defined(RTEMS_SMP)
> > -static bsp_interrupt_server_context *bsp_interrupt_server_instances;
> > -#else
> > -static bsp_interrupt_server_context bsp_interrupt_server_instance;
> > -#endif
> > +static rtems_chain_control bsp_interrupt_server_chain =
> > + RTEMS_CHAIN_INITIALIZER_EMPTY(bsp_interrupt_server_chain);
> >
> > -static bsp_interrupt_server_context *bsp_interrupt_server_get_context(
> > +static rtems_interrupt_server_control *bsp_interrupt_server_get_context(
> > uint32_t server_index,
> > rtems_status_code *sc
> > )
> > {
> > -#if defined(RTEMS_SMP)
> > - if (bsp_interrupt_server_instances == NULL) {
> > - *sc = RTEMS_INCORRECT_STATE;
> > - return NULL;
> > - }
> > -#else
> > - if (bsp_interrupt_server_instance.server == RTEMS_ID_NONE) {
> > - *sc = RTEMS_INCORRECT_STATE;
> > - return NULL;
> > - }
> > -#endif
> > + rtems_chain_node *node;
> > +
> > + bsp_interrupt_lock();
> > + node = rtems_chain_first(&bsp_interrupt_server_chain);
> >
> > - if (server_index >= rtems_scheduler_get_processor_maximum()) {
> > - *sc = RTEMS_INVALID_ID;
> > - return NULL;
> > + while (node != rtems_chain_tail(&bsp_interrupt_server_chain)) {
> > + rtems_interrupt_server_control *s;
> > +
> > + s = RTEMS_CONTAINER_OF(node, rtems_interrupt_server_control, node);
> > + if (s->index == server_index) {
> > + bsp_interrupt_unlock();
> > + return s;
> > + }
> > +
> > + node = rtems_chain_next(node);
> > }
> >
> > - *sc = RTEMS_SUCCESSFUL;
> > -#if defined(RTEMS_SMP)
> > - return &bsp_interrupt_server_instances[server_index];
> > -#else
> > - return &bsp_interrupt_server_instance;
> > -#endif
> > + bsp_interrupt_unlock();
> > + *sc = RTEMS_INVALID_ID;
> > + return NULL;
> > }
> >
> > static void bsp_interrupt_server_trigger(void *arg)
> > {
> > rtems_interrupt_lock_context lock_context;
> > rtems_interrupt_server_entry *e = arg;
> > - bsp_interrupt_server_context *s = e->server;
> > + rtems_interrupt_server_control *s = e->server;
> >
> > if (bsp_interrupt_is_valid_vector(e->vector)) {
> > bsp_interrupt_vector_disable(e->vector);
> > @@ -137,7 +121,7 @@ static rtems_interrupt_server_entry *bsp_interrupt_server_query_entry(
> > }
> >
> > typedef struct {
> > - bsp_interrupt_server_context *server;
> > + rtems_interrupt_server_control *server;
> > rtems_vector_number vector;
> > rtems_option options;
> > rtems_interrupt_handler handler;
> > @@ -281,7 +265,7 @@ static void bsp_interrupt_server_remove_helper(void *arg)
> > }
> >
> > static rtems_status_code bsp_interrupt_server_call_helper(
> > - bsp_interrupt_server_context *s,
> > + rtems_interrupt_server_control *s,
> > rtems_vector_number vector,
> > rtems_option options,
> > rtems_interrupt_handler handler,
> > @@ -314,7 +298,7 @@ static rtems_status_code bsp_interrupt_server_call_helper(
> > }
> >
> > static rtems_interrupt_server_entry *bsp_interrupt_server_get_entry(
> > - bsp_interrupt_server_context *s
> > + rtems_interrupt_server_control *s
> > )
> > {
> > rtems_interrupt_lock_context lock_context;
> > @@ -337,7 +321,7 @@ static rtems_interrupt_server_entry *bsp_interrupt_server_get_entry(
> >
> > static void bsp_interrupt_server_task(rtems_task_argument arg)
> > {
> > - bsp_interrupt_server_context *s = (bsp_interrupt_server_context *) arg;
> > + rtems_interrupt_server_control *s = (rtems_interrupt_server_control *) arg;
> >
> > while (true) {
> > rtems_event_set events;
> > @@ -377,7 +361,7 @@ rtems_status_code rtems_interrupt_server_handler_install(
> > )
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > if (s == NULL) {
> > @@ -402,7 +386,7 @@ rtems_status_code rtems_interrupt_server_handler_remove(
> > )
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > if (s == NULL) {
> > @@ -464,7 +448,7 @@ rtems_status_code rtems_interrupt_server_handler_iterate(
> > {
> > rtems_status_code sc;
> > bsp_interrupt_server_handler_iterate_helper_data hihd;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > if (s == NULL) {
> > @@ -487,103 +471,252 @@ rtems_status_code rtems_interrupt_server_handler_iterate(
> > );
> > }
> >
> > -rtems_status_code rtems_interrupt_server_initialize(
> > +/*
> > + * The default server is statically allocated. Just clear the structure so
> > + * that it can be re-initialized.
> > + */
> > +static void bsp_interrupt_server_destroy_default(
> > + rtems_interrupt_server_control *s
> > +)
> > +{
> > + memset(s, 0, sizeof(*s));
> > +}
> > +
> > +#if defined(RTEMS_SMP)
> > +static void bsp_interrupt_server_destroy_secondary(
> > + rtems_interrupt_server_control *s
> > +)
> > +{
> > + free(s);
> > +}
> > +#endif
> > +
> > +static rtems_status_code bsp_interrupt_server_create(
> > + rtems_interrupt_server_control *s,
> > rtems_task_priority priority,
> > size_t stack_size,
> > rtems_mode modes,
> > rtems_attribute attributes,
> > - uint32_t *server_count
> > + uint32_t cpu_index
> > )
> > {
> > - uint32_t cpu_index;
> > - uint32_t cpu_count;
> > - uint32_t dummy;
> > - bsp_interrupt_server_context *instances;
> > + rtems_status_code sc;
> > +#if defined(RTEMS_SMP)
> > + rtems_id scheduler;
> > + cpu_set_t cpu;
> > +#endif
> >
> > - if (server_count == NULL) {
> > - server_count = &dummy;
> > - }
> > + rtems_interrupt_lock_initialize(&s->lock, "Interrupt Server");
> > + rtems_chain_initialize_empty(&s->entries);
> >
> > - cpu_count = rtems_scheduler_get_processor_maximum();
> > + sc = rtems_task_create(
> > + rtems_build_name('I', 'R', 'Q', 'S'),
> > + priority,
> > + stack_size,
> > + modes,
> > + attributes,
> > + &s->server
> > + );
> > + if (sc != RTEMS_SUCCESSFUL) {
> > + return sc;
> > + }
> >
> > #if defined(RTEMS_SMP)
> > - instances = calloc(cpu_count, sizeof(*instances));
> > - if (instances == NULL) {
> > - return RTEMS_NO_MEMORY;
> > + sc = rtems_scheduler_ident_by_processor(cpu_index, &scheduler);
> > + if (sc != RTEMS_SUCCESSFUL) {
> > + /* Do not start an interrupt server on a processor without a scheduler */
> > + return RTEMS_SUCCESSFUL;
> > }
> > +
> > + sc = rtems_task_set_scheduler(s->server, scheduler, priority);
> > + _Assert(sc == RTEMS_SUCCESSFUL);
> > +
> > + /* Set the task to processor affinity on a best-effort basis */
> > + CPU_ZERO(&cpu);
> > + CPU_SET(cpu_index, &cpu);
> > + (void) rtems_task_set_affinity(s->server, sizeof(cpu), &cpu);
> > #else
> > - instances = &bsp_interrupt_server_instance;
> > + (void) cpu_index;
> > #endif
> >
> > - for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
> > - bsp_interrupt_server_context *s = &instances[cpu_index];
> > - rtems_status_code sc;
> > + rtems_chain_append_unprotected(&bsp_interrupt_server_chain, &s->node);
> > +
> > + sc = rtems_task_start(
> > + s->server,
> > + bsp_interrupt_server_task,
> > + (rtems_task_argument) s
> > + );
> > + _Assert(sc == RTEMS_SUCCESSFUL);
> > +
> > + return sc;
> > +}
> > +
> > +rtems_status_code rtems_interrupt_server_initialize(
> > + rtems_task_priority priority,
> > + size_t stack_size,
> > + rtems_mode modes,
> > + rtems_attribute attributes,
> > + uint32_t *server_count
> > +)
> > +{
> > + rtems_status_code sc;
> > + rtems_interrupt_server_control *s;
> > + uint32_t cpu_index;
> > #if defined(RTEMS_SMP)
> > - rtems_id scheduler;
> > - cpu_set_t cpu;
> > + uint32_t cpu_count;
> > #endif
> >
> > - rtems_interrupt_lock_initialize(&s->lock, "Interrupt Server");
> > - rtems_chain_initialize_empty(&s->entries);
> > + cpu_index = 0;
> > + s = &bsp_interrupt_server_default;
> > +
> > + bsp_interrupt_lock();
> > +
> > + if (s->server != 0) {
> > + sc = RTEMS_INCORRECT_STATE;
> > + goto done;
> > + }
> > +
> > + s->destroy = bsp_interrupt_server_destroy_default;
> > + sc = bsp_interrupt_server_create(
> > + s,
> > + priority,
> > + stack_size,
> > + modes,
> > + attributes,
> > + cpu_index
> > + );
> > + if (sc != RTEMS_SUCCESSFUL) {
> > + goto done;
> > + }
> > +
> > + cpu_index = 1;
> > +
> > +#if defined(RTEMS_SMP)
> > + cpu_count = rtems_scheduler_get_processor_maximum();
> > +
> > + while (cpu_index < cpu_count) {
> > + s = calloc(1, sizeof(*s));
> >
> > - sc = rtems_task_create(
> > - rtems_build_name('I', 'R', 'Q', 'S'),
> > + if (s == NULL) {
> > + sc = RTEMS_NO_MEMORY;
> > + goto done;
> > + }
> > +
> > + s->destroy = bsp_interrupt_server_destroy_secondary;
> > + s->index = cpu_index;
> > + sc = bsp_interrupt_server_create(
> > + s,
> > priority,
> > stack_size,
> > modes,
> > attributes,
> > - &s->server
> > + cpu_index
> > );
> > if (sc != RTEMS_SUCCESSFUL) {
> > - *server_count = cpu_index;
> > -
> > -#if defined(RTEMS_SMP)
> > - if (cpu_index > 0) {
> > - bsp_interrupt_server_instances = instances;
> > - return RTEMS_SUCCESSFUL;
> > - }
> > + goto done;
> > + }
> >
> > - free(instances);
> > + ++cpu_index;
> > + }
> > #endif
> >
> > - return RTEMS_TOO_MANY;
> > - }
> > +done:
> > + bsp_interrupt_unlock();
> >
> > -#if defined(RTEMS_SMP)
> > - sc = rtems_scheduler_ident_by_processor(cpu_index, &scheduler);
> > - if (sc != RTEMS_SUCCESSFUL) {
> > - /* Do not start an interrupt server on a processor without a scheduler */
> > - continue;
> > - }
> > + if (server_count != NULL) {
> > + *server_count = cpu_index;
> > + }
> >
> > - sc = rtems_task_set_scheduler(s->server, scheduler, priority);
> > - _Assert(sc == RTEMS_SUCCESSFUL);
> > + return sc;
> > +}
> >
> > - /* Set the task to processor affinity on a best-effort basis */
> > - CPU_ZERO(&cpu);
> > - CPU_SET(cpu_index, &cpu);
> > - (void) rtems_task_set_affinity(s->server, sizeof(cpu), &cpu);
> > -#endif
> > +rtems_status_code rtems_interrupt_server_create(
> > + rtems_interrupt_server_control *s,
> > + const rtems_interrupt_server_config *config,
> > + uint32_t *server_index
> > +)
> > +{
> > + rtems_status_code sc;
> >
> > - sc = rtems_task_start(
> > - s->server,
> > - bsp_interrupt_server_task,
> > - (rtems_task_argument) s
> > - );
> > - _Assert(sc == RTEMS_SUCCESSFUL);
> > + sc = rtems_task_create(
> > + config->name,
> > + config->priority,
> > + config->storage_size,
> > + config->modes,
> > + config->attributes,
> > + &s->server
> > + );
> > + if (sc != RTEMS_SUCCESSFUL) {
> > + return sc;
> > }
> >
> > -#if defined(RTEMS_SMP)
> > - bsp_interrupt_server_instances = instances;
> > -#endif
> > - *server_count = cpu_index;
> > + rtems_interrupt_lock_initialize(&s->lock, "Interrupt Server");
> > + rtems_chain_initialize_empty(&s->entries);
> > + s->destroy = config->destroy;
> > + s->index = rtems_object_id_get_index(s->server)
> > + + rtems_scheduler_get_processor_maximum();
> > + *server_index = s->index;
> >
> > + bsp_interrupt_lock();
> > + rtems_chain_initialize_node(&s->node);
> > + rtems_chain_append_unprotected(&bsp_interrupt_server_chain, &s->node);
> > + bsp_interrupt_unlock();
> > +
> > + sc = rtems_task_start(
> > + s->server,
> > + bsp_interrupt_server_task,
> > + (rtems_task_argument) s
> > + );
> > + _Assert(sc == RTEMS_SUCCESSFUL);
> > +
> > + return sc;
> > +}
> > +
> > +static void bsp_interrupt_server_destroy_helper(void *arg)
> > +{
> > + bsp_interrupt_server_helper_data *hd = arg;
> > + rtems_interrupt_server_control *s = hd->server;
> > + rtems_status_code sc;
> > +
> > + bsp_interrupt_lock();
> > + rtems_chain_extract_unprotected(&s->node);
> > + bsp_interrupt_unlock();
> > +
> > + if (s->destroy != NULL) {
> > + (*s->destroy)(s);
> > + }
> > +
> > + sc = rtems_event_transient_send(hd->task);
> > + _Assert(sc == RTEMS_SUCCESSFUL);
> > + (void) sc;
> > +
> > + rtems_task_exit();
> > +}
> > +
> > +rtems_status_code rtems_interrupt_server_destroy(uint32_t server_index)
> > +{
> > + rtems_status_code sc;
> > + rtems_interrupt_server_control *s;
> > +
> > + s = bsp_interrupt_server_get_context(server_index, &sc);
> > + if (s == NULL) {
> > + return sc;
> > + }
> > +
> > + bsp_interrupt_server_call_helper(
> > + s,
> > + BSP_INTERRUPT_SERVER_MANAGEMENT_VECTOR,
> > + 0,
> > + NULL,
> > + NULL,
> > + bsp_interrupt_server_destroy_helper
> > + );
> > return RTEMS_SUCCESSFUL;
> > }
> >
> > static void bsp_interrupt_server_entry_initialize(
> > rtems_interrupt_server_entry *entry,
> > - bsp_interrupt_server_context *s
> > + rtems_interrupt_server_control *s
> > )
> > {
> > rtems_chain_set_off_chain(&entry->node);
> > @@ -611,7 +744,7 @@ rtems_status_code rtems_interrupt_server_entry_initialize(
> > )
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > if (s == NULL) {
> > @@ -645,7 +778,7 @@ rtems_status_code rtems_interrupt_server_entry_move(
> > )
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(destination_server_index, &sc);
> > if (s == NULL) {
> > @@ -667,7 +800,7 @@ void rtems_interrupt_server_entry_destroy(
> > rtems_interrupt_server_entry *entry
> > )
> > {
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> > rtems_interrupt_lock_context lock_context;
> >
> > s = entry->server;
> > @@ -698,7 +831,7 @@ rtems_status_code rtems_interrupt_server_request_initialize(
> > )
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > if (s == NULL) {
> > @@ -727,8 +860,8 @@ static void bsp_interrupt_server_handler_move_helper(void *arg)
> > e = bsp_interrupt_server_query_entry(hd->vector, &trigger_options);
> > if (e != NULL) {
> > rtems_interrupt_lock_context lock_context;
> > - bsp_interrupt_server_context *src = e->server;
> > - bsp_interrupt_server_context *dst = hihd->arg;
> > + rtems_interrupt_server_control *src = e->server;
> > + rtems_interrupt_server_control *dst = hihd->arg;
> > bool pending;
> >
> > /* The source server is only used in SMP configurations for the lock */
> > @@ -763,8 +896,8 @@ rtems_status_code rtems_interrupt_server_move(
> > )
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *src;
> > - bsp_interrupt_server_context *dst;
> > + rtems_interrupt_server_control *src;
> > + rtems_interrupt_server_control *dst;
> > bsp_interrupt_server_handler_iterate_helper_data hihd;
> >
> > src = bsp_interrupt_server_get_context(source_server_index, &sc);
> > @@ -810,7 +943,7 @@ static void bsp_interrupt_server_entry_suspend_helper(void *arg)
> > rtems_status_code rtems_interrupt_server_suspend(uint32_t server_index)
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > if (s == NULL) {
> > @@ -831,7 +964,7 @@ rtems_status_code rtems_interrupt_server_suspend(uint32_t server_index)
> > rtems_status_code rtems_interrupt_server_resume(uint32_t server_index)
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > if (s == NULL) {
> > @@ -858,7 +991,7 @@ rtems_status_code rtems_interrupt_server_set_affinity(
> > )
> > {
> > rtems_status_code sc;
> > - bsp_interrupt_server_context *s;
> > + rtems_interrupt_server_control *s;
> > rtems_id scheduler;
> >
> > s = bsp_interrupt_server_get_context(server_index, &sc);
> > diff --git a/cpukit/include/rtems/irq-extension.h b/cpukit/include/rtems/irq-extension.h
> > index 0d77b320bc..bde75ec765 100644
> > --- a/cpukit/include/rtems/irq-extension.h
> > +++ b/cpukit/include/rtems/irq-extension.h
> > @@ -9,13 +9,7 @@
> > /*
> > * Based on concepts of Pavel Pisa, Till Straumann and Eric Valette.
> > *
> > - * Copyright (C) 2008, 2019 embedded brains GmbH
> > - *
> > - * embedded brains GmbH
> > - * Dornierstr. 4
> > - * 82178 Puchheim
> > - * Germany
> > - * <rtems at embedded-brains.de>
> > + * Copyright (C) 2008, 2020 embedded brains GmbH (http://www.embedded-brains.de)
> > *
> > * The license and distribution terms for this file may be
> > * found in the file LICENSE in this distribution or at
> > @@ -259,6 +253,76 @@ typedef struct rtems_interrupt_server_action {
> > */
> > #define RTEMS_INTERRUPT_SERVER_DEFAULT 0
> >
> > +/**
> > + * @brief An interrupt server control.
> > + *
> > + * This structure must be treated as an opaque data type. Members must not be
> > + * accessed directly.
> > + *
> > + * @see rtems_interrupt_server_create()
> > + */
> > +typedef struct rtems_interrupt_server_control {
> > + RTEMS_INTERRUPT_LOCK_MEMBER( lock )
> > + rtems_chain_control entries;
> > + rtems_id server;
> > + unsigned long errors;
> > + uint32_t index;
> > + rtems_chain_node node;
> > + void ( *destroy )( struct rtems_interrupt_server_control * );
> > +} rtems_interrupt_server_control;
> > +
> > +/**
> > + * @brief An interrupt server configuration.
> > + *
> > + * @see rtems_interrupt_server_create()
> > + */
> > +typedef struct {
> > + /**
> > + * @brief The task name of the interrupt server.
> > + */
> > + rtems_name name;
> > +
> > + /**
> > + * @brief The initial task priority of the interrupt server.
> > + */
> > + rtems_task_priority priority;
> > +
> > + /**
> > + * @brief The task storage area of the interrupt server.
> > + *
> > + * It shall be NULL for interrupt servers created by
> > + * rtems_interrupt_server_create().
> > + */
> > + void *storage_area;
> > +
> > + /**
> > + * @brief The task storage size of the interrupt server.
> > + *
> > + * For interrupt servers created by rtems_interrupt_server_create() this is
> > + * the task stack size.
> > + */
> > + size_t storage_size;
> > +
> > + /**
> > + * @brief The initial task modes of the interrupt server.
> > + */
> > + rtems_mode modes;
> > +
> > + /**
> > + * @brief The task attributes of the interrupt server.
> > + */
> > + rtems_attribute attributes;
> > +
> > + /**
> > + * @brief An optional handler to destroy the interrupt server control handed
> > + * over to rtems_interrupt_server_create().
> > + *
> > + * This handler is called in the context of the interrupt server to be
> > + * destroyed.
> > + */
> > + void ( *destroy )( rtems_interrupt_server_control * );
> > +} rtems_interrupt_server_config;
> > +
> > /**
> > * @brief An interrupt server entry.
> > *
> > @@ -309,16 +373,19 @@ typedef struct {
> > *
> > * The server count pointer @a server_count may be @a NULL.
> > *
> > + * The task name of interrupt servers created by this function is
> > + * rtems_build_name( 'I', 'R', 'Q', 'S' ).
> > + *
> > * This function may block.
> > *
> > - * @see rtems_task_create().
> > + * @retval RTEMS_SUCCESSFUL The operation was successful.
> > *
> > - * @retval RTEMS_SUCCESSFUL Successful operation.
> > - * @retval RTEMS_INCORRECT_STATE The interrupt servers are not initialized.
> > - * @retval RTEMS_NO_MEMORY Not enough memory.
> > - * @retval RTEMS_TOO_MANY No free task available to create at least one server task.
> > - * @retval RTEMS_UNSATISFIED Task stack size too large.
> > - * @retval RTEMS_INVALID_PRIORITY Invalid task priority.
> > + * @retval RTEMS_INCORRECT_STATE The interrupt servers were already initialized.
> > + *
> > + * @return The function uses rtems_task_create(). If this operation is not
> > + * successful, then its status code is returned.
> > + *
> > + * @see rtems_interrupt_server_create() and rtems_interrupt_server_destroy().
> > */
> > rtems_status_code rtems_interrupt_server_initialize(
> > rtems_task_priority priority,
> > @@ -328,6 +395,54 @@ rtems_status_code rtems_interrupt_server_initialize(
> > uint32_t *server_count
> > );
> >
> > +/**
> > + * @brief Creates an interrupt server.
> > + *
> > + * This function may block.
> > + *
> > + * @param[out] control is the interrupt server control. The ownership of this
> > + * structure is transferred from the caller of this function to the interrupt
> > + * server management.
> > + *
> > + * @param config is the interrupt server configuration.
> > + *
> > + * @param[out] server_index is the pointer to a server index variable. The
> > + * index of the built interrupt server will be stored in the referenced
> > + * variable if the operation was successful.
> > + *
> > + * @retval RTEMS_SUCCESSFUL The operation was successful.
> > + *
> > + * @return The function uses rtems_task_create(). If this operation is not
> > + * successful, then its status code is returned.
> > + *
> > + * @see rtems_interrupt_server_initialize() and
> > + * rtems_interrupt_server_destroy().
> > + */
> > +rtems_status_code rtems_interrupt_server_create(
> > + rtems_interrupt_server_control *control,
> > + const rtems_interrupt_server_config *config,
> > + uint32_t *server_index
> > +);
> > +
> > +/**
> > + * @brief Destroys the interrupt server.
> > + *
> > + * This function may block.
> > + *
> > + * The interrupt server deletes itself, so after this function returns the
> > + * interrupt server may still be in the termination process, depending on
> > + * the task priorities of the system.
> > + *
> > + * @param server_index is the index of the interrupt server to destroy. Use
> > + * ::RTEMS_INTERRUPT_SERVER_DEFAULT to specify the default server.
> > + *
> > + * @retval RTEMS_SUCCESSFUL The operation was successful.
> > + * @retval RTEMS_INVALID_ID The interrupt server index was invalid.
> > + *
> > + * @see rtems_interrupt_server_create()
> > + */
> > +rtems_status_code rtems_interrupt_server_destroy( uint32_t server_index );
> > +
> > /**
> > * @brief Installs the interrupt handler routine @a handler for the interrupt
> > * vector with number @a vector on the server @a server.
> >
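For completeness, a matching teardown sketch, continuing the hypothetical
names from the creation sketch above:

static void spi_server_teardown( void )
{
  rtems_status_code sc;

  /* Remove the handler before asking the server to destroy itself */
  sc = rtems_interrupt_server_handler_remove(
    spi_server_index,
    SPI_IRQ_VECTOR,
    spi_isr,
    NULL
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    /* handle error */
    return;
  }

  /*
   * The server task deletes itself; it may still be terminating when this
   * call returns, depending on task priorities.
   */
  sc = rtems_interrupt_server_destroy( spi_server_index );
  if ( sc != RTEMS_SUCCESSFUL ) {
    /* handle error */
  }
}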