[PATCH 03/11] sapi: Add per-CPU profiling application level data

Gedare Bloom gedare at rtems.org
Mon Mar 10 15:44:16 UTC 2014


On Mon, Mar 10, 2014 at 9:28 AM, Sebastian Huber
<sebastian.huber at embedded-brains.de> wrote:
> ---
>  cpukit/sapi/include/rtems/profiling.h              |   83 ++++++++++++++++++
>  cpukit/sapi/src/profilingreportxml.c               |   89 ++++++++++++++++++++
>  testsuites/sptests/spprofiling01/spprofiling01.scn |   10 ++-
>  3 files changed, 181 insertions(+), 1 deletions(-)
>
> diff --git a/cpukit/sapi/include/rtems/profiling.h b/cpukit/sapi/include/rtems/profiling.h
> index ee56a03..ecb3ff7 100644
> --- a/cpukit/sapi/include/rtems/profiling.h
> +++ b/cpukit/sapi/include/rtems/profiling.h
> @@ -61,6 +61,12 @@ extern "C" {
>   * @brief Type of profiling data.
>   */
>  typedef enum {
> +  /**
> +   * @brief Type of per-CPU profiling data.
> +   *
> +   * @see rtems_profiling_per_cpu.
> +   */
> +  RTEMS_PROFILING_PER_CPU
>  } rtems_profiling_type;
>
>  /**
> @@ -74,6 +80,78 @@ typedef struct {
>  } rtems_profiling_header;
>
>  /**
> + * @brief Per-CPU profiling data.
> + */
> +typedef struct {
> +  /**
> +   * @brief The profiling data header.
> +   */
> +  rtems_profiling_header header;
> +
> +  /**
> +   * @brief The processor index of this profiling data.
> +   */
> +  uint32_t processor_index;
> +
> +  /**
> +   * @brief The maximum time of disabled thread dispatching in nanoseconds.
> +   */
> +  uint32_t max_thread_dispatch_disabled_time;
> +
I suppose it is safe to assume the max is less than 4 seconds...
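For reference, the hard cap implied by a 32-bit nanosecond field is

  2^32 ns = 4294967296 ns ~= 4.29 s

so anything disabled for longer than that would wrap the counter.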

> +  /**
> +   * @brief Count of times when the thread dispatch disable level changes from
> +   * zero to one in thread context.
> +   *
> +   * This value may overflow.
> +   */
> +  uint64_t thread_dispatch_disabled_count;
> +
> +  /**
> +   * @brief Total time of disabled thread dispatching in nanoseconds.
> +   *
> +   * The average time of disabled thread dispatching is the total time of
> +   * disabled thread dispatching divided by the thread dispatch disabled
> +   * count.
> +   *
> +   * This value may overflow.
> +   */
> +  uint64_t total_thread_dispatch_disabled_time;
> +
Is there any option to check for such overflow conditions? It might be
good to state the conditions under which the overflow may occur. I
don't think overflow is likely for most systems...
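Rough numbers for the 64-bit accumulators, assuming they are never reset:

  2^64 ns ~= 1.8 * 10^19 ns ~= 584 years

so the nanosecond totals should only wrap on systems with extremely long
uptimes, and the count fields would need 2^64 events, which seems even
less likely.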

> +  /**
> +   * @brief The maximum interrupt delay in nanoseconds if supported by the
> +   * hardware.
> +   */
> +  uint32_t max_interrupt_delay;
> +
How does an application tell whether the hardware supports this
measurement or not? And what does it measure exactly? My guess is the
time from the interrupt signal arriving to the jump into the ISR, i.e.
the observed worst-case interrupt response time.
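If the convention is simply that an unsupported measurement stays at zero
(that is an assumption on my part, nothing in the patch says so), the
application-side check would just be something like:

  /* Assumption: zero means "not measured", not "measured as 0 ns". */
  if (per_cpu->max_interrupt_delay == 0) {
    /* interrupt delay instrumentation unavailable or never triggered */
  }

but that is ambiguous, so an explicit capability flag or a note in the
documentation would be better.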

> +  /**
> +   * @brief The maximum time spent to process a single sequence of nested
> +   * interrupts in nanoseconds.
> +   *
> +   * This is the time interval between the change of the interrupt nest level
> +   * from zero to one and the change back from one to zero.
> +   */
> +  uint32_t max_interrupt_time;
> +

Is this the measured worst-case execution time for ISRs?

> +  /**
> +   * @brief Count of times when the interrupt nest level changes from zero to
> +   * one.
> +   *
> +   * This value may overflow.
> +   */
> +  uint64_t interrupt_count;
> +
Can this value realistically overflow?
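To put a number on it: even at a sustained 1 MHz interrupt rate,

  2^64 / 10^6 per second ~= 1.8 * 10^13 s ~= 585,000 years

so I would not worry about this one in practice.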

> +  /**
> +   * @brief Total time of interrupt processing in nanoseconds.
> +   *
> +   * The average time of interrupt processing is the total time of interrupt
> +   * processing divided by the interrupt count.
> +   *
> +   * This value may overflow.
> +   */
> +  uint64_t total_interrupt_time;
Again, it might be good to state the conditions under which the
overflow may occur.

> +} rtems_profiling_per_cpu;
> +
> +/**
>   * @brief Collection of profiling data.
>   */
>  typedef union {
> @@ -81,6 +159,11 @@ typedef union {
>     * @brief Header to specify the actual profiling data.
>     */
>    rtems_profiling_header header;
> +
> +  /**
> +   * @brief Per-CPU profiling data if indicated by the header.
> +   */
> +  rtems_profiling_per_cpu per_cpu;
>  } rtems_profiling_data;
>
>  /**
> diff --git a/cpukit/sapi/src/profilingreportxml.c b/cpukit/sapi/src/profilingreportxml.c
> index a80cd16..f0a6fd6 100644
> --- a/cpukit/sapi/src/profilingreportxml.c
> +++ b/cpukit/sapi/src/profilingreportxml.c
> @@ -18,6 +18,8 @@
>
>  #include <rtems/profiling.h>
>
> +#include <inttypes.h>
> +
>  typedef struct {
>    rtems_profiling_printf printf_func;
>    void *printf_arg;
> @@ -45,9 +47,96 @@ static void indent(context *ctx, uint32_t indentation_level)
>    }
>  }
>
> +static void report_per_cpu(context *ctx, const rtems_profiling_per_cpu *per_cpu)
> +{
> +  rtems_profiling_printf printf_func = ctx->printf_func;
> +  void *printf_arg = ctx->printf_arg;
> +  int rv;
> +
> +  indent(ctx, 1);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<PerCPUProfilingReport processorIndex=\"%" PRIu32 "\">\n",
> +    per_cpu->processor_index
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 2);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<MaxThreadDispatchDisabledTime unit=\"ns\">%" PRIu32
> +      "</MaxThreadDispatchDisabledTime>\n",
> +    per_cpu->max_thread_dispatch_disabled_time
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 2);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<ThreadDispatchDisabledCount>%" PRIu64 "</ThreadDispatchDisabledCount>\n",
> +    per_cpu->thread_dispatch_disabled_count
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 2);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<TotalThreadDispatchDisabledTime unit=\"ns\">%" PRIu64
> +      "</TotalThreadDispatchDisabledTime>\n",
> +    per_cpu->total_thread_dispatch_disabled_time
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 2);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<MaxInterruptTime unit=\"ns\">%" PRIu32
> +      "</MaxInterruptTime>\n",
> +    per_cpu->max_interrupt_time
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 2);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<MaxInterruptDelay unit=\"ns\">%" PRIu32 "</MaxInterruptDelay>\n",
> +    per_cpu->max_interrupt_delay
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 2);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<InterruptCount>%" PRIu64 "</InterruptCount>\n",
> +    per_cpu->interrupt_count
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 2);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "<TotalInterruptTime unit=\"ns\">%" PRIu64 "</TotalInterruptTime>\n",
> +    per_cpu->total_interrupt_time
> +  );
> +  update_retval(ctx, rv);
> +
> +  indent(ctx, 1);
> +  rv = (*printf_func)(
> +    printf_arg,
> +    "</PerCPUProfilingReport>\n"
> +  );
> +  update_retval(ctx, rv);
> +}
> +
>  static void report(void *arg, const rtems_profiling_data *data)
>  {
>    context *ctx = arg;
> +
> +  switch (data->header.type) {
> +    case RTEMS_PROFILING_PER_CPU:
> +      report_per_cpu(ctx, &data->per_cpu);
> +      break;
> +  }
>  }
>
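For what it's worth, a minimal sketch of how application code could
consume the per-CPU data directly, using the same visitor signature as
report() above (the printf formatting is just for illustration; I assume
the iteration entry point from the earlier patches in this series hands
each data set to such a visitor):

#include <inttypes.h>
#include <stdio.h>

#include <rtems/profiling.h>

/* Same signature as report(); only looks at the per-CPU data. */
static void show_per_cpu(void *arg, const rtems_profiling_data *data)
{
  (void) arg;

  if (data->header.type == RTEMS_PROFILING_PER_CPU) {
    const rtems_profiling_per_cpu *per_cpu = &data->per_cpu;

    printf(
      "CPU %" PRIu32 ": max dispatch disabled %" PRIu32 " ns, "
        "max interrupt time %" PRIu32 " ns\n",
      per_cpu->processor_index,
      per_cpu->max_thread_dispatch_disabled_time,
      per_cpu->max_interrupt_time
    );
  }
}

Having the raw struct available like this is nice for targets where
generating the full XML report is too heavyweight.
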
>  int rtems_profiling_report_xml(
> diff --git a/testsuites/sptests/spprofiling01/spprofiling01.scn b/testsuites/sptests/spprofiling01/spprofiling01.scn
> index 2c289db..a00baa1 100644
> --- a/testsuites/sptests/spprofiling01/spprofiling01.scn
> +++ b/testsuites/sptests/spprofiling01/spprofiling01.scn
> @@ -1,5 +1,13 @@
>  *** TEST SPPROFILING 1 ***
>    <ProfilingReport name="X">
> +    <PerCPUProfilingReport processorIndex="0">
> +      <MaxInterruptDelay unit="ns">0</MaxInterruptDelay>
> +      <MaxThreadDispatchDisabledTime unit="ns">0</MaxThreadDispatchDisabledTime>
> +      <ThreadDispatchDisabledCount>0</ThreadDispatchDisabledCount>
> +      <TotalThreadDispatchDisabledTime unit="ns">0</TotalThreadDispatchDisabledTime>
> +      <InterruptCount>0</InterruptCount>
> +      <TotalInterruptTime unit="ns">0</TotalInterruptTime>
> +    </PerCPUProfilingReport>
>    </ProfilingReport>
> -characters produced by rtems_profiling_report_xml(): 50
> +characters produced by rtems_profiling_report_xml(): 516
>  *** END OF TEST SPPROFILING 1 ***
> --
> 1.7.7
>
> _______________________________________________
> rtems-devel mailing list
> rtems-devel at rtems.org
> http://www.rtems.org/mailman/listinfo/rtems-devel


