[PATCH 1/2] cpukit: Add Epiphany architecture port v2

Hesham ALMatary heshamelmatary at gmail.com
Sat May 9 14:45:42 UTC 2015


---
 cpukit/configure.ac                                |    1 +
 cpukit/librpc/src/xdr/xdr_float.c                  |    1 +
 cpukit/score/cpu/Makefile.am                       |    1 +
 cpukit/score/cpu/epiphany/Makefile.am              |   32 +
 cpukit/score/cpu/epiphany/cpu.c                    |  114 ++
 .../cpu/epiphany/epiphany-context-initialize.c     |   66 ++
 .../score/cpu/epiphany/epiphany-context-switch.S   |  216 ++++
 .../cpu/epiphany/epiphany-exception-handler.S      |  304 +++++
 cpukit/score/cpu/epiphany/preinstall.am            |   53 +
 cpukit/score/cpu/epiphany/rtems/asm.h              |  120 ++
 cpukit/score/cpu/epiphany/rtems/score/cpu.h        | 1185 ++++++++++++++++++++
 cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h    |   74 ++
 cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h  |   14 +
 .../cpu/epiphany/rtems/score/epiphany-utility.h    |  180 +++
 cpukit/score/cpu/epiphany/rtems/score/epiphany.h   |   64 ++
 cpukit/score/cpu/epiphany/rtems/score/types.h      |   68 ++
 16 files changed, 2493 insertions(+)
 create mode 100644 cpukit/score/cpu/epiphany/Makefile.am
 create mode 100644 cpukit/score/cpu/epiphany/cpu.c
 create mode 100644 cpukit/score/cpu/epiphany/epiphany-context-initialize.c
 create mode 100644 cpukit/score/cpu/epiphany/epiphany-context-switch.S
 create mode 100644 cpukit/score/cpu/epiphany/epiphany-exception-handler.S
 create mode 100644 cpukit/score/cpu/epiphany/preinstall.am
 create mode 100644 cpukit/score/cpu/epiphany/rtems/asm.h
 create mode 100644 cpukit/score/cpu/epiphany/rtems/score/cpu.h
 create mode 100644 cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h
 create mode 100644 cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h
 create mode 100644 cpukit/score/cpu/epiphany/rtems/score/epiphany-utility.h
 create mode 100644 cpukit/score/cpu/epiphany/rtems/score/epiphany.h
 create mode 100644 cpukit/score/cpu/epiphany/rtems/score/types.h

diff --git a/cpukit/configure.ac b/cpukit/configure.ac
index 2b432f6..46942c9 100644
--- a/cpukit/configure.ac
+++ b/cpukit/configure.ac
@@ -453,6 +453,7 @@ score/cpu/Makefile
 score/cpu/arm/Makefile
 score/cpu/bfin/Makefile
 score/cpu/avr/Makefile
+score/cpu/epiphany/Makefile
 score/cpu/h8300/Makefile
 score/cpu/i386/Makefile
 score/cpu/lm32/Makefile
diff --git a/cpukit/librpc/src/xdr/xdr_float.c b/cpukit/librpc/src/xdr/xdr_float.c
index 925b294..ac8c46d 100644
--- a/cpukit/librpc/src/xdr/xdr_float.c
+++ b/cpukit/librpc/src/xdr/xdr_float.c
@@ -61,6 +61,7 @@ static char *rcsid = "$FreeBSD: src/lib/libc/xdr/xdr_float.c,v 1.7 1999/08/28 00
 #if defined(__alpha__) || \
     defined(_AM29K) || \
     defined(__arm__) || \
+    defined(__epiphany__) || defined(__EPIPHANY__) || \
     defined(__H8300__) || defined(__h8300__) || \
     defined(__hppa__) || \
     defined(__i386__) || \
diff --git a/cpukit/score/cpu/Makefile.am b/cpukit/score/cpu/Makefile.am
index 69abcd6..7279d38 100644
--- a/cpukit/score/cpu/Makefile.am
+++ b/cpukit/score/cpu/Makefile.am
@@ -4,6 +4,7 @@ DIST_SUBDIRS =
 DIST_SUBDIRS += arm
 DIST_SUBDIRS += avr
 DIST_SUBDIRS += bfin
+DIST_SUBDIRS += epiphany
 DIST_SUBDIRS += h8300
 DIST_SUBDIRS += i386
 DIST_SUBDIRS += lm32
diff --git a/cpukit/score/cpu/epiphany/Makefile.am b/cpukit/score/cpu/epiphany/Makefile.am
new file mode 100644
index 0000000..0099f08
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/Makefile.am
@@ -0,0 +1,32 @@
+include $(top_srcdir)/automake/compile.am
+
+CLEANFILES =
+DISTCLEANFILES =
+
+include_rtemsdir = $(includedir)/rtems
+
+include_rtems_HEADERS = rtems/asm.h
+
+include_rtems_scoredir = $(includedir)/rtems/score
+
+include_rtems_score_HEADERS =
+include_rtems_score_HEADERS += rtems/score/cpu.h
+include_rtems_score_HEADERS += rtems/score/cpuatomic.h
+include_rtems_score_HEADERS += rtems/score/cpu_asm.h
+include_rtems_score_HEADERS += rtems/score/types.h
+include_rtems_score_HEADERS += rtems/score/epiphany.h
+include_rtems_score_HEADERS += rtems/score/epiphany-utility.h
+
+noinst_LIBRARIES = libscorecpu.a
+
+libscorecpu_a_SOURCES  = cpu.c
+libscorecpu_a_SOURCES += epiphany-exception-handler.S
+libscorecpu_a_SOURCES += epiphany-context-switch.S
+libscorecpu_a_SOURCES += epiphany-context-initialize.c
+
+libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS)
+
+all-local: $(PREINSTALL_FILES)
+
+include $(srcdir)/preinstall.am
+include $(top_srcdir)/automake/local.am
diff --git a/cpukit/score/cpu/epiphany/cpu.c b/cpukit/score/cpu/epiphany/cpu.c
new file mode 100644
index 0000000..9ec8f94
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/cpu.c
@@ -0,0 +1,114 @@
+/*
+ * Epiphany CPU Dependent Source
+ *
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/wkspace.h>
+/* Do not include <bsp/...> headers here: cpukit/score must stay BSP-independent */
+#include <rtems/score/cpu.h>
+
+void _init(void);
+void _fini(void);
+
+void _init(void)
+{
+  /* Do nothing */
+}
+
+void _fini(void)
+{
+  /* Do nothing */
+}
+
+void _CPU_Exception_frame_print (const CPU_Exception_frame *ctx)
+{
+  /* Do nothing */
+}
+/**
+ * @brief Performs processor dependent initialization.
+ */
+void _CPU_Initialize(void)
+{
+  /* Do nothing */
+}
+
+void _CPU_ISR_Set_level(uint32_t level)
+{
+  /* Do nothing */
+}
+
+uint32_t  _CPU_ISR_Get_level( void )
+{
+  /* Do nothing */
+  return 0;
+}
+
+void _CPU_ISR_install_raw_handler(
+  uint32_t   vector,
+  proc_ptr    new_handler,
+  proc_ptr   *old_handler
+)
+{
+  /* Do nothing */
+}
+
+void _CPU_ISR_install_vector(
+  uint32_t    vector,
+  proc_ptr    new_handler,
+  proc_ptr   *old_handler
+)
+{
+  /* Do nothing */
+}
+
+void _CPU_Install_interrupt_stack( void )
+{
+  /* Do nothing */
+}
+
+CPU_Counter_ticks _CPU_Counter_read( void )
+{
+  static CPU_Counter_ticks counter;
+
+  CPU_Counter_ticks snapshot;
+
+  snapshot = counter;
+  counter = snapshot + 1;
+
+  return snapshot;
+}
+
+void _CPU_Thread_Idle_body( uintptr_t ignored )
+{
+  do {
+    __asm__ __volatile__ ("idle");
+  } while (1);
+}
diff --git a/cpukit/score/cpu/epiphany/epiphany-context-initialize.c b/cpukit/score/cpu/epiphany/epiphany-context-initialize.c
new file mode 100644
index 0000000..b47871e
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/epiphany-context-initialize.c
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * COPYRIGHT (c) 1989-2006.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <string.h>
+
+#include <rtems/score/cpu.h>
+#include <rtems/score/interr.h>
+
+void _CPU_Context_Initialize(
+  Context_Control *context,
+  void *stack_area_begin,
+  size_t stack_area_size,
+  uint32_t new_level,
+  void (*entry_point)( void ),
+  bool is_fp,
+  void *tls_area
+)
+{
+  uintptr_t stack = ((uintptr_t) stack_area_begin);
+  uint32_t sr, iret;
+
+  /* Account for red-zone */
+  uintptr_t stack_high = stack + stack_area_size - EPIPHANY_GCC_RED_ZONE_SIZE;
+
+  asm volatile ("movfs %0, status \n" : "=r" (sr):);
+  asm volatile ("movfs %0, iret \n" : "=r" (iret):);
+
+  memset(context, 0, sizeof(*context));
+
+  context->r[11] = stack_high;
+  context->r[13] = stack_high;
+  context->r[14] = (uintptr_t) entry_point;
+  context->status = sr;
+  context->iret = iret;
+}
diff --git a/cpukit/score/cpu/epiphany/epiphany-context-switch.S b/cpukit/score/cpu/epiphany/epiphany-context-switch.S
new file mode 100644
index 0000000..6d08389
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/epiphany-context-switch.S
@@ -0,0 +1,216 @@
+/*
+ * Epiphany CPU Dependent Source
+ *
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <rtems/asm.h>
+
+.section .text,"ax"
+.align 4
+
+PUBLIC(_CPU_Context_switch)
+PUBLIC(_CPU_Context_restore)
+PUBLIC(_CPU_Context_restore_fp)
+PUBLIC(_CPU_Context_save_fp)
+PUBLIC(restore)
+
+SYM(_CPU_Context_switch):
+  /* Disable interrupts and store all registers */
+  gid
+
+  str r0,  [r0]
+  str r1,  [r0,1]
+  str r2,  [r0,2]
+  str r3,  [r0,3]
+  str r4,  [r0,4]
+  str r5,  [r0,5]
+  str r6,  [r0,6]
+  str r7,  [r0,7]
+  str r8,  [r0,8]
+  str r9,  [r0,9]
+  str r10, [r0,10]
+  str fp,  [r0,11]
+  str r12, [r0,12]
+  str sp,  [r0,13]
+  str lr,  [r0,14]
+  str r15, [r0,15]
+  str r16, [r0,16]
+  str r17, [r0,17]
+  str r18, [r0,18]
+  str r19, [r0,19]
+  str r20, [r0,20]
+  str r21, [r0,21]
+  str r22, [r0,22]
+  str r23, [r0,23]
+  str r24, [r0,24]
+  str r25, [r0,25]
+  str r26, [r0,26]
+  str r27, [r0,27]
+  str r28, [r0,28]
+  str r29, [r0,29]
+  str r30, [r0,30]
+  str r31, [r0,31]
+  str r32, [r0,32]
+  str r33, [r0,33]
+  str r34, [r0,34]
+  str r35, [r0,35]
+  str r36, [r0,36]
+  str r37, [r0,37]
+  str r38, [r0,38]
+  str r39, [r0,39]
+  str r40, [r0,40]
+  str r41, [r0,41]
+  str r42, [r0,42]
+  str r43, [r0,43]
+  str r44, [r0,44]
+  str r45, [r0,45]
+  str r46, [r0,46]
+  str r47, [r0,47]
+  str r48, [r0,48]
+  str r49, [r0,49]
+  str r50, [r0,50]
+  str r51, [r0,51]
+  str r52, [r0,52]
+  str r53, [r0,53]
+  str r54, [r0,54]
+  str r55, [r0,55]
+  str r56, [r0,56]
+  str r57, [r0,57]
+  str r58, [r0,58]
+  str r59, [r0,59]
+  str r60, [r0,60]
+  str r61, [r0,61]
+  str r62, [r0,62]
+  str r63, [r0,63]
+
+  /* Store status register */
+  movfs r27, status
+  str r27, [r0,64]
+
+  /* Store config register */
+  movfs r27, config
+  str r27, [r0,65]
+
+  /* Store interrupt return address register */
+  movfs r27, iret
+  str r27, [r0,66]
+
+SYM(restore):
+
+  /* r1 contains buffer address, skip it */
+  ldr r2,  [r1,2]
+  ldr r3,  [r1,3]
+  ldr r4,  [r1,4]
+  ldr r5,  [r1,5]
+  ldr r6,  [r1,6]
+  ldr r7,  [r1,7]
+  ldr r8,  [r1,8]
+  ldr r9,  [r1,9]
+  ldr r10, [r1,10]
+  ldr fp,  [r1,11]
+  ldr r12, [r1,12]
+  ldr sp,  [r1,13]
+  ldr lr,  [r1,14]
+  ldr r15, [r1,15]
+  ldr r16, [r1,16]
+  ldr r17, [r1,17]
+  ldr r18, [r1,18]
+  ldr r19, [r1,19]
+  ldr r20, [r1,20]
+  ldr r21, [r1,21]
+  ldr r22, [r1,22]
+  ldr r23, [r1,23]
+  ldr r24, [r1,24]
+  ldr r25, [r1,25]
+  ldr r26, [r1,26]
+  ldr r27, [r1,27]
+  ldr r32, [r1,32]
+  ldr r33, [r1,33]
+  ldr r34, [r1,34]
+  ldr r35, [r1,35]
+  ldr r36, [r1,36]
+  ldr r37, [r1,37]
+  ldr r38, [r1,38]
+  ldr r39, [r1,39]
+  ldr r40, [r1,40]
+  ldr r41, [r1,41]
+  ldr r42, [r1,42]
+  ldr r43, [r1,43]
+  ldr r44, [r1,44]
+  ldr r45, [r1,45]
+  ldr r46, [r1,46]
+  ldr r47, [r1,47]
+  ldr r48, [r1,48]
+  ldr r49, [r1,49]
+  ldr r50, [r1,50]
+  ldr r51, [r1,51]
+  ldr r52, [r1,52]
+  ldr r53, [r1,53]
+  ldr r54, [r1,54]
+  ldr r55, [r1,55]
+  ldr r56, [r1,56]
+  ldr r57, [r1,57]
+  ldr r58, [r1,58]
+  ldr r59, [r1,59]
+  ldr r60, [r1,60]
+  ldr r61, [r1,61]
+  ldr r62, [r1,62]
+  ldr r63, [r1,63]
+
+  /* Load status register */
+  ldr r0, [r1,64]
+  movts status, r0
+
+  /* Load config register */
+  ldr r0, [r1,65]
+  movts config, r0
+
+  /* Load interrupt return address register */
+  ldr r0,[r1,66]
+  movts iret, r0
+
+  ldr r0,[r1]
+  ldr r1,[r1,1]
+
+  /* Enable interrupts and return */
+  gie
+  jr lr
+
+SYM(_CPU_Context_restore):
+  mov     r1, r0
+  b       _restore
+  nop
+
+/* No FP support for Epiphany yet */
+SYM(_CPU_Context_restore_fp):
+  nop
+
+SYM(_CPU_Context_save_fp):
+  nop
diff --git a/cpukit/score/cpu/epiphany/epiphany-exception-handler.S b/cpukit/score/cpu/epiphany/epiphany-exception-handler.S
new file mode 100644
index 0000000..bf5e8d8
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/epiphany-exception-handler.S
@@ -0,0 +1,304 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreCPU
+ *
+ * @brief Epiphany exception support implementation.
+ */
+
+/*
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/cpu.h>
+
+#include <rtems/asm.h>
+#include <rtems/score/percpu.h>
+
+EXTERN(bsp_start_vector_table_begin)
+EXTERN(_Thread_Dispatch)
+PUBLIC(ISR_Handler)
+
+.section .text, "ax"
+.align 4
+TYPE_FUNC(ISR_Handler)
+SYM(ISR_Handler):
+  /* Reserve space for CPU_Exception_frame */
+  sub sp, sp, #(CPU_EXCEPTION_FRAME_SIZE)
+
+  str r0,  [sp]
+  str r1,  [sp,1]
+  str r2,  [sp,2]
+  str r3,  [sp,3]
+  str r4,  [sp,4]
+  str r5,  [sp,5]
+  str r6,  [sp,6]
+  str r7,  [sp,7]
+  str r8,  [sp,8]
+  str r9,  [sp,9]
+  str r10, [sp,10]
+  str fp,  [sp,11]
+  str r12, [sp,12]
+
+  /* Save interrupted task stack pointer */
+  add r1, sp, #(CPU_EXCEPTION_FRAME_SIZE + 8)
+  str r1,[sp,13]
+
+  str lr,  [sp,14]
+  str r15, [sp,15]
+  str r16, [sp,16]
+  str r17, [sp,17]
+  str r18, [sp,18]
+  str r19, [sp,19]
+  str r20, [sp,20]
+  str r21, [sp,21]
+  str r22, [sp,22]
+  str r23, [sp,23]
+  str r24, [sp,24]
+  str r25, [sp,25]
+  str r26, [sp,26]
+  str r27, [sp,27]
+  str r28, [sp,28]
+  str r29, [sp,29]
+  str r30, [sp,30]
+  str r31, [sp,31]
+  str r32, [sp,32]
+  str r33, [sp,33]
+  str r34, [sp,34]
+  str r35, [sp,35]
+  str r36, [sp,36]
+  str r37, [sp,37]
+  str r38, [sp,38]
+  str r39, [sp,39]
+  str r40, [sp,40]
+  str r41, [sp,41]
+  str r42, [sp,42]
+  str r43, [sp,43]
+  str r44, [sp,44]
+  str r45, [sp,45]
+  str r46, [sp,46]
+  str r47, [sp,47]
+  str r48, [sp,48]
+  str r49, [sp,49]
+  str r50, [sp,50]
+  str r51, [sp,51]
+  str r52, [sp,52]
+  str r53, [sp,53]
+  str r54, [sp,54]
+  str r55, [sp,55]
+  str r56, [sp,56]
+  str r57, [sp,57]
+  str r58, [sp,58]
+  str r59, [sp,59]
+  str r60, [sp,60]
+  str r61, [sp,61]
+  /* r62 and r63 are saved from start.S interrupt entry
+   * and hold vector number and _ISR_Handler address respectively.
+   */
+
+  /* Save status register */
+  movfs r1,status
+  str r1, [sp,62]
+
+  /* Save config register */
+  movfs r1,config
+  str r1, [sp,63]
+
+  /* Save interrupt return address register */
+  movfs r1,iret
+  str r1, [sp,64]
+
+  mov  r18, %low(__Per_CPU_Information)
+  movt r18, %high(__Per_CPU_Information)
+
+  add  r6, r18, #(PER_CPU_ISR_NEST_LEVEL)
+  add  r8, r18, #(PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
+
+  /* Increment nesting level and disable thread dispatch */
+  ldr  r5, [r6]
+  ldr  r7, [r8]
+  add  r5, r5, #1
+  add  r7, r7, #1
+  str  r5, [r6]
+  str  r7, [r8]
+
+  /* Keep sp (Exception frame address) in r32 - Callee saved */
+  mov  r32, sp
+
+  /* Keep __Per_CPU_Information address in r33 - Callee saved */
+  mov  r33, r18
+
+  /* Call the exception handler from vector table.
+   * First function arg for C handler is vector number,
+   * and the second is a pointer to exception frame.
+   */
+  mov  r0,  r62
+  mov  r1,  sp
+
+  mov  r27, r62
+  lsl  r27, r27, #2
+  mov  r26, %low(_bsp_start_vector_table_begin)
+  movt r26, %high(_bsp_start_vector_table_begin)
+  add  r27, r27, r26
+  ldr  r27, [r27]
+
+  /* Do not switch stacks if we are in a nested interrupt. At
+   * this point r5 should be holding ISR_NEST_LEVEL value.
+   */
+  sub  r37, r5, #1
+  bgtu jump_to_c_handler
+
+  /* Switch to RTEMS dedicated interrupt stack */
+  add     sp, r18, #(PER_CPU_INTERRUPT_STACK_HIGH)
+  ldr     sp, [sp]
+
+jump_to_c_handler:
+  jalr r27
+
+  /* Switch back to the interrupted task stack */
+  mov  sp, r32
+
+  /* Get the address of __Per_CPU_Information */
+  mov r18, r33
+
+  /* Decrement nesting level and enable multitasking */
+  add  r6, r18, #(PER_CPU_ISR_NEST_LEVEL)
+  add  r8, r18, #(PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
+
+  ldr     r5, [r6]
+  ldr     r7, [r8]
+  sub     r5, r5, #1
+  sub     r7, r7, #1
+  str     r5, [r6]
+  str     r7, [r8]
+
+  /* Check if _ISR_Nest_level > 0 */
+  sub     r37, r5, #0
+  bgtu    exception_frame_restore
+
+  /* Check if _Thread_Dispatch_disable_level > 0 */
+  sub     r37, r7, #0
+  bgtu    exception_frame_restore
+
+  /* Check if dispatch needed */
+  add     r31, r18, #(PER_CPU_DISPATCH_NEEDED)
+  ldr     r31, [r31]
+
+  sub     r35, r31, #0
+  beq     exception_frame_restore
+
+  mov     r35, %low(__Thread_Dispatch)
+  movt    r35, %high(__Thread_Dispatch)
+  jalr    r35
+
+exception_frame_restore:
+
+  ldr r1,  [sp,1]
+  ldr r2,  [sp,2]
+  ldr r3,  [sp,3]
+  ldr r4,  [sp,4]
+  ldr r5,  [sp,5]
+  ldr r6,  [sp,6]
+  ldr r7,  [sp,7]
+  ldr r8,  [sp,8]
+  ldr r9,  [sp,9]
+  ldr r10, [sp,10]
+  ldr fp,  [sp,11]
+  ldr r12, [sp,12]
+  ldr lr,  [sp,14]
+  ldr r15, [sp,15]
+  ldr r16, [sp,16]
+  ldr r17, [sp,17]
+  ldr r18, [sp,18]
+  ldr r19, [sp,19]
+  ldr r20, [sp,20]
+  ldr r21, [sp,21]
+  ldr r22, [sp,22]
+  ldr r23, [sp,23]
+  ldr r24, [sp,24]
+  ldr r25, [sp,25]
+  ldr r26, [sp,26]
+  ldr r27, [sp,27]
+  ldr r28, [sp,28]
+  ldr r29, [sp,29]
+  ldr r30, [sp,30]
+  ldr r31, [sp,31]
+  ldr r32, [sp,32]
+  ldr r34, [sp,34]
+  ldr r36, [sp,36]
+  ldr r38, [sp,38]
+  ldr r39, [sp,39]
+  ldr r40, [sp,40]
+  ldr r41, [sp,41]
+  ldr r42, [sp,42]
+  ldr r43, [sp,43]
+  ldr r44, [sp,44]
+  ldr r45, [sp,45]
+  ldr r46, [sp,46]
+  ldr r47, [sp,47]
+  ldr r48, [sp,48]
+  ldr r49, [sp,49]
+  ldr r50, [sp,50]
+  ldr r51, [sp,51]
+  ldr r52, [sp,52]
+  ldr r53, [sp,53]
+  ldr r54, [sp,54]
+  ldr r55, [sp,55]
+  ldr r56, [sp,56]
+  ldr r57, [sp,57]
+  ldr r58, [sp,58]
+  ldr r59, [sp,59]
+  ldr r60, [sp,60]
+  ldr r61, [sp,61]
+
+  /* Restore status register */
+  ldr r0,[sp,62]
+  movts status, r0
+
+  /* Restore config register */
+  ldr r0, [sp,63]
+  movts config, r0
+
+  /* Restore interrupt return address register */
+  ldr   r0, [sp,64]
+  movts iret, r0
+
+  ldr r0,[sp]
+
+  /* Restore interrupted task's stack pointer */
+  ldr sp, [sp,13]
+
+  /* r62 and r63 are saved from start.S interrupt entry
+   * and hold vector number and _ISR_Handler address respectively.
+   */
+  ldr r62, [sp, -8]
+  ldr r63, [sp, -4]
+
+  /* return from interrupt */
+  rti
diff --git a/cpukit/score/cpu/epiphany/preinstall.am b/cpukit/score/cpu/epiphany/preinstall.am
new file mode 100644
index 0000000..0250d12
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/preinstall.am
@@ -0,0 +1,52 @@
+## Automatically generated by ampolish3 - Do not edit
+
+if AMPOLISH3
+$(srcdir)/preinstall.am: Makefile.am
+	$(AMPOLISH3) $(srcdir)/Makefile.am > $(srcdir)/preinstall.am
+endif
+
+PREINSTALL_DIRS =
+DISTCLEANFILES += $(PREINSTALL_DIRS)
+
+all-am: $(PREINSTALL_FILES)
+
+PREINSTALL_FILES =
+CLEANFILES += $(PREINSTALL_FILES)
+
+$(PROJECT_INCLUDE)/rtems/$(dirstamp):
+	@$(MKDIR_P) $(PROJECT_INCLUDE)/rtems
+	@: > $(PROJECT_INCLUDE)/rtems/$(dirstamp)
+PREINSTALL_DIRS += $(PROJECT_INCLUDE)/rtems/$(dirstamp)
+
+$(PROJECT_INCLUDE)/rtems/asm.h: rtems/asm.h $(PROJECT_INCLUDE)/rtems/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/asm.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/asm.h
+
+$(PROJECT_INCLUDE)/rtems/score/$(dirstamp):
+	@$(MKDIR_P) $(PROJECT_INCLUDE)/rtems/score
+	@: > $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+PREINSTALL_DIRS += $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+
+$(PROJECT_INCLUDE)/rtems/score/cpu.h: rtems/score/cpu.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpu.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpu.h
+
+$(PROJECT_INCLUDE)/rtems/score/cpuatomic.h: rtems/score/cpuatomic.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpuatomic.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpuatomic.h
+
+$(PROJECT_INCLUDE)/rtems/score/cpu_asm.h: rtems/score/cpu_asm.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpu_asm.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpu_asm.h
+
+$(PROJECT_INCLUDE)/rtems/score/types.h: rtems/score/types.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/types.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/types.h
+
+$(PROJECT_INCLUDE)/rtems/score/epiphany.h: rtems/score/epiphany.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/epiphany.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/epiphany.h
+
+$(PROJECT_INCLUDE)/rtems/score/epiphany-utility.h: rtems/score/epiphany-utility.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/epiphany-utility.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/epiphany-utility.h
diff --git a/cpukit/score/cpu/epiphany/rtems/asm.h b/cpukit/score/cpu/epiphany/rtems/asm.h
new file mode 100644
index 0000000..87e0cca
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/rtems/asm.h
@@ -0,0 +1,120 @@
+/**
+ * @file rtems/asm.h
+ *
+ *  This include file attempts to address the problems
+ *  caused by incompatible flavors of assemblers and
+ *  toolsets.  It primarily addresses variations in the
+ *  use of leading underscores on symbols and the requirement
+ *  that register names be preceded by a %.
+ */
+
+/*
+ *  NOTE: The spacing in the use of these macros
+ *        is critical to them working as advertised.
+ *
+ *  This file is based on similar code found in newlib available
+ *  from ftp.cygnus.com.  The file which was used had no copyright
+ *  notice.  This file is freely distributable as long as the source
+ *  of the file is noted.  This file is:
+ *
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ *
+ * COPYRIGHT (c) 1994-1997.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __EPIPHANY_ASM_H
+#define __EPIPHANY_ASM_H
+
+/*
+ *  Indicate we are in an assembly file and get the basic CPU definitions.
+ */
+
+#ifndef ASM
+#define ASM
+#endif
+#include <rtems/score/cpuopts.h>
+#include <rtems/score/epiphany.h>
+
+/*
+ *  Recent versions of GNU cpp define variables which indicate the
+ *  need for underscores and percents.  If not using GNU cpp or
+ *  the version does not support this, then you will obviously
+ *  have to define these as appropriate.
+ */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+/* ANSI concatenation macros.  */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels.  */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+/* Use the right prefix for registers.  */
+
+#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
+
+/*
+ *  define macros for all of the registers on this CPU
+ *
+ *  EXAMPLE:     #define d0 REG (d0)
+ */
+
+/*
+ *  Define macros to handle section beginning and ends.
+ */
+#define BEGIN_CODE_DCL .text
+#define END_CODE_DCL
+#define BEGIN_DATA_DCL .data
+#define END_DATA_DCL
+#define BEGIN_CODE .text
+#define END_CODE
+#define BEGIN_DATA
+#define END_DATA
+#define BEGIN_BSS
+#define END_BSS
+#define END
+
+/*
+ *  Following must be tailor for a particular flavor of the C compiler.
+ *  They may need to put underscores in front of the symbols.
+ */
+
+#define PUBLIC(sym)    .global SYM (sym)
+#define EXTERN(sym)    .extern SYM (sym)
+#define TYPE_FUNC(sym) .type SYM (sym), %function
+
+#endif
diff --git a/cpukit/score/cpu/epiphany/rtems/score/cpu.h b/cpukit/score/cpu/epiphany/rtems/score/cpu.h
new file mode 100644
index 0000000..9958599
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/rtems/score/cpu.h
@@ -0,0 +1,1185 @@
+/**
+ * @file rtems/score/cpu.h
+ */
+
+/*
+ *
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * COPYRIGHT (c) 1989-1999.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _EPIPHANY_CPU_H
+#define _EPIPHANY_CPU_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems/score/epiphany.h> /* pick up machine definitions */
+#include <rtems/score/types.h>
+#ifndef ASM
+#include <rtems/bspIo.h>
+#include <stdint.h>
+#include <stdio.h> /* for printk */
+#endif
+
+/* conditional compilation parameters */
+
+/*
+ *  Should the calls to _Thread_Enable_dispatch be inlined?
+ *
+ *  If TRUE, then they are inlined.
+ *  If FALSE, then a subroutine call is made.
+ *
+ *  Basically this is an example of the classic trade-off of size
+ *  versus speed.  Inlining the call (TRUE) typically increases the
+ *  size of RTEMS while speeding up the enabling of dispatching.
+ *  [NOTE: In general, the _Thread_Dispatch_disable_level will
+ *  only be 0 or 1 unless you are in an interrupt handler and that
+ *  interrupt handler invokes the executive.]  When not inlined
+ *  something calls _Thread_Enable_dispatch which in turn calls
+ *  _Thread_Dispatch.  If the enable dispatch is inlined, then
+ *  one subroutine call is avoided entirely.]
+ *
+ */
+
+#define CPU_INLINE_ENABLE_DISPATCH       FALSE
+
+/*
+ *  Should the body of the search loops in _Thread_queue_Enqueue_priority
+ *  be unrolled one time?  In unrolled each iteration of the loop examines
+ *  two "nodes" on the chain being searched.  Otherwise, only one node
+ *  is examined per iteration.
+ *
+ *  If TRUE, then the loops are unrolled.
+ *  If FALSE, then the loops are not unrolled.
+ *
+ *  The primary factor in making this decision is the cost of disabling
+ *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
+ *  body of the loop.  On some CPUs, the flash is more expensive than
+ *  one iteration of the loop body.  In this case, it might be desirable
+ *  to unroll the loop.  It is important to note that on some CPUs, this
+ *  code is the longest interrupt disable period in RTEMS.  So it is
+ *  necessary to strike a balance when setting this parameter.
+ *
+ */
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
+
+/*
+ *  Does RTEMS manage a dedicated interrupt stack in software?
+ *
+ *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
+ *  If FALSE, nothing is done.
+ *
+ *  If the CPU supports a dedicated interrupt stack in hardware,
+ *  then it is generally the responsibility of the BSP to allocate it
+ *  and set it up.
+ *
+ *  If the CPU does not support a dedicated interrupt stack, then
+ *  the porter has two options: (1) execute interrupts on the
+ *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
+ *  interrupt stack.
+ *
+ *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
+ *  possible that both are FALSE for a particular CPU.  Although it
+ *  is unclear what that would imply about the interrupt processing
+ *  procedure on that CPU.
+ *
+ *  Currently, for epiphany port, _ISR_Handler is responsible for switching to
+ *  RTEMS dedicated interrupt task.
+ *
+ */
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
+
+/*
+ *  Does this CPU have hardware support for a dedicated interrupt stack?
+ *
+ *  If TRUE, then it must be installed during initialization.
+ *  If FALSE, then no installation is performed.
+ *
+ *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
+ *
+ *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
+ *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
+ *  possible that both are FALSE for a particular CPU.  Although it
+ *  is unclear what that would imply about the interrupt processing
+ *  procedure on that CPU.
+ *
+ */
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
+
+/*
+ *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
+ *
+ *  If TRUE, then the memory is allocated during initialization.
+ *  If FALSE, then the interrupt stack memory is not allocated by RTEMS.
+ *
+ *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
+ *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
+ *
+ */
+
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
+
+/*
+ *  Does the RTEMS invoke the user's ISR with the vector number and
+ *  a pointer to the saved interrupt frame (1) or just the vector
+ *  number (0)?
+ *
+ */
+
+#define CPU_ISR_PASSES_FRAME_POINTER 1
+
+/*
+ *  Does the CPU have hardware floating point?
+ *
+ *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
+ *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
+ *
+ *  If there is a FP coprocessor such as the i387 or mc68881, then
+ *  the answer is TRUE.
+ *
+ *  The macro name "epiphany_HAS_FPU" should be made CPU specific.
+ *  It indicates whether or not this CPU model has FP support.  For
+ *  example, it would be possible to have an i386_nofp CPU model
+ *  which set this to false to indicate that you have an i386 without
+ *  an i387 and wish to leave floating point support out of RTEMS.
+ *
+ *  The CPU_SOFTWARE_FP is used to indicate whether or not there
+ *  is software implemented floating point that must be context
+ *  switched.  The determination of whether or not this applies
+ *  is very tool specific and the state saved/restored is also
+ *  compiler specific.
+ *
+ *  epiphany Specific Information:
+ *
+ *  At this time there are no implementations of Epiphany that are
+ *  expected to implement floating point.
+ */
+
+#define CPU_HARDWARE_FP     FALSE
+#define CPU_SOFTWARE_FP     FALSE
+
+/*
+ *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
+ *
+ *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
+ *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
+ *
+ *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
+ *
+ */
+
+#define CPU_ALL_TASKS_ARE_FP     FALSE
+
+/*
+ *  Should the IDLE task have a floating point context?
+ *
+ *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
+ *  and it has a floating point context which is switched in and out.
+ *  If FALSE, then the IDLE task does not have a floating point context.
+ *
+ *  Setting this to TRUE negatively impacts the time required to preempt
+ *  the IDLE task from an interrupt because the floating point context
+ *  must be saved as part of the preemption.
+ *
+ */
+
+#define CPU_IDLE_TASK_IS_FP      FALSE
+
+/*
+ *  Should the saving of the floating point registers be deferred
+ *  until a context switch is made to another different floating point
+ *  task?
+ *
+ *  If TRUE, then the floating point context will not be stored until
+ *  necessary.  It will remain in the floating point registers and not
+ *  disturbed until another floating point task is switched to.
+ *
+ *  If FALSE, then the floating point context is saved when a floating
+ *  point task is switched out and restored when the next floating point
+ *  task is restored.  The state of the floating point registers between
+ *  those two operations is not specified.
+ *
+ *  If the floating point context does NOT have to be saved as part of
+ *  interrupt dispatching, then it should be safe to set this to TRUE.
+ *
+ *  Setting this flag to TRUE results in using a different algorithm
+ *  for deciding when to save and restore the floating point context.
+ *  The deferred FP switch algorithm minimizes the number of times
+ *  the FP context is saved and restored.  The FP context is not saved
+ *  until a context switch is made to another, different FP task.
+ *  Thus in a system with only one FP task, the FP context will never
+ *  be saved or restored.
+ *
+ */
+
+#define CPU_USE_DEFERRED_FP_SWITCH       FALSE
+
+/*
+ *  Does this port provide a CPU dependent IDLE task implementation?
+ *
+ *  If TRUE, then the routine _CPU_Thread_Idle_body
+ *  must be provided and is the default IDLE thread body instead of
+ *  _Thread_Idle_body.
+ *
+ *  If FALSE, then use the generic IDLE thread body if the BSP does
+ *  not provide one.
+ *
+ *  This is intended to allow for supporting processors which have
+ *  a low power or idle mode.  When the IDLE thread is executed, then
+ *  the CPU can be powered down.
+ *
+ *  The order of precedence for selecting the IDLE thread body is:
+ *
+ *    1.  BSP provided
+ *    2.  CPU dependent (if provided)
+ *    3.  generic (if no BSP and no CPU dependent)
+ *
+ */
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE
+
+/*
+ *  Does the stack grow up (toward higher addresses) or down
+ *  (toward lower addresses)?
+ *
+ *  If TRUE, then the stack grows upward.
+ *  If FALSE, then the stack grows toward smaller addresses.
+ *
+ */
+
+#define CPU_STACK_GROWS_UP               FALSE
+
+/*
+ *  The following is the variable attribute used to force alignment
+ *  of critical RTEMS structures.  On some processors it may make
+ *  sense to have these aligned on tighter boundaries than
+ *  the minimum requirements of the compiler in order to have as
+ *  much of the critical data area as possible in a cache line.
+ *
+ *  The placement of this macro in the declaration of the variables
+ *  is based on the syntactic requirements of the GNU C
+ *  "__attribute__" extension.  For example with GNU C, use
+ *  the following to force a structures to a 32 byte boundary.
+ *
+ *      __attribute__ ((aligned (32)))
+ *
+ *  NOTE:  Currently only the Priority Bit Map table uses this feature.
+ *         To benefit from using this, the data must be heavily
+ *         used so it will stay in the cache and used frequently enough
+ *         in the executive to justify turning this on.
+ *
+ */
+
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (64)))
+
+/*
+ *  Define what is required to specify how the network to host conversion
+ *  routines are handled.
+ *
+ *  epiphany Specific Information:
+ *
+ *  The Epiphany architecture is little endian, which is reflected in
+ *  the CPU_BIG_ENDIAN / CPU_LITTLE_ENDIAN settings below.  The generic
+ *  network to host conversion routines are used; the architecture has
+ *  no dedicated byte-swap instructions, so CPU_swap_u32() further down
+ *  in this file performs the swap in software.
+ */
+
+#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
+#define CPU_BIG_ENDIAN                           FALSE
+#define CPU_LITTLE_ENDIAN                        TRUE
+
+/*
+ *  The following defines the number of bits actually used in the
+ *  interrupt field of the task mode.  How those bits map to the
+ *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
+ *
+ */
+
+#define CPU_MODES_INTERRUPT_MASK   0x00000001
+
+/*
+ *  Processor defined structures required for cpukit/score.
+ */
+
+/*
+ * Contexts
+ *
+ *  Generally there are 2 types of context to save.
+ *     1. Interrupt registers to save
+ *     2. Task level registers to save
+ *
+ *  This means we have the following 3 context items:
+ *     1. task level context stuff::  Context_Control
+ *     2. floating point task stuff:: Context_Control_fp
+ *     3. special interrupt level context :: Context_Control_interrupt
+ *
+ *  On some processors, it is cost-effective to save only the callee
+ *  preserved registers during a task context switch.  This means
+ *  that the ISR code needs to save those registers which do not
+ *  persist across function calls.  It is not mandatory to make this
+ *  distinctions between the caller/callee saves registers for the
+ *  purpose of minimizing context saved during task switch and on interrupts.
+ *  If the cost of saving extra registers is minimal, simplicity is the
+ *  choice.  Save the same context on interrupt entry as for tasks in
+ *  this case.
+ *
+ *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
+ *  care should be used in designing the context area.
+ *
+ *  On some CPUs with hardware floating point support, the Context_Control_fp
+ *  structure will not be used or it simply consist of an array of a
+ *  fixed number of bytes.   This is done when the floating point context
+ *  is dumped by a "FP save context" type instruction and the format
+ *  is not really defined by the CPU.  In this case, there is no need
+ *  to figure out the exact format -- only the size.  Of course, although
+ *  this is enough information for RTEMS, it is probably not enough for
+ *  a debugger such as gdb.  But that is another problem.
+ *
+ *
+ */
+#ifndef ASM
+
+typedef struct {
+  uint32_t  r[64];
+
+  uint32_t status;
+  uint32_t config;
+  uint32_t iret;
+
+#ifdef RTEMS_SMP
+    /**
+     * @brief On SMP configurations the thread context must contain a boolean
+     * indicator to signal if this context is executing on a processor.
+     *
+     * This field must be updated during a context switch.  The context switch
+     * to the heir must wait until the heir context indicates that it is no
+     * longer executing on a processor.  The context switch must also check if
+     * a thread dispatch is necessary to honor updates of the heir thread for
+     * this processor.  This indicator must be updated using an atomic test and
+     * set operation to ensure that at most one processor uses the heir
+     * context at the same time.
+     *
+     * @code
+     * void _CPU_Context_switch(
+     *   Context_Control *executing,
+     *   Context_Control *heir
+     * )
+     * {
+     *   save( executing );
+     *
+     *   executing->is_executing = false;
+     *   memory_barrier();
+     *
+     *   if ( test_and_set( &heir->is_executing ) ) {
+     *     do {
+     *       Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
+     *
+     *       if ( cpu_self->dispatch_necessary ) {
+     *         heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
+     *       }
+     *     } while ( test_and_set( &heir->is_executing ) );
+     *   }
+     *
+     *   restore( heir );
+     * }
+     * @endcode
+     */
+    volatile bool is_executing;
+#endif
+} Context_Control;
+
+#define _CPU_Context_Get_SP( _context ) \
+  (_context)->r[13]
+
+typedef struct {
+  /** FPU registers are listed here */
+  double  some_float_register;
+} Context_Control_fp;
+
+typedef Context_Control CPU_Interrupt_frame;
+
+/*
+ *  The size of the floating point context area.  On some CPUs this
+ *  will not be a "sizeof" because the format of the floating point
+ *  area is not defined -- only the size is.  This is usually on
+ *  CPUs with a "floating point save context" instruction.
+ *
+ *  epiphany Specific Information:
+ *
+ */
+
+#define CPU_CONTEXT_FP_SIZE  0
+SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
+
+/*
+ *  Amount of extra stack (above minimum stack size) required by
+ *  MPCI receive server thread.  Remember that in a multiprocessor
+ *  system this thread must exist and be able to process all directives.
+ *
+ */
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+/*
+ *  Should be large enough to run all RTEMS tests.  This insures
+ *  that a "reasonable" small application should not have any problems.
+ *
+ */
+
+#define CPU_STACK_MINIMUM_SIZE  4096
+
+/*
+ *  CPU's worst alignment requirement for data types on a byte boundary.  This
+ *  alignment does not take into account the requirements for the stack.
+ *
+ */
+
+#define CPU_ALIGNMENT 8
+
+/*
+ *  This is defined if the port has a special way to report the ISR nesting
+ *  level.  Most ports maintain the variable _ISR_Nest_level.
+ */
+#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
+
+/*
+ *  This number corresponds to the byte alignment requirement for the
+ *  heap handler.  This alignment requirement may be stricter than that
+ *  for the data types alignment specified by CPU_ALIGNMENT.  It is
+ *  common for the heap to follow the same alignment requirement as
+ *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
+ *  then this should be set to CPU_ALIGNMENT.
+ *
+ *  NOTE:  This does not have to be a power of 2 although it should be
+ *         a multiple of 2 greater than or equal to 2.  The requirement
+ *         to be a multiple of 2 is because the heap uses the least
+ *         significant field of the front and back flags to indicate
+ *         that a block is in use or free.  So you do not want any odd
+ *         length blocks really putting length data in that bit.
+ *
+ *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
+ *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
+ *         elements allocated from the heap meet all restrictions.
+ *
+ */
+
+#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
+
+/*
+ *  This number corresponds to the byte alignment requirement for memory
+ *  buffers allocated by the partition manager.  This alignment requirement
+ *  may be stricter than that for the data types alignment specified by
+ *  CPU_ALIGNMENT.  It is common for the partition to follow the same
+ *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
+ *  enough for the partition, then this should be set to CPU_ALIGNMENT.
+ *
+ *  NOTE:  This does not have to be a power of 2.  It does have to
+ *         be greater than or equal to CPU_ALIGNMENT.
+ *
+ */
+
+#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
+
+/*
+ *  This number corresponds to the byte alignment requirement for the
+ *  stack.  This alignment requirement may be stricter than that for the
+ *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
+ *  is strict enough for the stack, then this should be set to 0.
+ *
+ *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
+ *
+ */
+
+#define CPU_STACK_ALIGNMENT        8
+
+/* ISR handler macros */
+
+/*
+ *  Support routine to initialize the RTEMS vector table after it is allocated.
+ *
+ *  Epiphany Specific Information:
+ *
+ *  XXX document implementation including references if appropriate
+ */
+
+#define _CPU_Initialize_vectors()
+
+/*
+ *  Disable all interrupts for an RTEMS critical section.  The previous
+ *  level is returned in _level.
+ *
+ */
+
+static inline uint32_t epiphany_interrupt_disable( void )
+{
+  uint32_t sr;
+  __asm__ __volatile__ ("movfs %[sr], status \n" : [sr] "=r" (sr):);
+  __asm__ __volatile__("gid \n");
+  return sr;
+}
+
+static inline void epiphany_interrupt_enable(uint32_t level)
+{
+  __asm__ __volatile__("gie \n");
+  __asm__ __volatile__ ("movts status, %[level] \n" :: [level] "r" (level):);
+}
+
+#define _CPU_ISR_Disable( _level ) \
+    _level = epiphany_interrupt_disable()
+
+/*
+ *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
+ *  This indicates the end of an RTEMS critical section.  The parameter
+ *  _level is not modified.
+ *
+ */
+
+#define _CPU_ISR_Enable( _level )  \
+  epiphany_interrupt_enable( _level )
+
+/*
+ *  This temporarily restores the interrupt to _level before immediately
+ *  disabling them again.  This is used to divide long RTEMS critical
+ *  sections into two or more parts.  The parameter _level is not
+ *  modified.
+ *
+ */
+
+#define _CPU_ISR_Flash( _level ) \
+  do{ \
+      if ( (_level & 0x2) != 0 ) \
+        _CPU_ISR_Enable( _level ); \
+      epiphany_interrupt_disable(); \
+    } while(0)
+
+/*
+ *  Map interrupt level in task mode onto the hardware that the CPU
+ *  actually provides.  Currently, interrupt levels which do not
+ *  map onto the CPU in a generic fashion are undefined.  Someday,
+ *  it would be nice if these were "mapped" by the application
+ *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
+ *  8 - 255 would be available for bsp/application specific meaning.
+ *  This could be used to manage a programmable interrupt controller
+ *  via the rtems_task_mode directive.
+ *
+ *  The get routine usually must be implemented as a subroutine.
+ *
+ */
+
+void _CPU_ISR_Set_level( uint32_t level );
+
+uint32_t _CPU_ISR_Get_level( void );
+
+/* end of ISR handler macros */
+
+/* Context handler macros */
+
+/*
+ *  Initialize the context to a state suitable for starting a
+ *  task after a context restore operation.  Generally, this
+ *  involves:
+ *
+ *     - setting a starting address
+ *     - preparing the stack
+ *     - preparing the stack and frame pointers
+ *     - setting the proper interrupt level in the context
+ *     - initializing the floating point context
+ *
+ *  This routine generally does not set any unnecessary register
+ *  in the context.  The state of the "general data" registers is
+ *  undefined at task start time.
+ *
+ *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
+ *        point thread.  This is typically only used on CPUs where the
+ *        FPU may be easily disabled by software such as on the SPARC
+ *        where the PSR contains an enable FPU bit.
+ *
+ */
+
+/**
+ * @brief Account for GCC red-zone
+ *
+ * The following macro is used when initializing task's stack
+ * to account for GCC red-zone.
+ */
+
+#define EPIPHANY_GCC_RED_ZONE_SIZE 128
+
+/**
+ * @brief Initializes the CPU context.
+ *
+ * The following steps are performed:
+ *  - setting a starting address
+ *  - preparing the stack
+ *  - preparing the stack and frame pointers
+ *  - setting the proper interrupt level in the context
+ *
+ * @param[in] context points to the context area
+ * @param[in] stack_area_begin is the low address of the allocated stack area
+ * @param[in] stack_area_size is the size of the stack area in bytes
+ * @param[in] new_level is the interrupt level for the task
+ * @param[in] entry_point is the task's entry point
+ * @param[in] is_fp is set to @c true if the task is a floating point task
+ * @param[in] tls_area is the thread-local storage (TLS) area
+ */
+void _CPU_Context_Initialize(
+  Context_Control *context,
+  void *stack_area_begin,
+  size_t stack_area_size,
+  uint32_t new_level,
+  void (*entry_point)( void ),
+  bool is_fp,
+  void *tls_area
+);
+
+/*
+ *  This routine is responsible for somehow restarting the currently
+ *  executing task.  If you are lucky, then all that is necessary
+ *  is restoring the context.  Otherwise, there will need to be
+ *  a special assembly routine which does something special in this
+ *  case.  Context_Restore should work most of the time.  It will
+ *  not work if restarting self conflicts with the stack frame
+ *  assumptions of restoring a context.
+ *
+ */
+
+#define _CPU_Context_Restart_self( _the_context ) \
+   _CPU_Context_restore( (_the_context) );
+
+/*
+ *  The purpose of this macro is to allow the initial pointer into
+ *  a floating point context area (used to save the floating point
+ *  context) to be at an arbitrary place in the floating point
+ *  context area.
+ *
+ *  This is necessary because some FP units are designed to have
+ *  their context saved as a stack which grows into lower addresses.
+ *  Other FP units can be saved by simply moving registers into offsets
+ *  from the base of the context area.  Finally some FP units provide
+ *  a "dump context" instruction which could fill in from high to low
+ *  or low to high based on the whim of the CPU designers.
+ *
+ */
+
+#define _CPU_Context_Fp_start( _base, _offset ) \
+   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
+
+/*
+ *  This routine initializes the FP context area passed to it to.
+ *  There are a few standard ways in which to initialize the
+ *  floating point context.  The code included for this macro assumes
+ *  that this is a CPU in which a "initial" FP context was saved into
+ *  _CPU_Null_fp_context and it simply copies it to the destination
+ *  context passed to it.
+ *
+ *  Other models include (1) not doing anything, and (2) putting
+ *  a "null FP status word" in the correct place in the FP context.
+ *
+ */
+
+#define _CPU_Context_Initialize_fp( _destination ) \
+  { \
+   *(*(_destination)) = _CPU_Null_fp_context; \
+  }
+
+/* end of Context handler macros */
+
+/* Fatal Error manager macros */
+
+/*
+ *  This routine copies _error into a known place -- typically a stack
+ *  location or a register, optionally disables interrupts, and
+ *  halts/stops the CPU.
+ *
+ */
+
+#define _CPU_Fatal_halt(_source, _error ) \
+          printk("Fatal Error %d.%d Halted\n",_source, _error); \
+          asm("trap 3" :: "r" (_error)); \
+          for(;;)
+
+/* end of Fatal Error manager macros */
+
+/* Bitfield handler macros */
+
+/*
+ *  This routine sets _output to the bit number of the first bit
+ *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
+ *  This type may be either 16 or 32 bits wide although only the 16
+ *  least significant bits will be used.
+ *
+ *  There are a number of variables in using a "find first bit" type
+ *  instruction.
+ *
+ *    (1) What happens when run on a value of zero?
+ *    (2) Bits may be numbered from MSB to LSB or vice-versa.
+ *    (3) The numbering may be zero or one based.
+ *    (4) The "find first bit" instruction may search from MSB or LSB.
+ *
+ *  RTEMS guarantees that (1) will never happen so it is not a concern.
+ *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
+ *  _CPU_Priority_bits_index().  These three form a set of routines
+ *  which must logically operate together.  Bits in the _value are
+ *  set and cleared based on masks built by _CPU_Priority_mask().
+ *  The basic major and minor values calculated by _Priority_Major()
+ *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
+ *  to properly range between the values returned by the "find first bit"
+ *  instruction.  This makes it possible for _Priority_Get_highest() to
+ *  calculate the major and directly index into the minor table.
+ *  This mapping is necessary to ensure that 0 (a high priority major/minor)
+ *  is the first bit found.
+ *
+ *  This entire "find first bit" and mapping process depends heavily
+ *  on the manner in which a priority is broken into a major and minor
+ *  components with the major being the 4 MSB of a priority and minor
+ *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
+ *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
+ *  to the lowest priority.
+ *
+ *  If your CPU does not have a "find first bit" instruction, then
+ *  there are ways to make do without it.  Here are a handful of ways
+ *  to implement this in software:
+ *
+ *    - a series of 16 bit test instructions
+ *    - a "binary search using if's"
+ *    - _number = 0
+ *      if _value > 0x00ff
+ *        _value >>=8
+ *        _number = 8;
+ *
+ *      if _value > 0x000f
+ *        _value >>= 4
+ *        _number += 4
+ *
+ *      _number += bit_set_table[ _value ]
+ *
+ *    where bit_set_table[ 16 ] has values which indicate the first
+ *      bit set
+ *
+ */
+
+  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
+#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
+#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
+
+#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
+
+#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
+  { \
+    (_output) = 0;   /* do something to prevent warnings */ \
+  }
+#endif
+
+/* end of Bitfield handler macros */
+
+/*
+ *  This routine builds the mask which corresponds to the bit fields
+ *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
+ *  for that routine.
+ *
+ */
+
+#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
+
+#define _CPU_Priority_Mask( _bit_number ) \
+    (1 << _bit_number)
+
+#endif
+
+/*
+ *  This routine translates the bit numbers returned by
+ *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
+ *  a major or minor component of a priority.  See the discussion
+ *  for that routine.
+ *
+ */
+
+#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
+
+#define _CPU_Priority_bits_index( _priority ) \
+  (_priority)
+
+#endif
+
+#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC FALSE
+#define CPU_TIMESTAMP_USE_INT64 TRUE
+#define CPU_TIMESTAMP_USE_INT64_INLINE FALSE
+
+typedef struct {
+/* There is no CPU specific per-CPU state */
+} CPU_Per_CPU_control;
+#endif /* ASM */
+
+/**
+ * Size of a pointer.
+ *
+ * This must be an integer literal that can be used by the assembler.  This
+ * value will be used to calculate offsets of structure members.  These
+ * offsets will be used in assembler code.
+ */
+#define CPU_SIZEOF_POINTER 4
+#define CPU_EXCEPTION_FRAME_SIZE 260
+#define CPU_PER_CPU_CONTROL_SIZE 0
+
+#ifndef ASM
+typedef uint16_t Priority_bit_map_Word;
+
+typedef struct {
+  uint32_t r[62];
+  uint32_t status;
+  uint32_t config;
+  uint32_t iret;
+} CPU_Exception_frame;
+
+/**
+ * @brief Prints the exception frame via printk().
+ *
+ * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
+ */
+void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
+
+
+/* end of Priority handler macros */
+
+/* functions */
+
+/*
+ *  _CPU_Initialize
+ *
+ *  This routine performs CPU dependent initialization.
+ *
+ */
+
+void _CPU_Initialize(
+  void
+);
+
+/*
+ *  _CPU_ISR_install_raw_handler
+ *
+ *  This routine installs a "raw" interrupt handler directly into the
+ *  processor's vector table.
+ *
+ */
+
+void _CPU_ISR_install_raw_handler(
+  uint32_t    vector,
+  proc_ptr    new_handler,
+  proc_ptr   *old_handler
+);
+
+/*
+ *  _CPU_ISR_install_vector
+ *
+ *  This routine installs an interrupt vector.
+ *
+ *  Epiphany Specific Information:
+ *
+ *  XXX document implementation including references if appropriate
+ */
+
+void _CPU_ISR_install_vector(
+  uint32_t    vector,
+  proc_ptr   new_handler,
+  proc_ptr   *old_handler
+);
+
+/*
+ *  _CPU_Install_interrupt_stack
+ *
+ *  This routine installs the hardware interrupt stack pointer.
+ *
+ *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
+ *         is TRUE.
+ *
+ */
+
+void _CPU_Install_interrupt_stack( void );
+
+/*
+ *  _CPU_Thread_Idle_body
+ *
+ *  This routine is the CPU dependent IDLE thread body.
+ *
+ *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
+ *         is TRUE.
+ *
+ */
+
+void _CPU_Thread_Idle_body( uintptr_t ignored )
+  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
+
+/*
+ *  _CPU_Context_switch
+ *
+ *  This routine switches from the run context to the heir context.
+ *
+ *  epiphany Specific Information:
+ *
+ *  Please see the comments in the .c file for a description of how
+ *  this function works. There are several things to be aware of.
+ */
+
+void _CPU_Context_switch(
+  Context_Control  *run,
+  Context_Control  *heir
+);
+
+/*
+ *  _CPU_Context_restore
+ *
+ *  This routine is generally used only to restart self in an
+ *  efficient manner.  It may simply be a label in _CPU_Context_switch.
+ *
+ *  NOTE: May be unnecessary to reload some registers.
+ *
+ */
+
+void _CPU_Context_restore(
+  Context_Control *new_context
+);
+
+/*
+ *  _CPU_Context_save_fp
+ *
+ *  This routine saves the floating point context passed to it.
+ *
+ */
+
+void _CPU_Context_save_fp(
+  void **fp_context_ptr
+);
+
+/*
+ *  _CPU_Context_restore_fp
+ *
+ *  This routine restores the floating point context passed to it.
+ *
+ */
+
+void _CPU_Context_restore_fp(
+  void **fp_context_ptr
+);
+
+/*  The following routine swaps the endian format of an unsigned int.
+ *  It must be static because it is referenced indirectly.
+ *
+ *  This version will work on any processor, but if there is a better
+ *  way for your CPU PLEASE use it.  The most common way to do this is to:
+ *
+ *     swap least significant two bytes with 16-bit rotate
+ *     swap upper and lower 16-bits
+ *     swap most significant two bytes with 16-bit rotate
+ *
+ *  Some CPUs have special instructions which swap a 32-bit quantity in
+ *  a single instruction (e.g. i486).  It is probably best to avoid
+ *  an "endian swapping control bit" in the CPU.  One good reason is
+ *  that interrupts would probably have to be disabled to insure that
+ *  an interrupt does not try to access the same "chunk" with the wrong
+ *  endian.  Another good reason is that on some CPUs, the endian bit
+ *  changes the endianness for ALL fetches -- both code and data -- so the code
+ *  will be fetched incorrectly.
+ *
+ */
+
/* Byte-swap a 32-bit value (endianness conversion).
 *
 * Generic C implementation: mask out each byte and move it to its
 * mirrored position.  Returns @a value with its four bytes reversed.
 */
static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  uint32_t swapped;

  swapped  = (value & 0x000000ffU) << 24;
  swapped |= (value & 0x0000ff00U) << 8;
  swapped |= (value & 0x00ff0000U) >> 8;
  swapped |= (value & 0xff000000U) >> 24;

  return swapped;
}
+
/* Byte-swap a 16-bit value.
 *
 * Every use of the macro argument is fully parenthesized so that operands
 * built from lower-precedence operators (e.g. CPU_swap_u16(a | b)) expand
 * correctly; note the argument is still evaluated twice.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
+
+/* Overwrite the volatile (caller-saved) registers with "pattern".
+ * NOTE(review): presumably used by the score context-validation tests to
+ * detect registers not preserved across a context switch -- confirm.
+ * Not yet implemented for Epiphany.
+ */
+static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
+{
+  /* TODO: clobber the Epiphany caller-saved registers with "pattern". */
+}
+
+/* Endlessly check the register contents against "pattern".
+ * NOTE(review): the port only spins here; the actual fill-and-verify of
+ * the registers is still TODO, so context corruption is never detected.
+ */
+static inline void _CPU_Context_validate( uintptr_t pattern )
+{
+  while (1) {
+    /* TODO: fill and re-check the registers using "pattern". */
+  }
+}
+
+typedef uint32_t CPU_Counter_ticks;
+
+CPU_Counter_ticks _CPU_Counter_read( void );
+
+/* Returns the number of counter ticks elapsed from "first" (the earlier
+ * sample) to "second" (the later sample).  Unsigned subtraction wraps
+ * modulo 2^32, so the result is also correct across a single counter
+ * overflow between the two samples.
+ */
+static inline CPU_Counter_ticks _CPU_Counter_difference(
+  CPU_Counter_ticks second,
+  CPU_Counter_ticks first
+)
+{
+  return second - first;
+}
+
+#ifdef RTEMS_SMP
+  /**
+   * @brief Performs CPU specific SMP initialization in the context of the boot
+   * processor.
+   *
+   * This function is invoked on the boot processor during system
+   * initialization.  All interrupt stacks are allocated at this point in case
+   * the CPU port allocates the interrupt stacks.  This function is called
+   * before _CPU_SMP_Start_processor() or _CPU_SMP_Finalize_initialization() is
+   * used.
+   *
+   * @return The count of physically or virtually available processors.
+   * Depending on the configuration the application may use not all processors.
+   */
+  uint32_t _CPU_SMP_Initialize( void );
+
+  /**
+   * @brief Starts a processor specified by its index.
+   *
+   * This function is invoked on the boot processor during system
+   * initialization.
+   *
+   * This function will be called after _CPU_SMP_Initialize().
+   *
+   * @param[in] cpu_index The processor index.
+   *
+   * @retval true Successful operation.
+   * @retval false Unable to start this processor.
+   */
+  bool _CPU_SMP_Start_processor( uint32_t cpu_index );
+
+  /**
+   * @brief Performs final steps of CPU specific SMP initialization in the
+   * context of the boot processor.
+   *
+   * This function is invoked on the boot processor during system
+   * initialization.
+   *
+   * This function will be called after all processors requested by the
+   * application have been started.
+   *
+   * @param[in] cpu_count The minimum value of the count of processors
+   * requested by the application configuration and the count of physically or
+   * virtually available processors.
+   */
+  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
+
+  /**
+   * @brief Returns the index of the current processor.
+   *
+   * An architecture specific method must be used to obtain the index of the
+   * current processor in the system.  The set of processor indices is the
+   * range of integers starting with zero up to the processor count minus one.
+   */
+   uint32_t _CPU_SMP_Get_current_processor( void );
+
+  /**
+   * @brief Sends an inter-processor interrupt to the specified target
+   * processor.
+   *
+   * This operation is undefined for target processor indices out of range.
+   *
+   * @param[in] target_processor_index The target processor index.
+   */
+  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
+
+  /**
+   * @brief Broadcasts a processor event.
+   *
+   * Some architectures provide a low-level synchronization primitive for
+   * processors in a multi-processor environment.  Processors waiting for this
+   * event may go into a low-power state and stop generating system bus
+   * transactions.  This function must ensure that preceding store operations
+   * can be observed by other processors.
+   *
+   * @see _CPU_SMP_Processor_event_receive().
+   */
+  void _CPU_SMP_Processor_event_broadcast( void );
+
+  /**
+   * @brief Receives a processor event.
+   *
+   * This function will wait for the processor event and may wait forever if no
+   * such event arrives.
+   *
+   * @see _CPU_SMP_Processor_event_broadcast().
+   */
+  static inline void _CPU_SMP_Processor_event_receive( void )
+  {
+    /* Compiler memory barrier only: forces memory to be re-read after the
+     * wait point.  NOTE(review): no low-power wait instruction is issued
+     * here -- confirm whether busy polling by the caller is intended.
+     */
+    __asm__ volatile ( "" : : : "memory" );
+  }
+
+  /**
+   * @brief Gets the is executing indicator of the thread context.
+   *
+   * Only available if RTEMS_SMP is defined.
+   *
+   * @param[in] context The context.
+   *
+   * @return The current value of the is executing indicator.
+   */
+  static inline bool _CPU_Context_Get_is_executing(
+    const Context_Control *context
+  )
+  {
+    return context->is_executing;
+  }
+
+  /**
+   * @brief Sets the is executing indicator of the thread context.
+   *
+   * Only available if RTEMS_SMP is defined.
+   *
+   * @param[in] context The context.
+   * @param[in] is_executing The new value for the is executing indicator.
+   */
+  static inline void _CPU_Context_Set_is_executing(
+    Context_Control *context,
+    bool is_executing
+  )
+  {
+    context->is_executing = is_executing;
+  }
+#endif /* RTEMS_SMP */
+
+#endif /* ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h b/cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h
new file mode 100644
index 0000000..cc091fa
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/rtems/score/cpu_asm.h
@@ -0,0 +1,74 @@
+/**
+ * @file
+ *
+ * @brief Epiphany Assembly File
+ *
+ * Very loose template for an include file for the cpu_asm.? file
+ * if it is implemented as a ".S" file (preprocessed by cpp) instead
+ * of a ".s" file (preprocessed by gm4 or gasp).
+ */
+
+/*
+ *  COPYRIGHT (c) 1989-1999.
+ *  On-Line Applications Research Corporation (OAR).
+ *
+ *  The license and distribution terms for this file may be
+ *  found in the file LICENSE in this distribution or at
+ *  http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+#ifndef _RTEMS_SCORE_CPU_ASM_H
+#define _RTEMS_SCORE_CPU_ASM_H
+
+/* pull in the generated offsets */
+
+/*
+#include <rtems/score/offsets.h>
+*/
+
+/*
+ * Hardware General Registers
+ */
+
+/* put something here */
+
+/*
+ * Hardware Floating Point Registers
+ */
+
+/* put something here */
+
+/*
+ * Hardware Control Registers
+ */
+
+/* put something here */
+
+/*
+ * Calling Convention
+ */
+
+/* put something here */
+
+/*
+ * Temporary registers
+ */
+
+/* put something here */
+
+/*
+ * Floating Point Registers - SW Conventions
+ */
+
+/* put something here */
+
+/*
+ * Temporary floating point registers
+ */
+
+/* put something here */
+
+#endif
+
+/* end of file */
diff --git a/cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h b/cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h
new file mode 100644
index 0000000..598ee76
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/rtems/score/cpuatomic.h
@@ -0,0 +1,14 @@
+/*
+ * COPYRIGHT (c) 2012-2013 Deng Hengyi.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
+#define _RTEMS_SCORE_ATOMIC_CPU_H
+
+#include <rtems/score/cpustdatomic.h>
+
+#endif /* _RTEMS_SCORE_ATOMIC_CPU_H */
diff --git a/cpukit/score/cpu/epiphany/rtems/score/epiphany-utility.h b/cpukit/score/cpu/epiphany/rtems/score/epiphany-utility.h
new file mode 100644
index 0000000..3ce865f
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/rtems/score/epiphany-utility.h
@@ -0,0 +1,180 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreCPU
+ *
+ * @brief This include file contains macros pertaining to the
+ *  Epiphany processor family.
+ */
+
+/*
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _EPIPHANY_UTILITY_H
+#define _EPIPHANY_UTILITY_H
+
+/* eCore interrupt (IRQ) sources.  Enumerator values follow declaration
+ * order, i.e. START == 0 through SER == 8; presumably these match the
+ * per-core interrupt vector table entries -- confirm against the
+ * Epiphany architecture reference.
+ */
+typedef enum
+{
+  START,
+  SW_EXCEPTION,
+  MEM_FAULT,
+  TIMER0,
+  TIMER1,
+  SMP_MESSAGE,
+  DMA0,
+  DMA1,
+  SER,
+} EPIPHANY_IRQ_PER_CORE_T;
+
+/*  Per-core IO mapped register addresses
+ *  @see Epiphany architecture reference.
+ */
+#define EPIPHANY_PER_CORE_REG_CONFIG      0xF0400
+#define EPIPHANY_PER_CORE_REG_STATUS      0xF0404
+#define EPIPHANY_PER_CORE_REG_PC          0xF0408
+#define EPIPHANY_PER_CORE_REG_DEBUGSTATUS 0xF040C
+#define EPIPHANY_PER_CORE_REG_LC          0xF0414
+#define EPIPHANY_PER_CORE_REG_LS          0xF0418
+#define EPIPHANY_PER_CORE_REG_LE          0xF041C
+#define EPIPHANY_PER_CORE_REG_IRET        0xF0420
+#define EPIPHANY_PER_CORE_REG_IMASK       0xF0424
+#define EPIPHANY_PER_CORE_REG_ILAT        0xF0428
+#define EPIPHANY_PER_CORE_REG_ILATST      0xF042C
+#define EPIPHANY_PER_CORE_REG_ILATCL      0xF0430
+#define EPIPHANY_PER_CORE_REG_IPEND       0xF0434
+#define EPIPHANY_PER_CORE_REG_FSTATUS     0xF0440
+#define EPIPHANY_PER_CORE_REG_DEBUGCMD    0xF0448
+#define EPIPHANY_PER_CORE_REG_RESETCORE   0xF070C
+
+/* Event timer registers */
+#define EPIPHANY_PER_CORE_REG_CTIMER0     0xF0438
+#define EPIPHANY_PER_CORE_REG_CTIMER1     0xF043C
+
+/* Processor control registers */
+#define EPIPHANY_PER_CORE_REG_MEMSTATUS   0xF0604
+#define EPIPHANY_PER_CORE_REG_MEMPROTECT  0xF0608
+
+/* DMA Registers */
+#define EPIPHANY_PER_CORE_REG_DMA0CONFIG  0xF0500
+#define EPIPHANY_PER_CORE_REG_DMA0STRIDE  0xF0504
+#define EPIPHANY_PER_CORE_REG_DMA0COUNT   0xF0508
+#define EPIPHANY_PER_CORE_REG_DMA0SRCADDR 0xF050C
+#define EPIPHANY_PER_CORE_REG_DMA0DSTADDR 0xF0510
+#define EPIPHANY_PER_CORE_REG_DMA0AUTO0   0xF0514
+#define EPIPHANY_PER_CORE_REG_DMA0AUTO1   0xF0518
+#define EPIPHANY_PER_CORE_REG_DMA0STATUS  0xF051C
+#define EPIPHANY_PER_CORE_REG_DMA1CONFIG  0xF0520
+#define EPIPHANY_PER_CORE_REG_DMA1STRIDE  0xF0524
+#define EPIPHANY_PER_CORE_REG_DMA1COUNT   0xF0528
+#define EPIPHANY_PER_CORE_REG_DMA1SRCADDR 0xF052C
+#define EPIPHANY_PER_CORE_REG_DMA1DSTADDR 0xF0530
+#define EPIPHANY_PER_CORE_REG_DMA1AUTO0   0xF0534
+#define EPIPHANY_PER_CORE_REG_DMA1AUTO1   0xF0538
+#define EPIPHANY_PER_CORE_REG_DMA1STATUS  0xF053C
+
+/* Mesh Node Control Registers */
+#define EPIPHANY_PER_CORE_REG_MESHCONFIG  0xF0700
+#define EPIPHANY_PER_CORE_REG_COREID      0xF0704
+#define EPIPHANY_PER_CORE_REG_MULTICAST   0xF0708
+#define EPIPHANY_PER_CORE_REG_CMESHROUTE  0xF0710
+#define EPIPHANY_PER_CORE_REG_XMESHROUTE  0xF0714
+#define EPIPHANY_PER_CORE_REG_RMESHROUTE  0xF0718
+
/*  This macro constructs the most significant address bits of an
 *  Epiphany core's address space from its core ID.
 *
 *  NOTE: The expansion is fully parenthesized so the macro is safe inside
 *  larger expressions; previously "(id) << 20" combined with a following
 *  "+ n" would have shifted by 20 + n, because + binds tighter than <<.
 */
#define EPIPHANY_COREID_TO_MSB_ADDR(id) (((id)) << 20)

/*  Construct a complete/absolute IO mapped address register from
 *  core ID and register name.
 */
#define EPIPHANY_GET_REG_ABSOLUTE_ADDR(coreid, reg) \
        (EPIPHANY_COREID_TO_MSB_ADDR(coreid) | (reg))

/*  Treat an address value as a pointer to a 32-bit register.  The outer
 *  parentheses keep postfix operators (e.g. []) from binding to the
 *  cast's operand instead of the whole expression.
 */
#define EPIPHANY_REG(reg) ((uint32_t *) (reg))
+
/* Read a memory mapped register given its absolute address.
 *
 * The load goes through a pointer to volatile uint32_t so the compiler
 * must perform a fresh read on every call; the previous code qualified
 * only the (by-value) parameter as volatile, which does not prevent the
 * pointed-to access from being cached or elided.
 */
static inline uint32_t read_epiphany_reg(volatile uint32_t reg_addr)
{
  return *(volatile uint32_t *) reg_addr;
}
+
/* Write a memory mapped register given its absolute address.
 *
 * The store goes through a pointer to volatile uint32_t so the compiler
 * cannot reorder or elide the hardware access; the previous code only
 * marked the (by-value) parameter volatile, which has no such effect.
 */
static inline void write_epiphany_reg(volatile uint32_t reg_addr, uint32_t val)
{
  *(volatile uint32_t *) reg_addr = val;
}
+
/*  Epiphany uses 12 bits for defining core IDs, while RTEMS uses
 *  linear IDs.  This table maps a linear RTEMS ID (0..15) to the
 *  corresponding Epiphany core ID: entries form a 4x4 mesh starting at
 *  0x808, columns differing by 1 and rows by 0x40.
 *
 *  NOTE(review): as "static const" in a header every translation unit
 *  gets its own copy of this table; consider moving it to a .c file.
 */
static const uint32_t map[16] =
{
   0x808, 0x809, 0x80A, 0x80B,
   0x848, 0x849, 0x84A, 0x84B,
   0x888, 0x889, 0x88A, 0x88B,
   0x8C8, 0x8C9, 0x8CA, 0x8CB
};

/*  Convert an RTEMS linear processor index to an Epiphany core ID.
 *
 *  The index is reduced modulo 16 so an out-of-range ID cannot read past
 *  the end of the table (the previous code indexed it unchecked).
 */
static inline uint32_t rtems_coreid_to_epiphany_map(uint32_t rtems_id)
{
  return map[rtems_id & 0xF];
}
+
/* Epiphany uses 12 bits for defining core IDs, while RTEMS uses
 * linear IDs.  Convert an Epiphany core ID to the RTEMS linear ID.
 *
 * The supported core IDs form a 4x4 mesh starting at 0x808: the low two
 * bits select the column and bits 4..7 select the row (in steps of 4),
 * hence
 *
 *   rtems_id = (id & 0x3) + ((id & 0xF0) >> 4)
 *
 * which is exactly the arithmetic the previous inline assembly performed.
 * That assembly, however, re-read the COREID register with "movfs" and so
 * ignored the "epiphany_id" argument, and it clobbered r19/r20 without
 * declaring them to the compiler.  Plain C is correct, portable, and lets
 * the compiler allocate registers.
 */
static inline uint32_t epiphany_coreid_to_rtems_map(uint32_t epiphany_id)
{
  return (epiphany_id & 0x3) + ((epiphany_id & 0xF0) >> 4);
}
+
/* Return the RTEMS linear index of the executing processor.
 *
 * Reads the Epiphany COREID special register with "movfs" and translates
 * it via epiphany_coreid_to_rtems_map().  The parameter list is "(void)"
 * rather than "()" so the declaration is a proper C prototype, and
 * "__asm__" is used for consistency with the rest of the file.
 */
static inline uint32_t _Epiphany_Get_current_processor( void )
{
  uint32_t coreid;

  __asm__ volatile ("movfs %0, coreid" : "=r" (coreid): );

  return epiphany_coreid_to_rtems_map(coreid);
}
+#endif  /* _EPIPHANY_UTILITY_H */
diff --git a/cpukit/score/cpu/epiphany/rtems/score/epiphany.h b/cpukit/score/cpu/epiphany/rtems/score/epiphany.h
new file mode 100644
index 0000000..60d9755
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/rtems/score/epiphany.h
@@ -0,0 +1,64 @@
+/**
+ * @file rtems/score/epiphany.h
+ */
+
+/*
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * COPYRIGHT (c) 1989-1999, 2010.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_EPIPHANY_H
+#define _RTEMS_SCORE_EPIPHANY_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ *  This file contains the information required to build
+ *  RTEMS for a particular member of the Epiphany family.
+ *  It does this by setting variables to indicate which
+ *  implementation dependent features are present in a particular
+ *  member of the family.
+ *
+ *  This is a good place to list all the known CPU models
+ *  that this port supports and which RTEMS CPU model they correspond
+ *  to.
+ */
+
+/*
+ *  Define the name of the CPU family and specific model.
+ */
+
+#define CPU_NAME "EPIPHANY"
+#define CPU_MODEL_NAME "EPIPHANY"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_EPIPHANY_H */
diff --git a/cpukit/score/cpu/epiphany/rtems/score/types.h b/cpukit/score/cpu/epiphany/rtems/score/types.h
new file mode 100644
index 0000000..5b6c503
--- /dev/null
+++ b/cpukit/score/cpu/epiphany/rtems/score/types.h
@@ -0,0 +1,68 @@
+/**
+ * @file
+ *
+ * @brief Epiphany Architecture Types API
+ */
+
+/*
+ * Copyright (c) 2015 University of York.
+ * Hesham ALMatary <hmka501 at york.ac.uk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_SCORE_TYPES_H
+#define _RTEMS_SCORE_TYPES_H
+
+#include <rtems/score/basedefs.h>
+
+#ifndef ASM
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup ScoreCPU
+ */
+/**@{**/
+
+/*
+ *  This section defines the basic types for this processor.
+ */
+
+/** Type that can store a 32-bit integer or a pointer. */
+typedef uintptr_t CPU_Uint32ptr;
+
+typedef uint16_t Priority_bit_map_Word;
+typedef void epiphany_isr;
+typedef void ( *epiphany_isr_entry )( void );
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* !ASM */
+
+#endif
-- 
2.1.0



More information about the devel mailing list