[rtems commit] bsps/powerpc: Fix inline assembly

Sebastian Huber sebh at rtems.org
Sun Jul 5 16:00:53 UTC 2020


Module:    rtems
Branch:    master
Commit:    9b3b33d91a4615175852aee5d2f44df0a9fd1e87
Changeset: http://git.rtems.org/rtems/commit/?id=9b3b33d91a4615175852aee5d2f44df0a9fd1e87

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue Apr  7 08:26:00 2020 +0200

bsps/powerpc: Fix inline assembly

GCC 10 no longer passes -many to the assembler.  This enables stricter
opcode checks in the assembler, so inline assembly that uses
implementation-specific instructions (for example the e500 msync, tlbre,
tlbwe, tlbsx, mtivpr, and mtivor instructions) must now select the
target machine explicitly via ".machine" directives.

---

 bsps/powerpc/include/libcpu/powerpc-utility.h      | 73 ++++++++++++++++++++++
 bsps/powerpc/qoriq/start/bspstart.c                |  5 +-
 bsps/powerpc/qoriq/start/mmu.c                     |  7 ++-
 .../powerpc/shared/exceptions/ppc_exc_initialize.c | 45 +++++++------
 bsps/powerpc/shared/mmu/e500-mmu.c                 | 31 +++++----
 5 files changed, 114 insertions(+), 47 deletions(-)

diff --git a/bsps/powerpc/include/libcpu/powerpc-utility.h b/bsps/powerpc/include/libcpu/powerpc-utility.h
index 60cfe85..2827430 100644
--- a/bsps/powerpc/include/libcpu/powerpc-utility.h
+++ b/bsps/powerpc/include/libcpu/powerpc-utility.h
@@ -860,6 +860,79 @@ static inline uint32_t ppc_fsl_system_version_mnrev(uint32_t svr)
   return (svr >> 0) & 0xf;
 }
 
+static inline void ppc_msync(void)
+{
+  __asm__ volatile (
+    ".machine push\n"
+    ".machine e500\n"
+    "msync\n"
+    ".machine pop"
+    :
+    :
+    : "memory"
+  );
+}
+
+static inline void ppc_tlbre(void)
+{
+  __asm__ volatile (
+    ".machine push\n"
+    ".machine e500\n"
+    "tlbre\n"
+    ".machine pop"
+    :
+    :
+    : "memory"
+  );
+}
+
+static inline void ppc_tlbwe(void)
+{
+  __asm__ volatile (
+    ".machine push\n"
+    ".machine e500\n"
+    "tlbwe\n"
+    ".machine pop"
+    :
+    :
+    : "memory"
+  );
+}
+
+static inline void ppc_tlbsx(void *addr)
+{
+  __asm__ volatile (
+    ".machine push\n"
+    ".machine e500\n"
+    "tlbsx 0, %0\n"
+    ".machine pop"
+    :
+    : "r" (addr)
+    : "memory"
+  );
+}
+
+static inline void ppc_mtivpr(void *prefix)
+{
+  __asm__ volatile (
+    ".machine push\n"
+    ".machine e500\n"
+    "mtivpr %0\n"
+    ".machine pop"
+    :
+    : "r" (prefix)
+  );
+}
+
+#define ppc_mtivor(x, vec) __asm__ volatile ( \
+    ".machine push\n" \
+    ".machine e500\n" \
+    "mtivor" RTEMS_XSTRING(x) " %0\n" \
+    ".machine pop" \
+    : \
+    : "r" (vec) \
+  )
+
 void ppc_code_copy(void *dest, const void *src, size_t n);
 
 /* FIXME: Do not use this function */
diff --git a/bsps/powerpc/qoriq/start/bspstart.c b/bsps/powerpc/qoriq/start/bspstart.c
index 3c75ecf..5abd651 100644
--- a/bsps/powerpc/qoriq/start/bspstart.c
+++ b/bsps/powerpc/qoriq/start/bspstart.c
@@ -93,9 +93,6 @@ static void initialize_frequency_parameters(void)
   #endif
 }
 
-#define MTIVPR(base) \
-  __asm__ volatile ("mtivpr %0" : : "r" (base))
-
 #ifdef __powerpc64__
 #define VECTOR_TABLE_ENTRY_SIZE 32
 #else
@@ -117,7 +114,7 @@ void qoriq_initialize_exceptions(void *interrupt_stack_begin)
   );
 
   addr = (uintptr_t) bsp_exc_vector_base;
-  MTIVPR(addr);
+  ppc_mtivpr((void *) addr);
   MTIVOR(BOOKE_IVOR0,  addr);
   MTIVOR(BOOKE_IVOR1,  addr);
   MTIVOR(BOOKE_IVOR2,  addr);
diff --git a/bsps/powerpc/qoriq/start/mmu.c b/bsps/powerpc/qoriq/start/mmu.c
index b912613..558c496 100644
--- a/bsps/powerpc/qoriq/start/mmu.c
+++ b/bsps/powerpc/qoriq/start/mmu.c
@@ -350,7 +350,7 @@ void qoriq_mmu_change_perm(uint32_t test, uint32_t set, uint32_t clear)
 		uint32_t mas1 = 0;
 
 		PPC_SET_SPECIAL_PURPOSE_REGISTER(FSL_EIS_MAS0, mas0);
-		asm volatile ("tlbre");
+		ppc_tlbre();
 
 		mas1 = PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_MAS1);
 		if ((mas1 & FSL_EIS_MAS1_V) != 0) {
@@ -361,7 +361,10 @@ void qoriq_mmu_change_perm(uint32_t test, uint32_t set, uint32_t clear)
 				mas3 &= ~(clear & mask);
 				mas3 |= set & mask;
 				PPC_SET_SPECIAL_PURPOSE_REGISTER(FSL_EIS_MAS3, mas3);
-				asm volatile ("msync; isync; tlbwe; isync" : : : "memory");
+				ppc_msync();
+				ppc_synchronize_instructions();
+				ppc_tlbwe();
+				ppc_synchronize_instructions();
 			}
 		}
 	}
diff --git a/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c b/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c
index 215918a..46b7252 100644
--- a/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c
+++ b/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c
@@ -29,13 +29,10 @@
 
 uint32_t ppc_exc_cache_wb_check = 1;
 
-#define MTIVPR(prefix) __asm__ volatile ("mtivpr %0" : : "r" (prefix))
-#define MTIVOR(x, vec) __asm__ volatile ("mtivor"#x" %0" : : "r" (vec))
-
 static void ppc_exc_initialize_booke(void *vector_base)
 {
   /* Interrupt vector prefix register */
-  MTIVPR((uintptr_t) vector_base);
+  ppc_mtivpr(vector_base);
 
   if (
     ppc_cpu_is_specific_e200(PPC_e200z0)
@@ -49,29 +46,29 @@ static void ppc_exc_initialize_booke(void *vector_base)
   }
 
   /* Interupt vector offset registers */
-  MTIVOR(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
-  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
-  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
-  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
-  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
-  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
-  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
-  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
-  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
-  MTIVOR(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
-  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
-  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
-  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
-  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
-  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
-  MTIVOR(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));
+  ppc_mtivor(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
+  ppc_mtivor(1,  ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
+  ppc_mtivor(2,  ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
+  ppc_mtivor(3,  ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
+  ppc_mtivor(4,  ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
+  ppc_mtivor(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
+  ppc_mtivor(6,  ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
+  ppc_mtivor(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
+  ppc_mtivor(8,  ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
+  ppc_mtivor(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
+  ppc_mtivor(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
+  ppc_mtivor(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
+  ppc_mtivor(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
+  ppc_mtivor(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
+  ppc_mtivor(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
+  ppc_mtivor(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));
   if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
-    MTIVOR(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
-    MTIVOR(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
-    MTIVOR(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
+    ppc_mtivor(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
+    ppc_mtivor(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
+    ppc_mtivor(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
   }
   if (ppc_cpu_is_specific_e200(PPC_e200z7) || ppc_cpu_is_e500()) {
-    MTIVOR(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
+    ppc_mtivor(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
   }
 }
 
diff --git a/bsps/powerpc/shared/mmu/e500-mmu.c b/bsps/powerpc/shared/mmu/e500-mmu.c
index 15fe88f..ee69172 100644
--- a/bsps/powerpc/shared/mmu/e500-mmu.c
+++ b/bsps/powerpc/shared/mmu/e500-mmu.c
@@ -75,6 +75,7 @@
 #include <stdio.h>
 
 #include <libcpu/e500_mmu.h>
+#include <libcpu/powerpc-utility.h>
 
 #define TLBIVAX_TLBSEL  (1<<(63-60))
 #define TLBIVAX_INV_ALL  (1<<(63-61))
@@ -215,7 +216,7 @@ rtems_e500_prtlb(rtems_e500_tlb_idx key, int quiet, FILE *f)
 
   seltlb( key );
 
-  asm volatile("tlbre");
+  ppc_tlbre();
 
   /* not manipulating MAS0, skip reading it */
   mas1 = _read_MAS1();
@@ -433,13 +434,11 @@ rtems_interrupt_level lvl;
   _write_MAS3(mas3);
   _write_MAS4(mas4);
 
-  asm volatile(
-    "  sync\n"
-    "  isync\n"
-    "  tlbwe\n"
-    "  sync\n"
-    "  isync\n"
-  );
+  ppc_synchronize_data();
+  ppc_synchronize_instructions();
+  ppc_tlbwe();
+  ppc_synchronize_data();
+  ppc_synchronize_instructions();
 
   rtems_interrupt_enable(lvl);
 
@@ -536,7 +535,7 @@ rtems_interrupt_level lvl;
 
     _write_MAS6( FSL_EIS_MAS6_SPID0(pid) | (as ? FSL_EIS_MAS6_SAS : 0 ) );
 
-    asm volatile("tlbsx 0, %0"::"r"(ea));
+    ppc_tlbsx((void *)(uintptr_t) ea);
 
     mas1 = _read_MAS1();
 
@@ -608,18 +607,16 @@ rtems_interrupt_level lvl;
 
   seltlb(key);
 
-  asm volatile("tlbre");
+  ppc_tlbre();
 
   /* read old entries */
   _write_MAS1( _read_MAS1() & ~FSL_EIS_MAS1_V );
 
-  asm volatile(
-    "  sync\n"
-    "  isync\n"
-    "  tlbwe\n"
-    "  sync\n"
-    "  isync\n"
-  );
+  ppc_synchronize_data();
+  ppc_synchronize_instructions();
+  ppc_tlbwe();
+  ppc_synchronize_data();
+  ppc_synchronize_instructions();
 
   /* update cache */
   if ( E500_SELTLB_1 & key )



More information about the vc mailing list