[rtems commit] bsps/powerpc: Fix tlbie instruction usage
Sebastian Huber
sebh at rtems.org
Sun Jul 5 16:00:53 UTC 2020
Module: rtems
Branch: master
Commit: 9b5af6a47f799952c178967e04e83053bef57422
Changeset: http://git.rtems.org/rtems/commit/?id=9b5af6a47f799952c178967e04e83053bef57422
Author: Sebastian Huber <sebastian.huber at embedded-brains.de>
Date: Wed Apr 8 13:19:51 2020 +0200
bsps/powerpc: Fix tlbie instruction usage
GCC 10 no longer passes -many to the assembler. Without -many, the assembler
performs stricter instruction and operand checks.
The 0 added to the tlbie instruction is the L operand, which selects a 4KiB
page size.
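For illustration, a minimal sketch of the resulting inline assembly; it mirrors
the _tlbie() helper patched in bsps/powerpc/include/libcpu/mmu.h, but the
function name tlbie_4k and the "memory" clobber are chosen here for clarity and
are not part of the tree:

  /* Invalidate the TLB entry for effective address ea.  The explicit second
   * operand is L = 0, selecting the 4KiB page size, which the assembler
   * requires once GCC 10 stops passing -many. */
  static inline void tlbie_4k(unsigned long ea)
  {
    __asm__ volatile ("tlbie %0, 0" : : "r" (ea) : "memory");
  }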
---
bsps/powerpc/gen5200/start/start.S | 2 +-
bsps/powerpc/gen83xx/start/cpuinit.c | 2 +-
bsps/powerpc/include/libcpu/mmu.h | 2 +-
bsps/powerpc/motorola_powerpc/bootloader/head.S | 2 +-
bsps/powerpc/motorola_powerpc/bootloader/mm.c | 4 ++--
bsps/powerpc/mvme5500/start/start.S | 2 +-
bsps/powerpc/shared/mmu/pte121.c | 11 ++++++-----
bsps/powerpc/shared/start/start.S | 2 +-
8 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/bsps/powerpc/gen5200/start/start.S b/bsps/powerpc/gen5200/start/start.S
index 9e9e504..fbb58cd 100644
--- a/bsps/powerpc/gen5200/start/start.S
+++ b/bsps/powerpc/gen5200/start/start.S
@@ -775,7 +775,7 @@ TLB_init: /* Initialize translation lookaside buffers (TLBs) */
xor r29, r29, r29
TLB_init_loop:
- tlbie r29
+ tlbie r29, 0
tlbsync
addi r29, r29, 0x1000
addi r30, r30, 0x01
diff --git a/bsps/powerpc/gen83xx/start/cpuinit.c b/bsps/powerpc/gen83xx/start/cpuinit.c
index 1b0fd1e..ad8fe98 100644
--- a/bsps/powerpc/gen83xx/start/cpuinit.c
+++ b/bsps/powerpc/gen83xx/start/cpuinit.c
@@ -133,7 +133,7 @@ static void clear_mmu_regs( void)
/* Clear TLBs */
for (i = 0;i < 32;i++) {
- __asm__ volatile( "tlbie %0\n" : : "r" (i << (31 - 19)));
+ __asm__ volatile( "tlbie %0, 0\n" : : "r" (i << (31 - 19)));
}
}
diff --git a/bsps/powerpc/include/libcpu/mmu.h b/bsps/powerpc/include/libcpu/mmu.h
index d308131..6e7abb1 100644
--- a/bsps/powerpc/include/libcpu/mmu.h
+++ b/bsps/powerpc/include/libcpu/mmu.h
@@ -165,7 +165,7 @@ typedef struct _MMU_context {
/* invalidate a TLB entry */
static inline void _tlbie(unsigned long va)
{
- asm volatile ("tlbie %0" : : "r"(va));
+ asm volatile ("tlbie %0, 0" : : "r"(va));
}
extern void _tlbia(void); /* invalidate all TLB entries */
diff --git a/bsps/powerpc/motorola_powerpc/bootloader/head.S b/bsps/powerpc/motorola_powerpc/bootloader/head.S
index 974b78a..b7e423e 100644
--- a/bsps/powerpc/motorola_powerpc/bootloader/head.S
+++ b/bsps/powerpc/motorola_powerpc/bootloader/head.S
@@ -383,7 +383,7 @@ MMUoff: blr
flush_tlb:
lis r11,0x1000
1: addic. r11,r11,-0x1000
- tlbie r11
+ tlbie r11, 0
bnl 1b
/* tlbsync is not implemented on 601, so use sync which seems to be a superset
* of tlbsync in all cases and do not bother with CPU dependant code
diff --git a/bsps/powerpc/motorola_powerpc/bootloader/mm.c b/bsps/powerpc/motorola_powerpc/bootloader/mm.c
index 1b3df41..2675396 100644
--- a/bsps/powerpc/motorola_powerpc/bootloader/mm.c
+++ b/bsps/powerpc/motorola_powerpc/bootloader/mm.c
@@ -199,7 +199,7 @@ void _handler(int vec, ctxt *p) {
flushva |= ((hte[i].key<<21)&0xf0000000)
| ((hte[i].key<<22)&0x0fc00000);
hte[i].key=0;
- asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
+ asm volatile("sync; tlbie %0, 0; sync" : : "r" (flushva));
found:
hte[i].rpn = rpn;
asm volatile("eieio": : );
@@ -583,7 +583,7 @@ void vflush(map *virtmap) {
| ((p[i].key<<22)&0x0fc00000);
if (va>=virtmap->base && va<=virtmap->end) {
p[i].key=0;
- asm volatile("sync; tlbie %0; sync" : :
+ asm volatile("sync; tlbie %0, 0; sync" : :
"r" (va));
}
}
diff --git a/bsps/powerpc/mvme5500/start/start.S b/bsps/powerpc/mvme5500/start/start.S
index c948c9c..7e33914 100644
--- a/bsps/powerpc/mvme5500/start/start.S
+++ b/bsps/powerpc/mvme5500/start/start.S
@@ -193,7 +193,7 @@ _return_to_ppcbug:
flush_tlbs:
lis r20, 0x1000
1: addic. r20, r20, -0x1000
- tlbie r20
+ tlbie r20, 0
bgt 1b
sync
blr
diff --git a/bsps/powerpc/shared/mmu/pte121.c b/bsps/powerpc/shared/mmu/pte121.c
index 93ef909..778d635 100644
--- a/bsps/powerpc/shared/mmu/pte121.c
+++ b/bsps/powerpc/shared/mmu/pte121.c
@@ -122,9 +122,10 @@
* instructions in order to flush all TLBs.
* On the 750 and 7400, there are 128 two way I and D TLBs,
* indexed by EA[14:19]. Hence calling
- * tlbie rx
+ * tlbie rx, 0
* where rx scans 0x00000, 0x01000, 0x02000, ... 0x3f000
- * is sufficient to do the job
+ * is sufficient to do the job. The 0 in the tlbie instruction is the L operand
+ * which selects a 4KiB page size.
*/
#define NUM_TLB_PER_WAY 64 /* 750 and 7400 have 128 two way TLBs */
#define FLUSH_EA_RANGE (NUM_TLB_PER_WAY<<LD_PG_SIZE)
@@ -640,7 +641,7 @@ triv121PgTblActivate (Triv121PgTbl pt)
/* Now flush all TLBs, starting with the topmost index */
" lis %[tmp2], %[ea_range]@h\n"
"2: addic. %[tmp2], %[tmp2], -%[pg_sz]\n" /* address the next one (decrementing) */
- " tlbie %[tmp2]\n" /* invalidate & repeat */
+ " tlbie %[tmp2], 0\n" /* invalidate & repeat */
" bgt 2b\n"
" eieio \n"
" tlbsync \n"
@@ -872,7 +873,7 @@ triv121UnmapEa (unsigned long ea)
pte->v = 0;
do_dssall ();
__asm__ volatile (" sync \n\t"
- " tlbie %0 \n\t"
+ " tlbie %0, 0 \n\t"
" eieio \n\t"
" tlbsync \n\t"
" sync \n\t"::"r" (ea):"memory");
@@ -960,7 +961,7 @@ triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
pte->wimg = wimg;
if (pp >= 0)
pte->pp = pp;
- __asm__ volatile ("tlbie %0; eieio"::"r" (ea):"memory");
+ __asm__ volatile ("tlbie %0, 0; eieio"::"r" (ea):"memory");
pte->v = 1;
__asm__ volatile ("tlbsync; sync":::"memory");
diff --git a/bsps/powerpc/shared/start/start.S b/bsps/powerpc/shared/start/start.S
index 76d4fc3..eb91a6c 100644
--- a/bsps/powerpc/shared/start/start.S
+++ b/bsps/powerpc/shared/start/start.S
@@ -192,7 +192,7 @@ _return_to_ppcbug:
flush_tlbs:
lis r20, 0x1000
1: addic. r20, r20, -0x1000
- tlbie r20
+ tlbie r20, 0
bgt 1b
sync
blr
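The flush loops patched above (in the start files and in pte121.c) all follow
the pattern described in the pte121.c comment: step the effective address
through one 4KiB page per TLB set and invalidate each one. A minimal C
rendering of that pattern, assuming the 64 sets per way of the 750/7400
mentioned in that comment (a sketch only, not code from the tree):

  /* Flush all TLB entries on a 750/7400-class CPU by issuing tlbie for one
   * page-aligned effective address per TLB set (64 sets, 4KiB pages), then
   * order the invalidations with eieio/tlbsync/sync as pte121.c does. */
  static inline void flush_all_tlbs_60x(void)
  {
    unsigned long ea;

    for (ea = 0; ea < 64UL * 4096UL; ea += 4096UL) {
      __asm__ volatile ("tlbie %0, 0" : : "r" (ea) : "memory");
    }

    __asm__ volatile ("eieio; tlbsync; sync" : : : "memory");
  }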