[rtems commit] bsps/shared/xnandpsu: Add opportunistic page cache

Joel Sherrill joel at rtems.org
Mon Mar 11 17:10:03 UTC 2024


Module:    rtems
Branch:    master
Commit:    33379dcfc40ecebfe255aeb4256906641a9fbcb1
Changeset: http://git.rtems.org/rtems/commit/?id=33379dcfc40ecebfe255aeb4256906641a9fbcb1

Author:    Kinsey Moore <kinsey.moore at oarcorp.com>
Date:      Fri Mar  1 15:39:27 2024 -0600

bsps/shared/xnandpsu: Add opportunistic page cache

Add an opportunistic page cache to the xnandpsu driver, since the driver
does not implement partial page reads and common filesystem access
patterns perform multiple reads from the same page. When used with
JFFS2, this has been seen to provide a 10x improvement in read speed
and a 2x speedup of first initialization.
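
The change amounts to a single-entry page cache: the driver records the
index of the last page it pulled into its internal partial-read buffer
(PartialDataBuf) and serves repeat reads of that page from memory instead
of re-reading the flash. Below is a minimal standalone sketch of the same
state machine; the page_cache struct, the cached_read_page() and
cached_write_invalidate() helpers, the read_page_fn callback, and
PAGE_SIZE are hypothetical names used only for illustration. For brevity
the sketch caches every page it reads, whereas the patch only records a
page whose data already landed in PartialDataBuf, so whole-page reads
never pay an extra copy.

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE               2048U /* hypothetical page size */
#define PAGE_CACHE_UNAVAILABLE  (-2)  /* cache cannot be used yet */
#define PAGE_CACHE_NONE         (-1)  /* cache usable, holds no page */

struct page_cache {
    int32_t page_index;          /* cached page index, or a sentinel */
    uint8_t buf[PAGE_SIZE];      /* data of the cached page */
};

/* Hypothetical backend that reads one full page from the flash. */
typedef int (*read_page_fn)(uint32_t page, uint8_t *dst);

/* Mirrors CfgInitialize: unavailable during setup, empty afterwards. */
static void page_cache_init(struct page_cache *c)
{
    c->page_index = PAGE_CACHE_UNAVAILABLE;
    /* ... flash initialization would run here ... */
    c->page_index = PAGE_CACHE_NONE;
}

/* Read one page, serving it from the cache when possible. */
static int cached_read_page(struct page_cache *c, read_page_fn read_page,
                            uint32_t page, uint8_t *dst)
{
    if ((int32_t)page == c->page_index) {
        /* Cache hit: no flash access needed. */
        memcpy(dst, c->buf, PAGE_SIZE);
        return 0;
    }

    int rc = read_page(page, dst);
    if (rc != 0) {
        return rc;
    }

    if (c->page_index != PAGE_CACHE_UNAVAILABLE) {
        /* Remember this page for subsequent reads. */
        memcpy(c->buf, dst, PAGE_SIZE);
        c->page_index = (int32_t)page;
    }
    return 0;
}

/* Every write invalidates the cached page, as in XNandPsu_Write(). */
static void cached_write_invalidate(struct page_cache *c)
{
    if (c->page_index != PAGE_CACHE_UNAVAILABLE) {
        c->page_index = PAGE_CACHE_NONE;
    }
}

The two sentinel values mirror the diff below: PAGE_CACHE_UNAVAILABLE
keeps the cache disabled until flash initialization has completed, and
PAGE_CACHE_NONE marks a cache that is usable but holds nothing valid,
which is also the state every write falls back to so a read can never
return stale data.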

---

 bsps/include/dev/nand/xnandpsu.h |  9 +++++++++
 bsps/shared/dev/nand/xnandpsu.c  | 41 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+)

diff --git a/bsps/include/dev/nand/xnandpsu.h b/bsps/include/dev/nand/xnandpsu.h
index 5c87be08c2..ac9496a745 100644
--- a/bsps/include/dev/nand/xnandpsu.h
+++ b/bsps/include/dev/nand/xnandpsu.h
@@ -217,6 +217,12 @@ extern "C" {
 #define XNANDPSU_NVDDR_CLK_5		((u16)100U * (u16)1000U * (u16)1000U)
 
 #define XNANDPSU_MAX_TIMING_MODE	5
+
+#ifdef __rtems__
+#define XNANDPSU_PAGE_CACHE_UNAVAILABLE	-2
+#define XNANDPSU_PAGE_CACHE_NONE	-1
+#endif
+
 /**
  * The XNandPsu_Config structure contains configuration information for NAND
  * controller.
@@ -390,6 +396,9 @@ typedef struct {
 	XNandPsu_EccCfg EccCfg;		/**< ECC configuration */
 	XNandPsu_Geometry Geometry;	/**< Flash geometry */
 	XNandPsu_Features Features;	/**< ONFI features */
+#ifdef __rtems__
+	int32_t PartialDataPageIndex;	/**< Cached page index */
+#endif
 #ifdef __ICCARM__
 	u8 PartialDataBuf[XNANDPSU_MAX_PAGE_SIZE];	/**< Partial read/write buffer */
 #pragma pack(pop)
diff --git a/bsps/shared/dev/nand/xnandpsu.c b/bsps/shared/dev/nand/xnandpsu.c
index 65c58b7e9b..79025f3c04 100644
--- a/bsps/shared/dev/nand/xnandpsu.c
+++ b/bsps/shared/dev/nand/xnandpsu.c
@@ -244,6 +244,11 @@ s32 XNandPsu_CfgInitialize(XNandPsu *InstancePtr, XNandPsu_Config *ConfigPtr,
 	InstancePtr->DmaMode = XNANDPSU_MDMA;
 	InstancePtr->IsReady = XIL_COMPONENT_IS_READY;
 
+#ifdef __rtems__
+	/* Set page cache to unavailable */
+	InstancePtr->PartialDataPageIndex = XNANDPSU_PAGE_CACHE_UNAVAILABLE;
+#endif
+
 	/* Initialize the NAND flash targets */
 	Status = XNandPsu_FlashInit(InstancePtr);
 	if (Status != XST_SUCCESS) {
@@ -278,6 +283,11 @@ s32 XNandPsu_CfgInitialize(XNandPsu *InstancePtr, XNandPsu_Config *ConfigPtr,
 #endif
 		goto Out;
 	}
+
+#ifdef __rtems__
+	/* Set page cache to none */
+	InstancePtr->PartialDataPageIndex = XNANDPSU_PAGE_CACHE_NONE;
+#endif
 Out:
 	return Status;
 }
@@ -1454,6 +1464,12 @@ s32 XNandPsu_Write(XNandPsu *InstancePtr, u64 Offset, u64 Length, u8 *SrcBuf)
 		goto Out;
 	}
 
+#ifdef __rtems__
+	if (InstancePtr->PartialDataPageIndex != XNANDPSU_PAGE_CACHE_UNAVAILABLE) {
+		/* All writes invalidate the page cache */
+		InstancePtr->PartialDataPageIndex = XNANDPSU_PAGE_CACHE_NONE;
+	}
+#endif
 	while (LengthVar > 0U) {
 		Block = (u32) (OffsetVar/InstancePtr->Geometry.BlockSize);
 		/*
@@ -1619,9 +1635,34 @@ s32 XNandPsu_Read(XNandPsu *InstancePtr, u64 Offset, u64 Length, u8 *DestBuf)
 					InstancePtr->Geometry.BytesPerPage :
 					(u32)LengthVar;
 		}
+#ifdef __rtems__
+		if (Page == InstancePtr->PartialDataPageIndex) {
+			/*
+			 * This is a whole page read for the currently cached
+			 * page. It will not be taken care of below, so perform
+			 * the copy here.
+			 */
+			if (PartialBytes == 0U) {
+				(void)Xil_MemCpy(DestBufPtr,
+						&InstancePtr->PartialDataBuf[0],
+						NumBytes);
+			}
+		} else {
+#endif
 		/* Read page */
 		Status = XNandPsu_ReadPage(InstancePtr, Target, Page, 0U,
 								BufPtr);
+#ifdef __rtems__
+			if (PartialBytes > 0U &&
+				InstancePtr->PartialDataPageIndex != XNANDPSU_PAGE_CACHE_UNAVAILABLE) {
+				/*
+				 * Partial read into page cache. Update the
+				 * cached page index.
+				 */
+				InstancePtr->PartialDataPageIndex = Page;
+			}
+		}
+#endif
 		if (Status != XST_SUCCESS) {
 			goto Out;
 		}


