* [PATCH] spl: fit: nand: fix fit loading on bad blocks
@ 2022-12-11 15:27 Jit Loon Lim
From: Jit Loon Lim @ 2022-12-11 15:27 UTC (permalink / raw)
  To: u-boot
  Cc: Jagan Teki, Vignesh R, Marek, Simon, Tien Fong, Kok Kiang,
	Siew Chin, Sin Hui, Raaj, Dinesh, Boon Khai, Alif, Teik Heng,
	Hazim, Jit Loon Lim, Sieu Mun Tang

From: Tien Fong Chee <tien.fong.chee@intel.com>

The offset of the image to be loaded from NAND is retrieved from the
itb header. Bad blocks in the NAND area holding the itb image would
invalidate that offset, so it must be adjusted to account for the
state of the erase blocks it spans.

Signed-off-by: Tien Fong Chee <tien.fong.chee@intel.com>
Signed-off-by: Jit Loon Lim <jit.loon.lim@intel.com>
---
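A worked example of the offset adjustment (illustration only, not part
of the patch; the 128 KiB erase size, the bad-block layout and the
is_bad() helper below are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define ERASESIZE 0x20000u	/* assume 128 KiB erase blocks */

	/* hypothetical stand-in for nand_block_isbad(): block 1 is bad */
	static int is_bad(uint32_t block_offs)
	{
		return block_offs == 1 * ERASESIZE;
	}

	/* same walk as nand_spl_adjust_offset() in the second hunk */
	static uint32_t adjust(uint32_t sector, uint32_t offs)
	{
		uint32_t blk = sector & ~(ERASESIZE - 1);
		uint32_t end = (sector + offs) & ~(ERASESIZE - 1);

		while (blk <= end) {
			if (is_bad(blk)) {
				/* the data sits one erase block further out */
				offs += ERASESIZE;
				end += ERASESIZE;
			}
			blk += ERASESIZE;
		}
		return offs;
	}

	int main(void)
	{
		/* itb at block 0, payload 256 KiB in; the bad block at
		 * 0x20000 pushes the result from 0x40000 to 0x60000
		 */
		printf("0x%x\n", (unsigned)adjust(0, 0x40000));
		return 0;
	}
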
 common/spl/spl_nand.c         |  3 +-
 drivers/mtd/nand/raw/denali.c | 91 ++++++++++++++++++++++++++++++++++-
 2 files changed, 92 insertions(+), 2 deletions(-)
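
The unaligned-read path in nand_spl_load_image() (second hunk below)
boils down to the following arithmetic, shown with made-up numbers for
a 2 KiB page device:

	/*
	 * e.g. offset = 0x4832, writesize = 0x800:
	 * page_align_offset   = (0x4832 / 0x800) * 0x800 = 0x4800
	 * page_align_overhead =  0x4832 % 0x800          = 0x32
	 * -> read the whole page at 0x4800 into a bounce buffer,
	 *    copy from buffer + 0x32, then continue page-aligned.
	 */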

diff --git a/common/spl/spl_nand.c b/common/spl/spl_nand.c
index 7b7579a2df..d94148ec71 100644
--- a/common/spl/spl_nand.c
+++ b/common/spl/spl_nand.c
@@ -45,10 +45,11 @@ static ulong spl_nand_fit_read(struct spl_load_info *load, ulong offs,
 	int err;
 	ulong sector;
 
-	sector = *(int *)load->priv;
 	offs *= load->bl_len;
 	size *= load->bl_len;
+	sector = *(int *)load->priv;
 	offs = sector + nand_spl_adjust_offset(sector, offs - sector);
+
 	err = nand_spl_load_image(offs, size, dst);
 	if (err)
 		return 0;
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index c827f80281..290c12ac96 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -2,7 +2,7 @@
 /*
  * Copyright (C) 2014       Panasonic Corporation
  * Copyright (C) 2013-2014, Altera Corporation <www.altera.com>
- * Copyright (C) 2009-2010, Intel Corporation and its suppliers.
+ * Copyright (C) 2009-2022, Intel Corporation and its suppliers.
  */
 
 #include <common.h>
@@ -1374,3 +1374,92 @@ free_buf:
 
 	return ret;
 }
+
+#ifdef CONFIG_SPL_BUILD
+struct mtd_info *nand_get_mtd(void)
+{
+	struct mtd_info *mtd;
+
+	mtd = get_nand_dev_by_index(nand_curr_device);
+	if (!mtd)
+		hang();
+
+	return mtd;
+}
+
+int nand_spl_load_image(u32 offset, u32 len, void *dst)
+{
+	size_t count = len, actual = 0, page_align_overhead = 0;
+	u32 page_align_offset = 0;
+	u8 *page_buffer;
+	int err = 0;
+	struct mtd_info *mtd;
+
+	if (!len || !dst)
+		return -EINVAL;
+
+	mtd = nand_get_mtd();
+
+	if ((offset & (mtd->writesize - 1)) != 0) {
+		page_buffer = malloc_cache_aligned(mtd->writesize);
+		if (!page_buffer) {
+			debug("Error: allocating buffer\n");
+			return -ENOMEM;
+		}
+		page_align_overhead = offset % mtd->writesize;
+		page_align_offset = (offset / mtd->writesize) * mtd->writesize;
+		count = mtd->writesize;
+		err = nand_read_skip_bad(mtd, page_align_offset, &count,
+					 &actual, mtd->size, page_buffer);
+		if (err) {
+			free(page_buffer);
+			return err;
+		}
+		count -= page_align_overhead;
+		count = min((size_t)len, count);
+		memcpy(dst, page_buffer + page_align_overhead, count);
+		free(page_buffer);
+		len -= count;
+		if (!len)
+			return err;
+		offset += count;
+		dst += count;
+		count = len;
+	}
+	return nand_read_skip_bad(mtd, offset, &count, &actual, mtd->size, dst);
+}
+
+/*
+ * Adjust the load offset to skip bad blocks.
+ * The offset at which the image to be loaded from NAND is located is
+ * retrieved from the itb header as an offset from the start of the itb
+ * image. nand_read_skip_bad() only skips bad blocks from the read
+ * position onwards, so bad blocks between the itb start sector and the
+ * requested offset would make that offset point past the intended data.
+ * Walk the erase blocks in that range and push the offset forward by
+ * one erase block for each bad block found.
+ */
+u32 nand_spl_adjust_offset(u32 sector, u32 offs)
+{
+	u32 sector_align_offset, sector_align_end_offset;
+	struct mtd_info *mtd;
+
+	mtd = nand_get_mtd();
+
+	sector_align_offset = sector & (~(mtd->erasesize - 1));
+
+	sector_align_end_offset = (sector + offs) & (~(mtd->erasesize - 1));
+
+	while (sector_align_offset <= sector_align_end_offset) {
+		if (nand_block_isbad(mtd, sector_align_offset)) {
+			offs += mtd->erasesize;
+			sector_align_end_offset += mtd->erasesize;
+		}
+		sector_align_offset += mtd->erasesize;
+	}
+
+	return offs;
+}
+
+void nand_deselect(void) {}
+#endif
-- 
2.26.2

