/*
 * drivers/mtd/nand/omap-hw.c
 *
 * This is the MTD driver for the OMAP1710 internal HW NAND controller.
 *
 * Copyright (C) 2004-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and
 *         Juha Yrjola <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/delay.h>
31 #include <linux/errno.h>
32 #include <linux/sched.h>
33 #include <linux/types.h>
34 #include <linux/wait.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/mtd/mtd.h>
38 #include <linux/mtd/nand.h>
39 #include <linux/mtd/partitions.h>
40 #include <linux/mtd/nand_ecc.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/clk.h>
46 #include <asm/arch/board.h>
47 #include <asm/arch/dma.h>
/*
 * OMAP1710 NAND controller: physical base address and register offsets.
 * NOTE(review): offsets referenced by the code below (e.g. NND_CTRL,
 * NND_FIFO) are missing from this extraction -- restore them from the
 * original source before building.
 */
#define NAND_BASE	0xfffbcc00
#define NND_REVISION	0x00	/* controller revision, major<<4 | minor */
#define NND_ACCESS	0x04	/* data access port (byte or word) */
#define NND_ADDR_SRC	0x08	/* address to send with a command */
#define NND_STATUS	0x18	/* status / interrupt flags, W1C with 0x0f */
#define NND_READY	0x1c	/* device ready indication */
#define NND_COMMAND	0x20	/* command port (address is sent) */
#define NND_COMMAND_SEC	0x24	/* secondary command port (no address) */
#define NND_ECC_SELECT	0x28	/* ECC block-size / mode select */
#define NND_ECC_START	0x2c	/* first of the ECC result registers */
#define NND_ECC_9	0x4c	/* last ECC result register */
#define NND_RESET	0x50	/* reset (ECC logic) */
#define NND_FIFOCTRL	0x58	/* FIFO block size (<<24) and block count */
#define NND_PSC_CLK	0x5c	/* timing prescaler */
#define NND_SYSTEST	0x60
#define NND_SYSCFG	0x64	/* 0x02 = soft reset, 0x01 = auto idle */
#define NND_SYSSTATUS	0x68	/* bit 0 set when reset has completed */
#define NND_FIFOTEST1	0x6c
#define NND_FIFOTEST2	0x70
#define NND_FIFOTEST3	0x74
#define NND_FIFOTEST4	0x78
#define NND_PSC1_CLK	0x8c	/* additional timing prescalers */
#define NND_PSC2_CLK	0x90
/* Standard NAND flash command opcodes as issued through NND_COMMAND. */
#define NND_CMD_READ1_LOWER	0x00	/* read, lower half of small page */
#define NND_CMD_WRITE1_LOWER	0x00	/* write pointer to lower half */
#define NND_CMD_READ1_UPPER	0x01	/* read, upper half of small page */
#define NND_CMD_WRITE1_UPPER	0x01	/* write pointer to upper half */
#define NND_CMD_PROGRAM_END	0x10	/* confirm page program */
#define NND_CMD_READ2_SPARE	0x50	/* read spare (OOB) area */
#define NND_CMD_WRITE2_SPARE	0x50	/* write pointer to spare area */
#define NND_CMD_ERASE		0x60	/* block erase setup */
#define NND_CMD_STATUS		0x70	/* read status register */
#define NND_CMD_PROGRAM		0x80	/* page program setup */
#define NND_CMD_READ_ID		0x90	/* read manufacturer/device ID */
#define NND_CMD_ERASE_END	0xD0	/* confirm block erase */
#define NND_CMD_RESET		0xFF	/* device reset */
/*
 * Bit masks for the controller's raw ECC result word: even-parity bits
 * P1e..P2048e occupy bits 0-11, odd-parity bits P1o..P2048o occupy
 * bits 16-27 (Hamming line/column parity of the covered data block).
 */
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)
/* TF(): collapse any non-zero value to 1 so parity bits can be repacked. */
#define TF(value)	(value ? 1 : 0)

/*
 * The three macro groups below extract individual parity bits from the
 * raw ECC word (masked with the NAND_Ecc_* constants above) and place
 * them at the bit position each "true" ECC byte expects; they are used
 * by gen_true_ecc() to build the conventional 3-byte ECC layout.
 */

/* Bits of true-ECC byte 2 (also P1/P2/P4 pairs) */
#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

/* Bits of true-ECC byte 0 */
#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

/* Bits of true-ECC byte 1 */
#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

/* "_s" variants: alternative packing -- presumably for the spare-area /
 * 256-byte ECC layout; TODO confirm against the users of these macros
 * (not visible in this extraction). */
#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)
extern struct nand_oobinfo jffs2_oobinfo;

/*
 * MTD structure for OMAP board
 */
static struct mtd_info *omap_mtd;		/* the single MTD device we register */
static struct clk *omap_nand_clk;		/* "armper_ck" functional clock */
static int omap_nand_dma_ch;			/* DMA channel number, -1 when unavailable */
static struct completion omap_nand_dma_comp;	/* completed from nand_dma_cb() */
static unsigned long omap_nand_base = io_p2v(NAND_BASE);	/* virtual base of the controller */
170 static inline u32 nand_read_reg(int idx)
172 return __raw_readl(omap_nand_base + idx);
175 static inline void nand_write_reg(int idx, u32 val)
177 __raw_writel(val, omap_nand_base + idx);
180 static inline u8 nand_read_reg8(int idx)
182 return __raw_readb(omap_nand_base + idx);
185 static inline void nand_write_reg8(int idx, u8 val)
187 __raw_writeb(val, omap_nand_base + idx);
/*
 * Select or deselect the NAND chip by toggling chip-enable bits in NND_CTRL.
 * NOTE(review): this extraction is missing lines -- the declaration of 'l',
 * the switch/case on 'chip' (deselect path vs. CS0 select path), the bit
 * clear between the second read and write, and all braces. Restore from
 * the original source; do not build as-is.
 */
static void omap_nand_select_chip(struct mtd_info *mtd, int chip)
	l = nand_read_reg(NND_CTRL);
	/* Deselect: set the enable-pair disable bits for all chip selects. */
	l |= (1 << 8) | (1 << 10) | (1 << 12) | (1 << 14);
	nand_write_reg(NND_CTRL, l);
	/* Also CS1, CS2, CS4 would be available */
	l = nand_read_reg(NND_CTRL);
	/* NOTE(review): the modification of 'l' that enables CS0 is missing
	 * between this read and the write below. */
	nand_write_reg(NND_CTRL, l);
211 static void nand_dma_cb(int lch, u16 ch_status, void *data)
213 complete((struct completion *) data);
/*
 * Transfer @u32_count 32-bit words between @addr and the controller FIFO
 * using system DMA, in @block_size-byte frames synchronized to the FIFO.
 *
 * NOTE(review): this extraction is missing lines -- the declaration of
 * 'dma_ch', the if/else on 'is_write' that selects between the two DMA
 * parameter setups, the trailing arguments of the omap_set_dma_*() calls,
 * the 'if (timeout == 0)' guard around the warning printk, and all braces.
 * Also: the function is declared void here, yet omap_nand_write_buf()
 * checks its return value against 0 -- the original signature presumably
 * returned int; confirm against the original source.
 */
static void omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
	unsigned int u32_count, int is_write)
	const int block_size = 16;	/* FIFO frame size in bytes */
	unsigned int block_count, len;
	unsigned long fifo_reg, timeout, jiffies_before, jiffies_spent;
	static unsigned long max_jiffies = 0;	/* worst observed DMA latency */
	dma_ch = omap_nand_dma_ch;
	block_count = u32_count * 4 / block_size;
	/* Clear pending status flags, then program frame size and count. */
	nand_write_reg(NND_STATUS, 0x0f);
	nand_write_reg(NND_FIFOCTRL, (block_size << 24) | block_count);
	fifo_reg = NAND_BASE + NND_FIFO;	/* physical address for DMA */
	/* Write path: memory (EMIFF, post-increment) -> FIFO (TIPB, constant) */
	omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_TIPB,
		OMAP_DMA_AMODE_CONSTANT, fifo_reg,
	omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_EMIFF,
		OMAP_DMA_AMODE_POST_INC,
//	omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	/* Set POSTWRITE bit */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 16));
	/* Read path: FIFO (TIPB, constant) -> memory (EMIFF, post-increment) */
	omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_TIPB,
		OMAP_DMA_AMODE_CONSTANT, fifo_reg,
	omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_EMIFF,
		OMAP_DMA_AMODE_POST_INC,
//	omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_8);
	/* Set PREFETCH bit */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 17));
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, block_size / 4,
		block_count, OMAP_DMA_SYNC_FRAME,
	init_completion(&omap_nand_dma_comp);
	len = u32_count << 2;
	/* Make the buffer visible to the DMA engine before starting. */
	consistent_sync(addr, len, DMA_TO_DEVICE);
	omap_start_dma(dma_ch);
	jiffies_before = jiffies;
	timeout = wait_for_completion_timeout(&omap_nand_dma_comp,
		msecs_to_jiffies(1000));
	jiffies_spent = (unsigned long)((long)jiffies - (long)jiffies_before);
	if (jiffies_spent > max_jiffies)
		max_jiffies = jiffies_spent;
	/* NOTE(review): the 'if (timeout == 0)' guard for this error path is
	 * missing from the extraction. */
	printk(KERN_WARNING "omap-hw-nand: DMA timeout after %u ms, max. seen latency %u ms\n",
		jiffies_to_msecs(jiffies_spent),
		jiffies_to_msecs(max_jiffies));
	if (OMAP_DMA_CCR_REG(dma_ch) & (1 << 7)) {
		/* If the DMA transfer is still running, something
		 * is really wrong. */
		printk(KERN_ERR "omap-hw-nand: DMA transfer still running. Not good.\n");
		printk(KERN_INFO "DMA ch %d: CCR %04x, CSR %04x, CCDEN_L %04x\n",
			dma_ch, omap_readw(OMAP_DMA_CCR_REG(dma_ch)), omap_readw(OMAP_DMA_CSR_REG(dma_ch)),
			omap_readw(OMAP_DMA_BASE + 0x40 * (dma_ch) + 0x34));
	/* On reads, invalidate so the CPU sees the DMA-written data. */
	consistent_sync(addr, len, DMA_FROM_DEVICE);
	/* Clear POSTWRITE and PREFETCH again. */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) & ~((1 << 16) | (1 << 17)));
/*
 * CPU (PIO) fallback read: drain @len 32-bit words from the controller
 * FIFO into @out, in 16-byte blocks, polling NND_STATUS for each block.
 *
 * NOTE(review): this extraction is missing lines -- declarations of 'i'
 * and 'c', the outer per-block loop, the '*out++ = l;' store inside the
 * inner loop, and braces. Restore before building.
 */
static void fifo_read(u32 *out, unsigned int len)
	const int block_size = 16;	/* bytes per FIFO block */
	unsigned long status_reg, fifo_reg;
	status_reg = omap_nand_base + NND_STATUS;
	fifo_reg = omap_nand_base + NND_FIFO;
	len = len * 4 / block_size;	/* convert word count to block count */
	nand_write_reg(NND_FIFOCTRL, (block_size << 24) | len);
	nand_write_reg(NND_STATUS, 0x0f);
	/* Set PREFETCH so the controller fills the FIFO ahead of us. */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 17));
	/* Busy-wait until the FIFO signals a block is available (bit 2). */
	while ((__raw_readl(status_reg) & (1 << 2)) == 0);
	__raw_writel(0x0f, status_reg);	/* acknowledge the status bits */
	for (i = 0; i < c; i++) {
		u32 l = __raw_readl(fifo_reg);
	/* Clear PREFETCH and any remaining status flags. */
	nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) & ~(1 << 17));
	nand_write_reg(NND_STATUS, 0x0f);
/*
 * mtd->read_buf hook: copy @len bytes from the NAND data port into @buf.
 * Word-aligned buffers use DMA (large) or FIFO/direct reads (small);
 * unaligned buffers fall back to byte-wide reads.
 *
 * NOTE(review): this extraction is missing lines -- the braces and the
 * if/else structure joining the DMA, FIFO, word-copy and byte-copy
 * branches, plus the small-buffer read loop header. Restore before
 * building.
 */
static void omap_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
	unsigned long access_reg;
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		int u32_count = len >> 2;
		u32 *dest = (u32 *) buf;
		/* If the transfer is big enough and the length divisible by
		 * 16, we try to use DMA transfer, or FIFO copy in case of
		 * DMA failure (e.g. all channels busy) */
		if (u32_count > 64 && (u32_count & 3) == 0) {
			if (omap_nand_dma_ch >= 0) {
				omap_nand_dma_transfer(mtd, buf, u32_count, 0);
			/* In case of an error, fallback to FIFO copy */
			fifo_read((u32 *) buf, u32_count);
		access_reg = omap_nand_base + NND_ACCESS;
		/* Small buffers we just read directly */
		*dest++ = __raw_readl(access_reg);
	/* If we're not word-aligned, we use byte copy */
	access_reg = omap_nand_base + NND_ACCESS;
	*buf++ = __raw_readb(access_reg);
/*
 * mtd->write_buf hook: copy @len bytes from @buf to the NAND data port.
 * Word-aligned buffers go out 32 bits at a time (DMA for large ones);
 * unaligned buffers fall back to byte writes.
 *
 * NOTE(review): this extraction is missing lines -- the braces, the loop
 * headers for the word/byte copy loops, and the else joining the two
 * paths. Also, the return of omap_nand_dma_transfer() is compared with 0
 * here although that function is declared void above -- the original
 * presumably returned int; confirm before building.
 */
static void omap_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		const u32 *src = (const u32 *) buf;
		/* If the transfer is big enough and length divisible by 16,
		 * we try to use DMA transfer. */
		if (len > 256 / 4 && (len & 3) == 0) {
			if (omap_nand_dma_transfer(mtd, (void *) buf, len, 1) == 0)
			/* In case of an error, fallback to CPU copy */
		nand_write_reg(NND_ACCESS, *src++);
	nand_write_reg8(NND_ACCESS, *buf++);
/*
 * mtd->verify_buf hook: compare @len bytes read back from the NAND data
 * port against @buf, word-wise when aligned, byte-wise otherwise.
 *
 * NOTE(review): this extraction is missing lines -- the loop headers, the
 * error 'return -EFAULT' bodies, the final 'return 0', and braces.
 */
static int omap_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
	if (likely(((unsigned long) buf & 3) == 0 && (len & 3) == 0)) {
		const u32 *dest = (const u32 *) buf;
		if (*dest++ != nand_read_reg(NND_ACCESS))
	if (*buf++ != nand_read_reg8(NND_ACCESS))
384 static u_char omap_nand_read_byte(struct mtd_info *mtd)
386 return nand_read_reg8(NND_ACCESS);
/*
 * mtd->dev_ready hook: poll the controller's ready line via NND_READY.
 * NOTE(review): the declaration of 'l' and the 'return' statement
 * (presumably 'return l & 0x01;' -- confirm) are missing from this
 * extraction, as are the braces.
 */
static int omap_nand_dev_ready(struct mtd_info *mtd)
	l = nand_read_reg(NND_READY);
/*
 * Issue @cmd to the flash. With a valid address the command goes through
 * NND_COMMAND together with @addr; without one it goes through
 * NND_COMMAND_SEC. Busy-waits for the device to become ready again.
 *
 * NOTE(review): the 'if (addr_valid) { ... } else { ... }' skeleton around
 * the two register pairs, the final 'return 0;', and the braces are
 * missing from this extraction.
 */
static int nand_write_command(u8 cmd, u32 addr, int addr_valid)
	nand_write_reg(NND_ADDR_SRC, addr);
	nand_write_reg8(NND_COMMAND, cmd);
	nand_write_reg(NND_ADDR_SRC, 0);
	nand_write_reg8(NND_COMMAND_SEC, cmd);
	/* Spin until the device reports ready. */
	while (!omap_nand_dev_ready(NULL));
/*
 * Send command to NAND device (small-page chips).
 * Translates the generic MTD command set into controller writes; SEQIN
 * must first position the read pointer (READ0 / READ1 / READOOB) before
 * the program command is issued.
 *
 * NOTE(review): this extraction is missing lines -- the declaration of
 * 'readcmd', the 'switch (command)' skeleton with its break statements,
 * the default case that handles plain reads, and braces.
 */
static void omap_nand_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
	struct nand_chip *this = mtd->priv;
	/*
	 * Write out the command to the device.
	 */
	if (command == NAND_CMD_SEQIN) {
		if (column >= mtd->writesize) {
			/* OOB area --> READOOB */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
			/* Upper half --> READ1 */
			readcmd = NAND_CMD_READ1;
		nand_write_command(readcmd, 0, 0);
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_ERASE2:
		/* Commands that take no address. */
		nand_write_command(command, 0, 0);
	case NAND_CMD_ERASE1:
		/* Erase address skips the column byte (A8 formatting). */
		nand_write_command(command, ((page_addr & 0xFFFFFF00) << 1) | (page_addr & 0XFF), 1);
		nand_write_command(command, (page_addr << this->page_shift) | column, 1);
/*
 * Send command to NAND device (large-page, 2KiB chips). READOOB is
 * remapped to READ0 at an offset past the page data, and READ0 needs
 * a trailing READSTART confirm.
 *
 * NOTE(review): the 'switch (command)' skeleton, break statements, the
 * default case label, and braces are missing from this extraction.
 */
static void omap_nand_command_lp(struct mtd_info *mtd, unsigned command, int column, int page_addr)
	struct nand_chip *this = mtd->priv;
	if (command == NAND_CMD_READOOB) {
		/* Large-page chips address OOB as bytes past the page data. */
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_ERASE2:
		/* Commands that take no address. */
		nand_write_command(command, 0, 0);
	case NAND_CMD_ERASE1:
		nand_write_command(command, page_addr << this->page_shift >> 11, 1);
		nand_write_command(command, (page_addr << 16) | column, 1);
	if (command == NAND_CMD_READ0)
		/* Large-page reads need the READSTART confirm cycle. */
		nand_write_command(NAND_CMD_READSTART, 0, 0);
/*
 * Generate non-inverted ECC bytes.
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * nobody is trying to write data on the seemingly unused page.
 *
 * Reading an erased page will produce an ECC mismatch between
 * generated and read ECC bytes that has to be dealt with separately.
 *
 * NOTE(review): this extraction is missing lines -- declarations of 'l'
 * and 'reg', the block-count selection for the 2048-byte HW ECC case,
 * the per-block loop that advances 'reg', the final 'return 0;', and
 * braces.
 */
static int omap_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
	struct nand_chip *this = mtd->priv;
	/* Ex NAND_ECC_HW12_2048 */
	if ((this->ecc.mode == NAND_ECC_HW) && (this->ecc.size == 2048))
	l = nand_read_reg(reg);
	*ecc_code++ = l;          // P128e, ..., P1e
	*ecc_code++ = l >> 16;    // P128o, ..., P1o
	// P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e
	*ecc_code++ = ((l >> 8) & 0x0f) | ((l >> 20) & 0xf0);
511 * This function will generate true ECC value, which can be used
512 * when correcting data read from NAND flash memory core
514 static void gen_true_ecc(u8 *ecc_buf)
516 u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) | ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
518 ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) | P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp) );
519 ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) | P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
520 ecc_buf[2] = ~( P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) | P1e(tmp) | P2048o(tmp) | P2048e(tmp));
/*
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * Returns 0 for a match/correctable case, negative for uncorrectable.
 *
 * NOTE(review): this extraction is missing lines -- the third parameter
 * (the page data buffer, 'page_data'), declarations of 'i', 'ecc_bit[24]',
 * 'ecc_sum', 'isEccFF', 'find_byte' and 'find_bit', the isEccFF handling,
 * the 'switch (ecc_sum)' skeleton (cases 0, 1, 12, default), the rest of
 * the 'find_byte' expression, the return statements, and braces.
 */
static int omap_nand_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
		u8 *ecc_data2,	/* read from register */
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	/* All-0xFF read ECC means the page is presumably erased. */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);
	/* Work on the inverted (conventional) ECC bytes. */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	/* Explode each ECC byte into individual bits (LSB first). */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	/* XOR the bit vectors; a single-bit data error yields exactly 12
	 * set bits in 'ecc_bit' whose pattern addresses the bad bit. */
	for (i = 0; i< 6; i++ )
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];
	/* Not reached because this function is not called if
	   ECC values are equal */
	/* Uncorrectable error */
	DEBUG (MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
	/* Correctable error: odd-numbered bits address the byte ... */
	find_byte = (ecc_bit[23] << 8) +
	/* ... and these three bits address the bit within the byte. */
	find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
	DEBUG (MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at offset: %d, bit: %d\n", find_byte, find_bit);
	page_data[find_byte] ^= (1 << find_bit);
	/* An error purely in the stored ECC bytes is harmless. */
	if (ecc_data2[0] == 0 && ecc_data2[1] == 0 && ecc_data2[2] == 0)
	DEBUG (MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
/*
 * mtd->ecc.correct hook: compare read vs. calculated ECC for each
 * 512-byte sub-block and attempt single-bit correction via
 * omap_nand_compare_ecc().
 *
 * NOTE(review): this extraction is missing lines -- the assignment of
 * 'this' from mtd->priv, the block_count selection (4 for the 2048-byte
 * HW ECC case, 1 otherwise), the error accumulation / pointer advances
 * inside the loop, the return statement, and braces.
 */
static int omap_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
	struct nand_chip *this;
	int block_count = 0, i, r;
	/* Ex NAND_ECC_HW12_2048 */
	if ((this->ecc.mode == NAND_ECC_HW) && (this->ecc.size == 2048))
	for (i = 0; i < block_count; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			r = omap_nand_compare_ecc(read_ecc, calc_ecc, dat);
659 static void omap_nand_enable_hwecc(struct mtd_info *mtd, int mode)
661 nand_write_reg(NND_RESET, 0x01);
#ifdef CONFIG_MTD_CMDLINE_PARTS

extern int mtdpart_setup(char *);

/*
 * Build the partition table from the board's OMAP_TAG_FLASH_PART_STR
 * string (fed to the "cmdlinepart" parser) and register the partitions.
 *
 * NOTE(review): this extraction is missing lines -- declarations of 'c'
 * and 'part_str_len', the NULL check on 'cfg', the kfree of 'part_str',
 * the 'c <= 0' error path, the return statements, braces, and the body
 * of the !CONFIG_MTD_CMDLINE_PARTS stub below (presumably 'return
 * -ENODEV;' -- confirm).
 */
static int __init add_dynamic_parts(struct mtd_info *mtd)
	static const char *part_parsers[] = { "cmdlinepart", NULL };
	struct mtd_partition *parts;
	const struct omap_flash_part_str_config *cfg;
	char *part_str = NULL;
	cfg = omap_get_var_config(OMAP_TAG_FLASH_PART_STR, &part_str_len);
	part_str = kmalloc(part_str_len + 1, GFP_KERNEL);
	if (part_str == NULL)
	memcpy(part_str, cfg->part_table, part_str_len);
	part_str[part_str_len] = '\0';
	mtdpart_setup(part_str);	/* hand the string to the parser */
	c = parse_mtd_partitions(omap_mtd, part_parsers, &parts, 0);
	if (part_str != NULL) {
	add_mtd_partitions(mtd, parts, c);

static inline int add_dynamic_parts(struct mtd_info *mtd)
/*
 * calc_psc - convert a delay in nanoseconds to clock cycles, rounding up
 * @ns:       desired delay in nanoseconds
 * @cycle_ps: length of one clock cycle in picoseconds
 *
 * Returns ceil(ns * 1000 / cycle_ps): the smallest number of cycles whose
 * total duration is at least @ns nanoseconds.
 */
static inline int calc_psc(int ns, int cycle_ps)
{
	return (ns * 1000 + (cycle_ps - 1)) / cycle_ps;
}
/*
 * Program the three timing prescaler registers from nanosecond targets,
 * based on the current functional clock rate.
 *
 * NOTE(review): this extraction is missing lines -- declarations of 'i'
 * and 'psc[3]', the clamp bodies ('psc[i] = 1' for the <1 case and
 * 'psc[i] = 256' for the >256 case), and braces.
 */
static void set_psc_regs(int psc_ns, int psc1_ns, int psc2_ns)
	unsigned long rate, ps;
	rate = clk_get_rate(omap_nand_clk);
	ps = 1000000000 / (rate / 1000);	/* one clock cycle in picoseconds */
	psc[0] = calc_psc(psc_ns, ps);
	psc[1] = calc_psc(psc1_ns, ps);
	psc[2] = calc_psc(psc2_ns, ps);
	/* Clamp each prescaler into the register's valid range. */
	for (i = 0; i < 3; i++) {
		else if (psc[i] > 256)
	/* Registers hold (cycles - 1). */
	nand_write_reg(NND_PSC_CLK, psc[0] - 1);
	nand_write_reg(NND_PSC1_CLK, psc[1] - 1);
	nand_write_reg(NND_PSC2_CLK, psc[2] - 1);
	printk(KERN_INFO "omap-hw-nand: using PSC values %d, %d, %d\n", psc[0], psc[1], psc[2]);
/*
 * Main initialization routine: clock setup, controller reset, MTD/chip
 * structure allocation, DMA channel request, nand_scan() probe, large-page
 * adjustments, BBT scan and partition registration.
 *
 * NOTE(review): this extraction is missing many lines -- declarations of
 * 'err' and 'l', the 'if (!omap_mtd)' guard around the allocation-failure
 * printk, the '#ifdef CONFIG_ARCH_OMAP16XX'-style guards around the DMA
 * request, the error-path labels and gotos ('out_mtd', 'out_dma' etc.),
 * the bodies of the nand_scan()/scan_bbt() failure branches, the final
 * 'return 0;'/'return err;', and braces. Restore from the original
 * source; do not build as-is.
 */
static int __init omap_nand_init(void)
	struct nand_chip *this;
	omap_nand_clk = clk_get(NULL, "armper_ck");
	BUG_ON(omap_nand_clk == NULL);
	clk_enable(omap_nand_clk);
	l = nand_read_reg(NND_REVISION);
	printk(KERN_INFO "omap-hw-nand: OMAP NAND Controller rev. %d.%d\n", l>>4, l & 0xf);
	/* Reset the NAND Controller */
	nand_write_reg(NND_SYSCFG, 0x02);
	while ((nand_read_reg(NND_SYSSTATUS) & 0x01) == 0);
	/* No Prefetch, no postwrite, write prot & enable pairs disabled,
	   addres counter set to send 4 byte addresses to flash,
	   A8 is set not to be sent to flash (erase addre needs formatting),
	   choose little endian, enable 512 byte ECC logic,
	 */
	nand_write_reg(NND_CTRL, 0xFF01);
	/* Allocate memory for MTD device structure and private data */
	omap_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
	printk(KERN_WARNING "omap-hw-nand: Unable to allocate OMAP NAND MTD device structure.\n");
	/* DMA is optional: fall back to PIO when no channel is available. */
	err = omap_request_dma(OMAP_DMA_NAND, "NAND", nand_dma_cb,
		&omap_nand_dma_comp, &omap_nand_dma_ch);
	printk(KERN_WARNING "omap-hw-nand: Unable to reserve DMA channel\n");
	omap_nand_dma_ch = -1;
	omap_nand_dma_ch = -1;
	/* Get pointer to private data */
	this = (struct nand_chip *) (&omap_mtd[1]);
	/* Initialize structures */
	memset((char *) omap_mtd, 0, sizeof(struct mtd_info));
	memset((char *) this, 0, sizeof(struct nand_chip));
	/* Link the private data with the MTD structure */
	omap_mtd->priv = this;
	omap_mtd->name = "omap-nand";
	this->options = NAND_SKIP_BBTSCAN;	/* we scan the BBT ourselves below */
	/* Used from chip select and nand_command() */
	this->read_byte = omap_nand_read_byte;
	this->select_chip = omap_nand_select_chip;
	this->dev_ready = omap_nand_dev_ready;
	this->chip_delay = 0;	/* dev_ready() polls, no fixed delay needed */
	this->ecc.mode = NAND_ECC_HW;
	this->ecc.size = 512;
	this->cmdfunc = omap_nand_command;
	this->write_buf = omap_nand_write_buf;
	this->read_buf = omap_nand_read_buf;
	this->verify_buf = omap_nand_verify_buf;
	this->ecc.calculate = omap_nand_calculate_ecc;
	this->ecc.correct = omap_nand_correct_data;
	this->ecc.hwctl = omap_nand_enable_hwecc;
	nand_write_reg(NND_SYSCFG, 0x1);	/* Enable auto idle */
	nand_write_reg(NND_PSC_CLK, 10);	/* conservative timing for the probe */
	/* Scan to find existance of the device */
	if (nand_scan(omap_mtd, 1)) {
	set_psc_regs(25, 15, 35);	/* real timings once the chip is known */
	if (this->page_shift == 11) {
		/* 2KiB large-page chip: switch command set and 12-byte ECC. */
		this->cmdfunc = omap_nand_command_lp;
		l = nand_read_reg(NND_CTRL);
		l |= 1 << 4;	/* Set the A8 bit in CTRL reg */
		nand_write_reg(NND_CTRL, l);
		this->ecc.mode = NAND_ECC_HW;
		this->ecc.size = 2048;
		this->ecc.bytes = 12;
		omap_mtd->eccsize = 2048;
		nand_write_reg(NND_ECC_SELECT, 6);
	/* We have to do bbt scanning ourselves */
	if (this->scan_bbt (omap_mtd)) {
	err = add_dynamic_parts(omap_mtd);
	printk(KERN_ERR "omap-hw-nand: no partitions defined\n");
	nand_release(omap_mtd);
	if (omap_nand_dma_ch >= 0)
		omap_free_dma(omap_nand_dma_ch);
	clk_put(omap_nand_clk);

module_init(omap_nand_init);
/*
 * Module exit: stop the clock and unregister the MTD device.
 * NOTE(review): braces and presumably a trailing kfree(omap_mtd) are
 * missing from this extraction -- confirm against the original source.
 */
static void __exit omap_nand_cleanup (void)
	clk_disable(omap_nand_clk);
	clk_put(omap_nand_clk);
	/* Release resources, unregister device */
	nand_release(omap_mtd);

module_exit(omap_nand_cleanup);