/*
 * Common Flash Interface support:
 *   Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
15 #include <asm/byteorder.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 #include <linux/interrupt.h>
21 #include <linux/mtd/xip.h>
22 #include <linux/mtd/mtd.h>
23 #include <linux/mtd/map.h>
24 #include <linux/mtd/cfi.h>
25 #include <linux/mtd/compatmac.h>
27 int __xipram qry_present(struct map_info *map, __u32 base,
28 struct cfi_private *cfi)
30 int osf = cfi->interleave * cfi->device_type; /* scale factor */
34 qry[0] = cfi_build_cmd('Q', map, cfi);
35 qry[1] = cfi_build_cmd('R', map, cfi);
36 qry[2] = cfi_build_cmd('Y', map, cfi);
38 val[0] = map_read(map, base + osf*0x10);
39 val[1] = map_read(map, base + osf*0x11);
40 val[2] = map_read(map, base + osf*0x12);
42 if (!map_word_equal(map, qry[0], val[0]))
45 if (!map_word_equal(map, qry[1], val[1]))
48 if (!map_word_equal(map, qry[2], val[2]))
51 return 1; /* "QRY" found */
54 int __xipram qry_mode_on(uint32_t base, struct map_info *map,
55 struct cfi_private *cfi)
57 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
58 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
59 if (qry_present(map, base, cfi))
61 /* QRY not found probably we deal with some odd CFI chips */
62 /* Some revisions of some old Intel chips? */
63 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
64 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
65 cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
66 if (qry_present(map, base, cfi))
69 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
70 cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
71 if (qry_present(map, base, cfi))
76 void __xipram qry_mode_off(uint32_t base, struct map_info *map,
77 struct cfi_private *cfi)
79 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
80 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
84 __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
86 struct cfi_private *cfi = map->fldrv_priv;
87 __u32 base = 0; // cfi->chips[0].start;
88 int ofs_factor = cfi->interleave * cfi->device_type;
90 struct cfi_extquery *extp = NULL;
92 printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
96 extp = kmalloc(size, GFP_KERNEL);
98 printk(KERN_ERR "Failed to allocate memory\n");
102 #ifdef CONFIG_MTD_XIP
106 /* Switch it into Query Mode */
107 qry_mode_on(base, map, cfi);
108 /* Read in the Extended Query Table */
109 for (i=0; i<size; i++) {
110 ((unsigned char *)extp)[i] =
111 cfi_read_query(map, base+((adr+i)*ofs_factor));
114 /* Make sure it returns to read mode */
115 qry_mode_off(base, map, cfi);
117 #ifdef CONFIG_MTD_XIP
118 (void) map_read(map, base);
126 EXPORT_SYMBOL(cfi_read_pri);
128 void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
130 struct map_info *map = mtd->priv;
131 struct cfi_private *cfi = map->fldrv_priv;
134 for (f=fixups; f->fixup; f++) {
135 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
136 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
137 f->fixup(mtd, f->param);
142 EXPORT_SYMBOL(cfi_fixup);
144 int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
145 loff_t ofs, size_t len, void *thunk)
147 struct map_info *map = mtd->priv;
148 struct cfi_private *cfi = map->fldrv_priv;
150 int chipnum, ret = 0;
152 struct mtd_erase_region_info *regions = mtd->eraseregions;
157 if ((len + ofs) > mtd->size)
160 /* Check that both start and end of the requested erase are
161 * aligned with the erasesize at the appropriate addresses.
166 /* Skip all erase regions which are ended before the start of
167 the requested erase. Actually, to save on the calculations,
168 we skip to the first erase region which starts after the
169 start of the requested erase, and then go back one.
172 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
176 /* OK, now i is pointing at the erase region in which this
177 erase request starts. Check the start of the requested
178 erase range is aligned with the erase size which is in
182 if (ofs & (regions[i].erasesize-1))
185 /* Remember the erase region we start on */
188 /* Next, check that the end of the requested erase is aligned
189 * with the erase region at that address.
192 while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
195 /* As before, drop back one to point at the region in which
196 the address actually falls
200 if ((ofs + len) & (regions[i].erasesize-1))
203 chipnum = ofs >> cfi->chipshift;
204 adr = ofs - (chipnum << cfi->chipshift);
209 int size = regions[i].erasesize;
211 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
220 if (ofs == regions[i].offset + size * regions[i].numblocks)
223 if (adr >> cfi->chipshift) {
227 if (chipnum >= cfi->numchips)
235 EXPORT_SYMBOL(cfi_varsize_frob);
237 MODULE_LICENSE("GPL");