/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
        printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
        mtd->flags |= MTD_STUPID_LOCK;
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, 0x891c,         fixup_use_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table picks up all cases where we
         * know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

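/*
 * Read the Intel/Sharp extended query table from the chip.  The table
 * contains several variable-length fields, so we read it with a guessed
 * size first and, whenever one of the parsed length fields shows the
 * buffer was too small, free it and retry with the grown size (the
 * "again" loop below).
 */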
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i = 0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
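                /* Each CFI erase region descriptor packs the geometry
                   into one dword: bits 0-15 hold (number of blocks - 1)
                   and bits 16-31 hold (block size / 256), so
                   (info >> 8) & ~0xff yields the block size in bytes;
                   scale by the interleave for the whole chip set. */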
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i, mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
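                /* This also assumes numparts is a power of two, so that
                   __ffs(numparts) == log2(numparts). */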
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

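/*
 * get_chip() is called with chip->mutex held.  It waits (dropping and
 * retaking the lock as needed) until the chip or hardware partition is
 * available for the requested operation, suspending an in-progress
 * erase first when the chip supports that and the mode allows it.
 */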
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. There is a possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

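        /* Fall through: the chip is now ready for a read-type operation */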
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

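        /* Fall through: an operation is still suspended, so go to sleep */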
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to the chip we borrowed it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending, the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
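        /* Allow the operation up to 8x the typical time reported by the
           CFI query; if no typical time was given, allow a generous
           500 ms before timing out. */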
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However, someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so, let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when the waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

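/*
 * Non-XIP counterpart of xip_wait_for_operation(): invalidate any
 * cached copy of the range being modified, then poll the status
 * register until the operation completes, sleeping rather than
 * busy-waiting while a large part of the typical delay remains.
 */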
static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK, still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);


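/*
 * point/unpoint support: on a directly mapped (linear) chip the flash
 * contents can be accessed in place, so point() just puts the chip(s)
 * into array mode and bumps a reference count instead of copying data.
 */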
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs, last_end = 0;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                /* We cannot point across chips that are virtually disjoint */
                if (!last_end)
                        last_end = cfi->chips[chipnum].start;
                else if (cfi->chips[chipnum].start != last_end)
                        break;

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                last_end += 1 << cfi->chipshift;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
1248                         printk(KERN_WARNING "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1249
1250                 put_chip(map, chip, chip->start);
1251                 spin_unlock(chip->mutex);
1252
1253                 len -= thislen;
1254                 ofs = 0;
1255                 chipnum++;
1256         }
1257 }
1258
1259 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1260 {
1261         unsigned long cmd_addr;
1262         struct cfi_private *cfi = map->fldrv_priv;
1263         int ret;
1264
1265         adr += chip->start;
1266
1267         /* Ensure cmd read/writes are aligned. */
1268         cmd_addr = adr & ~(map_bankwidth(map)-1);
1269
1270         spin_lock(chip->mutex);
1271         ret = get_chip(map, chip, cmd_addr, FL_READY);
1272         if (ret) {
1273                 spin_unlock(chip->mutex);
1274                 return ret;
1275         }
1276
1277         if (chip->state != FL_POINT && chip->state != FL_READY) {
1278                 map_write(map, CMD(0xff), cmd_addr);
1279
1280                 chip->state = FL_READY;
1281         }
1282
1283         map_copy_from(map, buf, adr, len);
1284
1285         put_chip(map, chip, cmd_addr);
1286
1287         spin_unlock(chip->mutex);
1288         return 0;
1289 }
1290
1291 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1292 {
1293         struct map_info *map = mtd->priv;
1294         struct cfi_private *cfi = map->fldrv_priv;
1295         unsigned long ofs;
1296         int chipnum;
1297         int ret = 0;
1298
1299         /* ofs: offset within the first chip that the first read should start */
1300         chipnum = (from >> cfi->chipshift);
1301         ofs = from - (chipnum <<  cfi->chipshift);
1302
1303         *retlen = 0;
1304
1305         while (len) {
1306                 unsigned long thislen;
1307
1308                 if (chipnum >= cfi->numchips)
1309                         break;
1310
1311                 if ((len + ofs -1) >> cfi->chipshift)
1312                         thislen = (1<<cfi->chipshift) - ofs;
1313                 else
1314                         thislen = len;
1315
1316                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1317                 if (ret)
1318                         break;
1319
1320                 *retlen += thislen;
1321                 len -= thislen;
1322                 buf += thislen;
1323
1324                 ofs = 0;
1325                 chipnum++;
1326         }
1327         return ret;
1328 }
1329
1330 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1331                                      unsigned long adr, map_word datum, int mode)
1332 {
1333         struct cfi_private *cfi = map->fldrv_priv;
1334         map_word status, write_cmd;
1335         int ret=0;
1336
1337         adr += chip->start;
1338
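        /* Select the program command: 0x40 is the classic word-program
           opcode, 0x41 is the variant used by the 0x0200 command set, and
           0xc0 programs the OTP protection registers. */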
1339         switch (mode) {
1340         case FL_WRITING:
1341                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1342                 break;
1343         case FL_OTP_WRITE:
1344                 write_cmd = CMD(0xc0);
1345                 break;
1346         default:
1347                 return -EINVAL;
1348         }
1349
1350         spin_lock(chip->mutex);
1351         ret = get_chip(map, chip, adr, mode);
1352         if (ret) {
1353                 spin_unlock(chip->mutex);
1354                 return ret;
1355         }
1356
1357         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1358         ENABLE_VPP(map);
1359         xip_disable(map, chip, adr);
1360         map_write(map, write_cmd, adr);
1361         map_write(map, datum, adr);
1362         chip->state = mode;
1363
1364         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1365                                    adr, map_bankwidth(map),
1366                                    chip->word_write_time);
1367         if (ret) {
1368                 xip_enable(map, chip, adr);
1369                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1370                 goto out;
1371         }
1372
1373         /* check for errors */
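        /* SR bits tested here: 0x1a = SR.4 (program error) | SR.3 (VPP low)
           | SR.1 (block locked).  MERGESTATUS collapses the map_word status
           into a single value before the bits are decoded below. */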
1374         status = map_read(map, adr);
1375         if (map_word_bitsset(map, status, CMD(0x1a))) {
1376                 unsigned long chipstatus = MERGESTATUS(status);
1377
1378                 /* reset status */
1379                 map_write(map, CMD(0x50), adr);
1380                 map_write(map, CMD(0x70), adr);
1381                 xip_enable(map, chip, adr);
1382
1383                 if (chipstatus & 0x02) {
1384                         ret = -EROFS;
1385                 } else if (chipstatus & 0x08) {
1386                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1387                         ret = -EIO;
1388                 } else {
1389                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1390                         ret = -EINVAL;
1391                 }
1392
1393                 goto out;
1394         }
1395
1396         xip_enable(map, chip, adr);
1397  out:   put_chip(map, chip, adr);
1398         spin_unlock(chip->mutex);
1399         return ret;
1400 }
1401
1402
1403 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1404 {
1405         struct map_info *map = mtd->priv;
1406         struct cfi_private *cfi = map->fldrv_priv;
1407         int ret = 0;
1408         int chipnum;
1409         unsigned long ofs;
1410
1411         *retlen = 0;
1412         if (!len)
1413                 return 0;
1414
1415         chipnum = to >> cfi->chipshift;
1416         ofs = to  - (chipnum << cfi->chipshift);
1417
1418         /* If it's not bus-aligned, do the first byte write */
1419         if (ofs & (map_bankwidth(map)-1)) {
1420                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1421                 int gap = ofs - bus_ofs;
1422                 int n;
1423                 map_word datum;
1424
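                /* Pad the partial word with 0xff: programming can only turn
                   1 bits into 0, so the 0xff filler bytes leave whatever is
                   already in the neighbouring bytes of the word intact. */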
1425                 n = min_t(int, len, map_bankwidth(map)-gap);
1426                 datum = map_word_ff(map);
1427                 datum = map_word_load_partial(map, datum, buf, gap, n);
1428
1429                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1430                                                bus_ofs, datum, FL_WRITING);
1431                 if (ret)
1432                         return ret;
1433
1434                 len -= n;
1435                 ofs += n;
1436                 buf += n;
1437                 (*retlen) += n;
1438
1439                 if (ofs >> cfi->chipshift) {
1440                         chipnum ++;
1441                         ofs = 0;
1442                         if (chipnum == cfi->numchips)
1443                                 return 0;
1444                 }
1445         }
1446
1447         while(len >= map_bankwidth(map)) {
1448                 map_word datum = map_word_load(map, buf);
1449
1450                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1451                                        ofs, datum, FL_WRITING);
1452                 if (ret)
1453                         return ret;
1454
1455                 ofs += map_bankwidth(map);
1456                 buf += map_bankwidth(map);
1457                 (*retlen) += map_bankwidth(map);
1458                 len -= map_bankwidth(map);
1459
1460                 if (ofs >> cfi->chipshift) {
1461                         chipnum ++;
1462                         ofs = 0;
1463                         if (chipnum == cfi->numchips)
1464                                 return 0;
1465                 }
1466         }
1467
1468         if (len & (map_bankwidth(map)-1)) {
1469                 map_word datum;
1470
1471                 datum = map_word_ff(map);
1472                 datum = map_word_load_partial(map, datum, buf, 0, len);
1473
1474                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1475                                        ofs, datum, FL_WRITING);
1476                 if (ret)
1477                         return ret;
1478
1479                 (*retlen) += len;
1480         }
1481
1482         return 0;
1483 }
1484
1485
1486 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1487                                     unsigned long adr, const struct kvec **pvec,
1488                                     unsigned long *pvec_seek, int len)
1489 {
1490         struct cfi_private *cfi = map->fldrv_priv;
1491         map_word status, write_cmd, datum;
1492         unsigned long cmd_adr;
1493         int ret, wbufsize, word_gap, words;
1494         const struct kvec *vec;
1495         unsigned long vec_seek;
1496
1497         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1498         adr += chip->start;
1499         cmd_adr = adr & ~(wbufsize-1);
1500
1501         /* Let's determine this according to the interleave only once */
1502         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1503
1504         spin_lock(chip->mutex);
1505         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1506         if (ret) {
1507                 spin_unlock(chip->mutex);
1508                 return ret;
1509         }
1510
1511         XIP_INVAL_CACHED_RANGE(map, adr, len);
1512         ENABLE_VPP(map);
1513         xip_disable(map, chip, cmd_adr);
1514
1515         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1516            [...], the device will not accept any more Write to Buffer commands".
1517            So we must check here and reset those bits if they're set. Otherwise
1518            we're just pissing in the wind */
1519         if (chip->state != FL_STATUS) {
1520                 map_write(map, CMD(0x70), cmd_adr);
1521                 chip->state = FL_STATUS;
1522         }
1523         status = map_read(map, cmd_adr);
1524         if (map_word_bitsset(map, status, CMD(0x30))) {
1525                 xip_enable(map, chip, cmd_adr);
1526                 printk(KERN_WARNING "%s: SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", map->name, status.x[0]);
1527                 xip_disable(map, chip, cmd_adr);
1528                 map_write(map, CMD(0x50), cmd_adr);
1529                 map_write(map, CMD(0x70), cmd_adr);
1530         }
1531
1532         chip->state = FL_WRITING_TO_BUFFER;
1533         map_write(map, write_cmd, cmd_adr);
1534         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1535         if (ret) {
1536                 /* Argh. Not ready for write to buffer */
1537                 map_word Xstatus = map_read(map, cmd_adr);
1538                 map_write(map, CMD(0x70), cmd_adr);
1539                 chip->state = FL_STATUS;
1540                 status = map_read(map, cmd_adr);
1541                 map_write(map, CMD(0x50), cmd_adr);
1542                 map_write(map, CMD(0x70), cmd_adr);
1543                 xip_enable(map, chip, cmd_adr);
1544                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1545                                 map->name, Xstatus.x[0], status.x[0]);
1546                 goto out;
1547         }
1548
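        /* All map writes must be bankwidth-aligned.  word_gap ends up
           counting the 0xff pad bytes in front of an unaligned start
           address, and adr is pulled back to the preceding word boundary
           to match. */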
1549         /* Figure out the number of words to write */
1550         word_gap = (-adr & (map_bankwidth(map)-1));
1551         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1552         if (!word_gap) {
1553                 words--;
1554         } else {
1555                 word_gap = map_bankwidth(map) - word_gap;
1556                 adr -= word_gap;
1557                 datum = map_word_ff(map);
1558         }
1559
1560         /* Write the word count minus one, as the buffered-write protocol expects */
1561         map_write(map, CMD(words), cmd_adr );
1562
1563         /* Write data */
1564         vec = *pvec;
1565         vec_seek = *pvec_seek;
1566         do {
1567                 int n = map_bankwidth(map) - word_gap;
1568                 if (n > vec->iov_len - vec_seek)
1569                         n = vec->iov_len - vec_seek;
1570                 if (n > len)
1571                         n = len;
1572
1573                 if (!word_gap && len < map_bankwidth(map))
1574                         datum = map_word_ff(map);
1575
1576                 datum = map_word_load_partial(map, datum,
1577                                               vec->iov_base + vec_seek,
1578                                               word_gap, n);
1579
1580                 len -= n;
1581                 word_gap += n;
1582                 if (!len || word_gap == map_bankwidth(map)) {
1583                         map_write(map, datum, adr);
1584                         adr += map_bankwidth(map);
1585                         word_gap = 0;
1586                 }
1587
1588                 vec_seek += n;
1589                 if (vec_seek == vec->iov_len) {
1590                         vec++;
1591                         vec_seek = 0;
1592                 }
1593         } while (len);
1594         *pvec = vec;
1595         *pvec_seek = vec_seek;
1596
1597         /* GO GO GO: 0xd0 confirms the buffer and starts the actual programming */
1598         map_write(map, CMD(0xd0), cmd_adr);
1599         chip->state = FL_WRITING;
1600
1601         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1602                                    adr, len,
1603                                    chip->buffer_write_time);
1604         if (ret) {
1605                 map_write(map, CMD(0x70), cmd_adr);
1606                 chip->state = FL_STATUS;
1607                 xip_enable(map, chip, cmd_adr);
1608                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1609                 goto out;
1610         }
1611
1612         /* check for errors */
1613         status = map_read(map, cmd_adr);
1614         if (map_word_bitsset(map, status, CMD(0x1a))) {
1615                 unsigned long chipstatus = MERGESTATUS(status);
1616
1617                 /* reset status */
1618                 map_write(map, CMD(0x50), cmd_adr);
1619                 map_write(map, CMD(0x70), cmd_adr);
1620                 xip_enable(map, chip, cmd_adr);
1621
1622                 if (chipstatus & 0x02) {
1623                         ret = -EROFS;
1624                 } else if (chipstatus & 0x08) {
1625                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1626                         ret = -EIO;
1627                 } else {
1628                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1629                         ret = -EINVAL;
1630                 }
1631
1632                 goto out;
1633         }
1634
1635         xip_enable(map, chip, cmd_adr);
1636  out:   put_chip(map, chip, cmd_adr);
1637         spin_unlock(chip->mutex);
1638         return ret;
1639 }
1640
1641 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1642                                 unsigned long count, loff_t to, size_t *retlen)
1643 {
1644         struct map_info *map = mtd->priv;
1645         struct cfi_private *cfi = map->fldrv_priv;
1646         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1647         int ret = 0;
1648         int chipnum;
1649         unsigned long ofs, vec_seek, i;
1650         size_t len = 0;
1651
1652         for (i = 0; i < count; i++)
1653                 len += vecs[i].iov_len;
1654
1655         *retlen = 0;
1656         if (!len)
1657                 return 0;
1658
1659         chipnum = to >> cfi->chipshift;
1660         ofs = to - (chipnum << cfi->chipshift);
1661         vec_seek = 0;
1662
1663         do {
1664                 /* We must not cross write block boundaries */
1665                 int size = wbufsize - (ofs & (wbufsize-1));
1666
1667                 if (size > len)
1668                         size = len;
1669                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1670                                       ofs, &vecs, &vec_seek, size);
1671                 if (ret)
1672                         return ret;
1673
1674                 ofs += size;
1675                 (*retlen) += size;
1676                 len -= size;
1677
1678                 if (ofs >> cfi->chipshift) {
1679                         chipnum ++;
1680                         ofs = 0;
1681                         if (chipnum == cfi->numchips)
1682                                 return 0;
1683                 }
1684
1685                 /* Be nice and reschedule with the chip in a usable state for other
1686                    processes. */
1687                 cond_resched();
1688
1689         } while (len);
1690
1691         return 0;
1692 }
1693
1694 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1695                                        size_t len, size_t *retlen, const u_char *buf)
1696 {
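        /* Trivial wrapper: wrap the flat buffer in a one-element kvec and
           reuse the vector write path. */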
1697         struct kvec vec;
1698
1699         vec.iov_base = (void *) buf;
1700         vec.iov_len = len;
1701
1702         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1703 }
1704
1705 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1706                                       unsigned long adr, int len, void *thunk)
1707 {
1708         struct cfi_private *cfi = map->fldrv_priv;
1709         map_word status;
1710         int retries = 3;
1711         int ret;
1712
1713         adr += chip->start;
1714
1715  retry:
1716         spin_lock(chip->mutex);
1717         ret = get_chip(map, chip, adr, FL_ERASING);
1718         if (ret) {
1719                 spin_unlock(chip->mutex);
1720                 return ret;
1721         }
1722
1723         XIP_INVAL_CACHED_RANGE(map, adr, len);
1724         ENABLE_VPP(map);
1725         xip_disable(map, chip, adr);
1726
1727         /* Clear the status register first */
1728         map_write(map, CMD(0x50), adr);
1729
1730         /* Now erase */
1731         map_write(map, CMD(0x20), adr);
1732         map_write(map, CMD(0xD0), adr);
1733         chip->state = FL_ERASING;
1734         chip->erase_suspended = 0;
1735
1736         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1737                                    adr, len,
1738                                    chip->erase_time);
1739         if (ret) {
1740                 map_write(map, CMD(0x70), adr);
1741                 chip->state = FL_STATUS;
1742                 xip_enable(map, chip, adr);
1743                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1744                 goto out;
1745         }
1746
1747         /* We've broken this before. It doesn't hurt to be safe */
1748         map_write(map, CMD(0x70), adr);
1749         chip->state = FL_STATUS;
1750         status = map_read(map, adr);
1751
1752         /* check for errors */
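        /* 0x3a tests SR.5 (erase error), SR.4, SR.3 (VPP low) and SR.1
           (block locked); SR.4 and SR.5 set together indicate a bad
           command sequence, which is checked first below. */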
1753         if (map_word_bitsset(map, status, CMD(0x3a))) {
1754                 unsigned long chipstatus = MERGESTATUS(status);
1755
1756                 /* Reset the error bits */
1757                 map_write(map, CMD(0x50), adr);
1758                 map_write(map, CMD(0x70), adr);
1759                 xip_enable(map, chip, adr);
1760
1761                 if ((chipstatus & 0x30) == 0x30) {
1762                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1763                         ret = -EINVAL;
1764                 } else if (chipstatus & 0x02) {
1765                         /* Protection bit set */
1766                         ret = -EROFS;
1767                 } else if (chipstatus & 0x08) {
1768                         /* Voltage */
1769                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1770                         ret = -EIO;
1771                 } else if (chipstatus & 0x20 && retries--) {
1772                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1773                         put_chip(map, chip, adr);
1774                         spin_unlock(chip->mutex);
1775                         goto retry;
1776                 } else {
1777                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1778                         ret = -EIO;
1779                 }
1780
1781                 goto out;
1782         }
1783
1784         xip_enable(map, chip, adr);
1785  out:   put_chip(map, chip, adr);
1786         spin_unlock(chip->mutex);
1787         return ret;
1788 }
1789
1790 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1791 {
1792         unsigned long ofs, len;
1793         int ret;
1794
1795         ofs = instr->addr;
1796         len = instr->len;
1797
1798         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1799         if (ret)
1800                 return ret;
1801
1802         instr->state = MTD_ERASE_DONE;
1803         mtd_erase_callback(instr);
1804
1805         return 0;
1806 }
1807
1808 static void cfi_intelext_sync (struct mtd_info *mtd)
1809 {
1810         struct map_info *map = mtd->priv;
1811         struct cfi_private *cfi = map->fldrv_priv;
1812         int i;
1813         struct flchip *chip;
1814         int ret = 0;
1815
1816         for (i=0; !ret && i<cfi->numchips; i++) {
1817                 chip = &cfi->chips[i];
1818
1819                 spin_lock(chip->mutex);
1820                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1821
1822                 if (!ret) {
1823                         chip->oldstate = chip->state;
1824                         chip->state = FL_SYNCING;
1825                         /* No need to wake_up() on this state change -
1826                          * as the whole point is that nobody can do anything
1827                          * with the chip now anyway.
1828                          */
1829                 }
1830                 spin_unlock(chip->mutex);
1831         }
1832
1833         /* Unlock the chips again */
1834
1835         for (i--; i >=0; i--) {
1836                 chip = &cfi->chips[i];
1837
1838                 spin_lock(chip->mutex);
1839
1840                 if (chip->state == FL_SYNCING) {
1841                         chip->state = chip->oldstate;
1842                         chip->oldstate = FL_READY;
1843                         wake_up(&chip->wq);
1844                 }
1845                 spin_unlock(chip->mutex);
1846         }
1847 }
1848
1849 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1850                                                 struct flchip *chip,
1851                                                 unsigned long adr,
1852                                                 int len, void *thunk)
1853 {
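        /* The block lock status lives at word offset 2 of the identifier
           space entered with the 0x90 (read identifier) command; bit 0 of
           that word is the lock bit for the block at adr. */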
1854         struct cfi_private *cfi = map->fldrv_priv;
1855         int status, ofs_factor = cfi->interleave * cfi->device_type;
1856
1857         adr += chip->start;
1858         xip_disable(map, chip, adr+(2*ofs_factor));
1859         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1860         chip->state = FL_JEDEC_QUERY;
1861         status = cfi_read_query(map, adr+(2*ofs_factor));
1862         xip_enable(map, chip, 0);
1863         return status;
1864 }
1865
1866 #ifdef DEBUG_LOCK_BITS
1867 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1868                                                 struct flchip *chip,
1869                                                 unsigned long adr,
1870                                                 int len, void *thunk)
1871 {
1872         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1873                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1874         return 0;
1875 }
1876 #endif
1877
1878 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1879 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1880
1881 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1882                                        unsigned long adr, int len, void *thunk)
1883 {
1884         struct cfi_private *cfi = map->fldrv_priv;
1885         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1886         int udelay;
1887         int ret;
1888
1889         adr += chip->start;
1890
1891         spin_lock(chip->mutex);
1892         ret = get_chip(map, chip, adr, FL_LOCKING);
1893         if (ret) {
1894                 spin_unlock(chip->mutex);
1895                 return ret;
1896         }
1897
1898         ENABLE_VPP(map);
1899         xip_disable(map, chip, adr);
1900
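        /* Block lock/unlock is a two-cycle sequence: 0x60 (setup) followed
           by 0x01 to set the lock bit or 0xD0 to clear it. */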
1901         map_write(map, CMD(0x60), adr);
1902         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1903                 map_write(map, CMD(0x01), adr);
1904                 chip->state = FL_LOCKING;
1905         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1906                 map_write(map, CMD(0xD0), adr);
1907                 chip->state = FL_UNLOCKING;
1908         } else
1909                 BUG();
1910
1911         /*
1912          * If Instant Individual Block Locking is supported then there is
1913          * no need to delay.
1914          */
1915         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1916
1917         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1918         if (ret) {
1919                 map_write(map, CMD(0x70), adr);
1920                 chip->state = FL_STATUS;
1921                 xip_enable(map, chip, adr);
1922                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1923                 goto out;
1924         }
1925
1926         xip_enable(map, chip, adr);
1927 out:    put_chip(map, chip, adr);
1928         spin_unlock(chip->mutex);
1929         return ret;
1930 }
1931
1932 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1933 {
1934         int ret;
1935
1936 #ifdef DEBUG_LOCK_BITS
1937         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1938                __FUNCTION__, ofs, len);
1939         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1940                 ofs, len, NULL);
1941 #endif
1942
1943         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1944                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1945
1946 #ifdef DEBUG_LOCK_BITS
1947         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1948                __FUNCTION__, ret);
1949         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1950                 ofs, len, NULL);
1951 #endif
1952
1953         return ret;
1954 }
1955
1956 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1957 {
1958         int ret;
1959
1960 #ifdef DEBUG_LOCK_BITS
1961         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1962                __FUNCTION__, ofs, len);
1963         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1964                 ofs, len, NULL);
1965 #endif
1966
1967         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1968                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1969
1970 #ifdef DEBUG_LOCK_BITS
1971         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1972                __FUNCTION__, ret);
1973         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1974                 ofs, len, NULL);
1975 #endif
1976
1977         return ret;
1978 }
1979
1980 #ifdef CONFIG_MTD_OTP
1981
1982 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1983                         u_long data_offset, u_char *buf, u_int size,
1984                         u_long prot_offset, u_int groupno, u_int groupsize);
1985
1986 static int __xipram
1987 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1988             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1989 {
1990         struct cfi_private *cfi = map->fldrv_priv;
1991         int ret;
1992
1993         spin_lock(chip->mutex);
1994         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1995         if (ret) {
1996                 spin_unlock(chip->mutex);
1997                 return ret;
1998         }
1999
2000         /* let's ensure we're not reading back cached data from array mode */
2001         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2002
2003         xip_disable(map, chip, chip->start);
2004         if (chip->state != FL_JEDEC_QUERY) {
2005                 map_write(map, CMD(0x90), chip->start);
2006                 chip->state = FL_JEDEC_QUERY;
2007         }
2008         map_copy_from(map, buf, chip->start + offset, size);
2009         xip_enable(map, chip, chip->start);
2010
2011         /* then ensure we don't keep OTP data in the cache */
2012         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2013
2014         put_chip(map, chip, chip->start);
2015         spin_unlock(chip->mutex);
2016         return 0;
2017 }
2018
2019 static int
2020 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2021              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2022 {
2023         int ret;
2024
2025         while (size) {
2026                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2027                 int gap = offset - bus_ofs;
2028                 int n = min_t(int, size, map_bankwidth(map)-gap);
2029                 map_word datum = map_word_ff(map);
2030
2031                 datum = map_word_load_partial(map, datum, buf, gap, n);
2032                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2033                 if (ret)
2034                         return ret;
2035
2036                 offset += n;
2037                 buf += n;
2038                 size -= n;
2039         }
2040
2041         return 0;
2042 }
2043
2044 static int
2045 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2046             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2047 {
2048         struct cfi_private *cfi = map->fldrv_priv;
2049         map_word datum;
2050
2051         /* make sure area matches group boundaries */
2052         if (size != grpsz)
2053                 return -EXDEV;
2054
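        /* Lock the group by programming its bit in the protection lock
           word from 1 to 0; OTP bits can never be set back, so the lock
           is permanent. */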
2055         datum = map_word_ff(map);
2056         datum = map_word_clr(map, datum, CMD(1 << grpno));
2057         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2058 }
2059
2060 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2061                                  size_t *retlen, u_char *buf,
2062                                  otp_op_t action, int user_regs)
2063 {
2064         struct map_info *map = mtd->priv;
2065         struct cfi_private *cfi = map->fldrv_priv;
2066         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2067         struct flchip *chip;
2068         struct cfi_intelext_otpinfo *otp;
2069         u_long devsize, reg_prot_offset, data_offset;
2070         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2071         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2072         int ret;
2073
2074         *retlen = 0;
2075
2076         /* Check that we actually have some OTP registers */
2077         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2078                 return -ENODATA;
2079
2080         /* we need real chips here not virtual ones */
2081         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2082         chip_step = devsize >> cfi->chipshift;
2083         chip_num = 0;
2084
2085         /* Some chips have OTP located in the _top_ partition only.
2086            For example: Intel 28F256L18T (T means top-parameter device) */
2087         if (cfi->mfr == MANUFACTURER_INTEL) {
2088                 switch (cfi->id) {
2089                 case 0x880b:
2090                 case 0x880c:
2091                 case 0x880d:
2092                         chip_num = chip_step - 1;
2093                 }
2094         }
2095
2096         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2097                 chip = &cfi->chips[chip_num];
2098                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2099
2100                 /* first OTP region */
2101                 field = 0;
2102                 reg_prot_offset = extp->ProtRegAddr;
2103                 reg_fact_groups = 1;
2104                 reg_fact_size = 1 << extp->FactProtRegSize;
2105                 reg_user_groups = 1;
2106                 reg_user_size = 1 << extp->UserProtRegSize;
2107
2108                 while (len > 0) {
2109                         /* flash geometry fixup: scale the query-table word offsets/sizes by interleave and device type to get map byte values */
2110                         data_offset = reg_prot_offset + 1;
2111                         data_offset *= cfi->interleave * cfi->device_type;
2112                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2113                         reg_fact_size *= cfi->interleave;
2114                         reg_user_size *= cfi->interleave;
2115
2116                         if (user_regs) {
2117                                 groups = reg_user_groups;
2118                                 groupsize = reg_user_size;
2119                                 /* skip over factory reg area */
2120                                 groupno = reg_fact_groups;
2121                                 data_offset += reg_fact_groups * reg_fact_size;
2122                         } else {
2123                                 groups = reg_fact_groups;
2124                                 groupsize = reg_fact_size;
2125                                 groupno = 0;
2126                         }
2127
2128                         while (len > 0 && groups > 0) {
2129                                 if (!action) {
2130                                         /*
2131                                          * Special case: if action is NULL
2132                                          * we fill buf with otp_info records.
2133                                          */
2134                                         struct otp_info *otpinfo;
2135                                         map_word lockword;
2136                                         if (len <= sizeof(struct otp_info))
2137                                                 return -ENOSPC;
2138                                         len -= sizeof(struct otp_info);
2139                                         ret = do_otp_read(map, chip,
2140                                                           reg_prot_offset,
2141                                                           (u_char *)&lockword,
2142                                                           map_bankwidth(map),
2143                                                           0, 0,  0);
2144                                         if (ret)
2145                                                 return ret;
2146                                         otpinfo = (struct otp_info *)buf;
2147                                         otpinfo->start = from;
2148                                         otpinfo->length = groupsize;
2149                                         otpinfo->locked =
2150                                            !map_word_bitsset(map, lockword,
2151                                                              CMD(1 << groupno));
2152                                         from += groupsize;
2153                                         buf += sizeof(*otpinfo);
2154                                         *retlen += sizeof(*otpinfo);
2155                                 } else if (from >= groupsize) {
2156                                         from -= groupsize;
2157                                         data_offset += groupsize;
2158                                 } else {
2159                                         int size = groupsize;
2160                                         data_offset += from;
2161                                         size -= from;
2162                                         from = 0;
2163                                         if (size > len)
2164                                                 size = len;
2165                                         ret = action(map, chip, data_offset,
2166                                                      buf, size, reg_prot_offset,
2167                                                      groupno, groupsize);
2168                                         if (ret < 0)
2169                                                 return ret;
2170                                         buf += size;
2171                                         len -= size;
2172                                         *retlen += size;
2173                                         data_offset += size;
2174                                 }
2175                                 groupno++;
2176                                 groups--;
2177                         }
2178
2179                         /* next OTP region */
2180                         if (++field == extp->NumProtectionFields)
2181                                 break;
2182                         reg_prot_offset = otp->ProtRegAddr;
2183                         reg_fact_groups = otp->FactGroups;
2184                         reg_fact_size = 1 << otp->FactProtRegSize;
2185                         reg_user_groups = otp->UserGroups;
2186                         reg_user_size = 1 << otp->UserProtRegSize;
2187                         otp++;
2188                 }
2189         }
2190
2191         return 0;
2192 }
2193
2194 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2195                                            size_t len, size_t *retlen,
2196                                             u_char *buf)
2197 {
2198         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2199                                      buf, do_otp_read, 0);
2200 }
2201
2202 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2203                                            size_t len, size_t *retlen,
2204                                             u_char *buf)
2205 {
2206         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2207                                      buf, do_otp_read, 1);
2208 }
2209
2210 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2211                                             size_t len, size_t *retlen,
2212                                              u_char *buf)
2213 {
2214         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2215                                      buf, do_otp_write, 1);
2216 }
2217
2218 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2219                                            loff_t from, size_t len)
2220 {
2221         size_t retlen;
2222         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2223                                      NULL, do_otp_lock, 1);
2224 }
2225
2226 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2227                                            struct otp_info *buf, size_t len)
2228 {
2229         size_t retlen;
2230         int ret;
2231
2232         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2233         return ret ? : retlen;
2234 }
2235
2236 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2237                                            struct otp_info *buf, size_t len)
2238 {
2239         size_t retlen;
2240         int ret;
2241
2242         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2243         return ret ? : retlen;
2244 }
2245
2246 #endif
2247
2248 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2249 {
2250         struct mtd_erase_region_info *region;
2251         int block, status, i;
2252         unsigned long adr;
2253         size_t len;
2254
2255         for (i = 0; i < mtd->numeraseregions; i++) {
2256                 region = &mtd->eraseregions[i];
2257                 if (!region->lockmap)
2258                         continue;
2259
2260                 for (block = 0; block < region->numblocks; block++){
2261                         len = region->erasesize;
2262                         adr = region->offset + block * len;
2263
2264                         status = cfi_varsize_frob(mtd,
2265                                         do_getlockstatus_oneblock, adr, len, NULL);
2266                         if (status)
2267                                 set_bit(block, region->lockmap);
2268                         else
2269                                 clear_bit(block, region->lockmap);
2270                 }
2271         }
2272 }
2273
2274 static int cfi_intelext_suspend(struct mtd_info *mtd)
2275 {
2276         struct map_info *map = mtd->priv;
2277         struct cfi_private *cfi = map->fldrv_priv;
2278         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2279         int i;
2280         struct flchip *chip;
2281         int ret = 0;
2282
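        /* Chips that power up with all blocks locked (MTD_STUPID_LOCK plus
           the instant-locking feature bit) lose their lock state across a
           power cycle, so record it now and restore it on resume. */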
2283         if ((mtd->flags & MTD_STUPID_LOCK)
2284             && extp && (extp->FeatureSupport & (1 << 5)))
2285                 cfi_intelext_save_locks(mtd);
2286
2287         for (i=0; !ret && i<cfi->numchips; i++) {
2288                 chip = &cfi->chips[i];
2289
2290                 spin_lock(chip->mutex);
2291
2292                 switch (chip->state) {
2293                 case FL_READY:
2294                 case FL_STATUS:
2295                 case FL_CFI_QUERY:
2296                 case FL_JEDEC_QUERY:
2297                         if (chip->oldstate == FL_READY) {
2298                                 /* place the chip in a known state before suspend */
2299                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2300                                 chip->oldstate = chip->state;
2301                                 chip->state = FL_PM_SUSPENDED;
2302                                 /* No need to wake_up() on this state change -
2303                                  * as the whole point is that nobody can do anything
2304                                  * with the chip now anyway.
2305                                  */
2306                         } else {
2307                                 /* There seems to be an operation pending. We must wait for it. */
2308                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2309                                 ret = -EAGAIN;
2310                         }
2311                         break;
2312                 default:
2313                         /* Should we actually wait? Once upon a time these routines weren't
2314                            allowed to. Or should we return -EAGAIN, because the upper layers
2315                            ought to have already shut down anything which was using the device
2316                            anyway? The latter for now. */
2317                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2318                         ret = -EAGAIN; /* fall through */
2319                 case FL_PM_SUSPENDED:
2320                         break;
2321                 }
2322                 spin_unlock(chip->mutex);
2323         }
2324
2325         /* Unlock the chips again */
2326
2327         if (ret) {
2328                 for (i--; i >=0; i--) {
2329                         chip = &cfi->chips[i];
2330
2331                         spin_lock(chip->mutex);
2332
2333                         if (chip->state == FL_PM_SUSPENDED) {
2334                                 /* No need to force it into a known state here,
2335                                    because we're returning failure, and it didn't
2336                                    get power cycled */
2337                                 chip->state = chip->oldstate;
2338                                 chip->oldstate = FL_READY;
2339                                 wake_up(&chip->wq);
2340                         }
2341                         spin_unlock(chip->mutex);
2342                 }
2343         }
2344
2345         return ret;
2346 }
2347
2348 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2349 {
2350         struct mtd_erase_region_info *region;
2351         int block, i;
2352         unsigned long adr;
2353         size_t len;
2354
2355         for (i = 0; i < mtd->numeraseregions; i++) {
2356                 region = &mtd->eraseregions[i];
2357                 if (!region->lockmap)
2358                         continue;
2359
2360                 for (block = 0; block < region->numblocks; block++) {
2361                         len = region->erasesize;
2362                         adr = region->offset + block * len;
2363
2364                         if (!test_bit(block, region->lockmap))
2365                                 cfi_intelext_unlock(mtd, adr, len);
2366                 }
2367         }
2368 }
2369
2370 static void cfi_intelext_resume(struct mtd_info *mtd)
2371 {
2372         struct map_info *map = mtd->priv;
2373         struct cfi_private *cfi = map->fldrv_priv;
2374         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2375         int i;
2376         struct flchip *chip;
2377
2378         for (i=0; i<cfi->numchips; i++) {
2379
2380                 chip = &cfi->chips[i];
2381
2382                 spin_lock(chip->mutex);
2383
2384                 /* Go to known state. Chip may have been power cycled */
2385                 if (chip->state == FL_PM_SUSPENDED) {
2386                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2387                         chip->oldstate = chip->state = FL_READY;
2388                         wake_up(&chip->wq);
2389                 }
2390
2391                 spin_unlock(chip->mutex);
2392         }
2393
2394         if ((mtd->flags & MTD_STUPID_LOCK)
2395             && extp && (extp->FeatureSupport & (1 << 5)))
2396                 cfi_intelext_restore_locks(mtd);
2397 }
2398
2399 static int cfi_intelext_reset(struct mtd_info *mtd)
2400 {
2401         struct map_info *map = mtd->priv;
2402         struct cfi_private *cfi = map->fldrv_priv;
2403         int i, ret;
2404
2405         for (i=0; i < cfi->numchips; i++) {
2406                 struct flchip *chip = &cfi->chips[i];
2407
2408                 /* force the completion of any ongoing operation
2409                    and switch to array mode so any bootloader in
2410                    flash is accessible for soft reboot. */
2411                 spin_lock(chip->mutex);
2412                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2413                 if (!ret) {
2414                         map_write(map, CMD(0xff), chip->start);
2415                         chip->state = FL_READY;
2416                 }
2417                 spin_unlock(chip->mutex);
2418         }
2419
2420         return 0;
2421 }
2422
2423 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2424                                void *v)
2425 {
2426         struct mtd_info *mtd;
2427
2428         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2429         cfi_intelext_reset(mtd);
2430         return NOTIFY_DONE;
2431 }
2432
2433 static void cfi_intelext_destroy(struct mtd_info *mtd)
2434 {
2435         struct map_info *map = mtd->priv;
2436         struct cfi_private *cfi = map->fldrv_priv;
2437         struct mtd_erase_region_info *region;
2438         int i;
2439         cfi_intelext_reset(mtd);
2440         unregister_reboot_notifier(&mtd->reboot_notifier);
2441         kfree(cfi->cmdset_priv);
2442         kfree(cfi->cfiq);
2443         kfree(cfi->chips[0].priv);
2444         kfree(cfi);
2445         for (i = 0; i < mtd->numeraseregions; i++) {
2446                 region = &mtd->eraseregions[i];
2447                 kfree(region->lockmap);
2449         }
2450         kfree(mtd->eraseregions);
2451 }
2452
2453 MODULE_LICENSE("GPL");
2454 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2455 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2456 MODULE_ALIAS("cfi_cmdset_0003");
2457 MODULE_ALIAS("cfi_cmdset_0200");