Linux/drivers/mtd/nand/pxa3xx_nand.c

  1 /*
  2  * drivers/mtd/nand/pxa3xx_nand.c
  3  *
  4  * Copyright © 2005 Intel Corporation
  5  * Copyright © 2006 Marvell International Ltd.
  6  *
  7  * This program is free software; you can redistribute it and/or modify
  8  * it under the terms of the GNU General Public License version 2 as
  9  * published by the Free Software Foundation.
 10  *
 11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 12  */
 13 
 14 #include <linux/kernel.h>
 15 #include <linux/module.h>
 16 #include <linux/interrupt.h>
 17 #include <linux/platform_device.h>
 18 #include <linux/dma-mapping.h>
 19 #include <linux/delay.h>
 20 #include <linux/clk.h>
 21 #include <linux/mtd/mtd.h>
 22 #include <linux/mtd/nand.h>
 23 #include <linux/mtd/partitions.h>
 24 #include <linux/io.h>
 25 #include <linux/irq.h>
 26 #include <linux/slab.h>
 27 #include <linux/of.h>
 28 #include <linux/of_device.h>
 29 #include <linux/of_mtd.h>
 30 
 31 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 32 #define ARCH_HAS_DMA
 33 #endif
 34 
 35 #ifdef ARCH_HAS_DMA
 36 #include <mach/dma.h>
 37 #endif
 38 
 39 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 40 
 41 #define NAND_DEV_READY_TIMEOUT  50
 42 #define CHIP_DELAY_TIMEOUT      (2 * HZ/10)
 43 #define NAND_STOP_DELAY         (2 * HZ/50)
 44 #define PAGE_CHUNK_SIZE         (2048)
 45 
 46 /*
 47  * Define a buffer size for the initial command that detects the flash device:
 48  * STATUS, READID and PARAM. The largest of these is the PARAM command,
 49  * needing 256 bytes.
 50  */
 51 #define INIT_BUFFER_SIZE        256
 52 
 53 /* registers and bit definitions */
 54 #define NDCR            (0x00) /* Control register */
 55 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
 56 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
 57 #define NDSR            (0x14) /* Status Register */
 58 #define NDPCR           (0x18) /* Page Count Register */
 59 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
 60 #define NDBDR1          (0x20) /* Bad Block Register 1 */
 61 #define NDECCCTRL       (0x28) /* ECC control */
 62 #define NDDB            (0x40) /* Data Buffer */
 63 #define NDCB0           (0x48) /* Command Buffer0 */
 64 #define NDCB1           (0x4C) /* Command Buffer1 */
 65 #define NDCB2           (0x50) /* Command Buffer2 */
 66 
 67 #define NDCR_SPARE_EN           (0x1 << 31)
 68 #define NDCR_ECC_EN             (0x1 << 30)
 69 #define NDCR_DMA_EN             (0x1 << 29)
 70 #define NDCR_ND_RUN             (0x1 << 28)
 71 #define NDCR_DWIDTH_C           (0x1 << 27)
 72 #define NDCR_DWIDTH_M           (0x1 << 26)
 73 #define NDCR_PAGE_SZ            (0x1 << 24)
 74 #define NDCR_NCSX               (0x1 << 23)
 75 #define NDCR_ND_MODE            (0x3 << 21)
 76 #define NDCR_NAND_MODE          (0x0)
 77 #define NDCR_CLR_PG_CNT         (0x1 << 20)
 78 #define NDCR_STOP_ON_UNCOR      (0x1 << 19)
 79 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
 80 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
 81 
 82 #define NDCR_RA_START           (0x1 << 15)
 83 #define NDCR_PG_PER_BLK         (0x1 << 14)
 84 #define NDCR_ND_ARB_EN          (0x1 << 12)
 85 #define NDCR_INT_MASK           (0xFFF)
 86 
 87 #define NDSR_MASK               (0xfff)
 88 #define NDSR_ERR_CNT_OFF        (16)
 89 #define NDSR_ERR_CNT_MASK       (0x1f)
 90 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
 91 #define NDSR_RDY                (0x1 << 12)
 92 #define NDSR_FLASH_RDY          (0x1 << 11)
 93 #define NDSR_CS0_PAGED          (0x1 << 10)
 94 #define NDSR_CS1_PAGED          (0x1 << 9)
 95 #define NDSR_CS0_CMDD           (0x1 << 8)
 96 #define NDSR_CS1_CMDD           (0x1 << 7)
 97 #define NDSR_CS0_BBD            (0x1 << 6)
 98 #define NDSR_CS1_BBD            (0x1 << 5)
 99 #define NDSR_UNCORERR           (0x1 << 4)
100 #define NDSR_CORERR             (0x1 << 3)
101 #define NDSR_WRDREQ             (0x1 << 2)
102 #define NDSR_RDDREQ             (0x1 << 1)
103 #define NDSR_WRCMDREQ           (0x1)
104 
105 #define NDCB0_LEN_OVRD          (0x1 << 28)
106 #define NDCB0_ST_ROW_EN         (0x1 << 26)
107 #define NDCB0_AUTO_RS           (0x1 << 25)
108 #define NDCB0_CSEL              (0x1 << 24)
109 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
110 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
111 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
112 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
113 #define NDCB0_NC                (0x1 << 20)
114 #define NDCB0_DBC               (0x1 << 19)
115 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
116 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
117 #define NDCB0_CMD2_MASK         (0xff << 8)
118 #define NDCB0_CMD1_MASK         (0xff)
119 #define NDCB0_ADDR_CYC_SHIFT    (16)
120 
121 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
122 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
123 #define EXT_CMD_TYPE_READ       4 /* Read */
124 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
125 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
126 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
127 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
128 
129 /* macros for registers read/write */
130 #define nand_writel(info, off, val)     \
131         __raw_writel((val), (info)->mmio_base + (off))
132 
133 #define nand_readl(info, off)           \
134         __raw_readl((info)->mmio_base + (off))
135 
136 /* error code and state */
137 enum {
138         ERR_NONE        = 0,
139         ERR_DMABUSERR   = -1,
140         ERR_SENDCMD     = -2,
141         ERR_UNCORERR    = -3,
142         ERR_BBERR       = -4,
143         ERR_CORERR      = -5,
144 };
145 
146 enum {
147         STATE_IDLE = 0,
148         STATE_PREPARED,
149         STATE_CMD_HANDLE,
150         STATE_DMA_READING,
151         STATE_DMA_WRITING,
152         STATE_DMA_DONE,
153         STATE_PIO_READING,
154         STATE_PIO_WRITING,
155         STATE_CMD_DONE,
156         STATE_READY,
157 };
158 
159 enum pxa3xx_nand_variant {
160         PXA3XX_NAND_VARIANT_PXA,
161         PXA3XX_NAND_VARIANT_ARMADA370,
162 };
163 
164 struct pxa3xx_nand_host {
165         struct nand_chip        chip;
166         struct mtd_info         *mtd;
167         void                    *info_data;
168 
169         /* page size of attached chip */
170         int                     use_ecc;
171         int                     cs;
172 
173         /* calculated from pxa3xx_nand_flash data */
174         unsigned int            col_addr_cycles;
175         unsigned int            row_addr_cycles;
176         size_t                  read_id_bytes;
177 
178 };
179 
180 struct pxa3xx_nand_info {
181         struct nand_hw_control  controller;
182         struct platform_device   *pdev;
183 
184         struct clk              *clk;
185         void __iomem            *mmio_base;
186         unsigned long           mmio_phys;
187         struct completion       cmd_complete, dev_ready;
188 
189         unsigned int            buf_start;
190         unsigned int            buf_count;
191         unsigned int            buf_size;
192         unsigned int            data_buff_pos;
193         unsigned int            oob_buff_pos;
194 
195         /* DMA information */
196         int                     drcmr_dat;
197         int                     drcmr_cmd;
198 
199         unsigned char           *data_buff;
200         unsigned char           *oob_buff;
201         dma_addr_t              data_buff_phys;
202         int                     data_dma_ch;
203         struct pxa_dma_desc     *data_desc;
204         dma_addr_t              data_desc_addr;
205 
206         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
207         unsigned int            state;
208 
209         /*
210          * This driver supports NFCv1 (as found in PXA SoC)
211          * and NFCv2 (as found in Armada 370/XP SoC).
212          */
213         enum pxa3xx_nand_variant variant;
214 
215         int                     cs;
216         int                     use_ecc;        /* use HW ECC ? */
217         int                     ecc_bch;        /* using BCH ECC? */
218         int                     use_dma;        /* use DMA ? */
219         int                     use_spare;      /* use spare ? */
220         int                     need_wait;
221 
222         unsigned int            data_size;      /* data to be read from FIFO */
223         unsigned int            chunk_size;     /* split commands chunk size */
224         unsigned int            oob_size;
225         unsigned int            spare_size;
226         unsigned int            ecc_size;
227         unsigned int            ecc_err_cnt;
228         unsigned int            max_bitflips;
229         int                     retcode;
230 
231         /* cached register value */
232         uint32_t                reg_ndcr;
233         uint32_t                ndtr0cs0;
234         uint32_t                ndtr1cs0;
235 
236         /* generated NDCBx register values */
237         uint32_t                ndcb0;
238         uint32_t                ndcb1;
239         uint32_t                ndcb2;
240         uint32_t                ndcb3;
241 };
242 
243 static bool use_dma = 1;
244 module_param(use_dma, bool, 0444);
245 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
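
/*
 * Usage sketch (assuming the driver is built as a module named
 * "pxa3xx_nand"): DMA can be disabled at load time with
 * "modprobe pxa3xx_nand use_dma=0", or with "pxa3xx_nand.use_dma=0"
 * on the kernel command line when the driver is built in.
 */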
246 
247 static struct pxa3xx_nand_timing timing[] = {
248         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
249         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
250         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
251         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
252 };
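
/*
 * Each timing[] row above follows the field order of
 * struct pxa3xx_nand_timing (see
 * include/linux/platform_data/mtd-nand-pxa3xx.h): tCH, tCS, tWH, tWP,
 * tRH, tRP, tR, tWHR and tAR, all given in nanoseconds and converted
 * to controller clock cycles by pxa3xx_nand_set_timing() below.
 */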
253 
254 static struct pxa3xx_nand_flash builtin_flash_types[] = {
255 { "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
256 { "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
257 { "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
258 { "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
259 { "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
260 { "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
261 { "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
262 { "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
263 { "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
264 };
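
/*
 * The builtin_flash_types[] columns above map to the fields of
 * struct pxa3xx_nand_flash: name, chip_id, page_per_block, page_size,
 * flash_width, dfc_width, num_blocks and a pointer into timing[].
 * For example, the "256MiB 8-bit" entry describes a chip with
 * 2048 blocks * 64 pages * 2048 bytes = 256 MiB of main area.
 */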
265 
 266 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
267 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
268 
269 static struct nand_bbt_descr bbt_main_descr = {
270         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
271                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
272         .offs = 8,
273         .len = 6,
274         .veroffs = 14,
275         .maxblocks = 8,         /* Last 8 blocks in each chip */
276         .pattern = bbt_pattern
277 };
278 
279 static struct nand_bbt_descr bbt_mirror_descr = {
280         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
281                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
282         .offs = 8,
283         .len = 6,
284         .veroffs = 14,
285         .maxblocks = 8,         /* Last 8 blocks in each chip */
286         .pattern = bbt_mirror_pattern
287 };
288 
289 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
290         .eccbytes = 32,
291         .eccpos = {
292                 32, 33, 34, 35, 36, 37, 38, 39,
293                 40, 41, 42, 43, 44, 45, 46, 47,
294                 48, 49, 50, 51, 52, 53, 54, 55,
295                 56, 57, 58, 59, 60, 61, 62, 63},
296         .oobfree = { {2, 30} }
297 };
298 
299 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
300         .eccbytes = 64,
301         .eccpos = {
302                 32,  33,  34,  35,  36,  37,  38,  39,
303                 40,  41,  42,  43,  44,  45,  46,  47,
304                 48,  49,  50,  51,  52,  53,  54,  55,
305                 56,  57,  58,  59,  60,  61,  62,  63,
306                 96,  97,  98,  99,  100, 101, 102, 103,
307                 104, 105, 106, 107, 108, 109, 110, 111,
308                 112, 113, 114, 115, 116, 117, 118, 119,
309                 120, 121, 122, 123, 124, 125, 126, 127},
310         /* Bootrom looks in bytes 0 & 5 for bad blocks */
311         .oobfree = { {6, 26}, { 64, 32} }
312 };
313 
314 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
315         .eccbytes = 128,
316         .eccpos = {
317                 32,  33,  34,  35,  36,  37,  38,  39,
318                 40,  41,  42,  43,  44,  45,  46,  47,
319                 48,  49,  50,  51,  52,  53,  54,  55,
320                 56,  57,  58,  59,  60,  61,  62,  63},
321         .oobfree = { }
322 };
323 
324 /* Define a default flash type setting serve as flash detecting only */
325 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
326 
327 #define NDTR0_tCH(c)    (min((c), 7) << 19)
328 #define NDTR0_tCS(c)    (min((c), 7) << 16)
329 #define NDTR0_tWH(c)    (min((c), 7) << 11)
330 #define NDTR0_tWP(c)    (min((c), 7) << 8)
331 #define NDTR0_tRH(c)    (min((c), 7) << 3)
332 #define NDTR0_tRP(c)    (min((c), 7) << 0)
333 
334 #define NDTR1_tR(c)     (min((c), 65535) << 16)
335 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
336 #define NDTR1_tAR(c)    (min((c), 15) << 0)
337 
338 /* convert nano-seconds to nand flash controller clock cycles */
339 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
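
/*
 * Worked example (the clock rate below is illustrative only): with a
 * 156 MHz controller clock, clk / 1000000 = 156, so
 * ns2cycle(40, 156000000) = 40 * 156 / 1000 = 6 cycles; the integer
 * division always rounds the result down.
 */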
340 
341 static struct of_device_id pxa3xx_nand_dt_ids[] = {
342         {
343                 .compatible = "marvell,pxa3xx-nand",
344                 .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
345         },
346         {
347                 .compatible = "marvell,armada370-nand",
348                 .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
349         },
350         {}
351 };
352 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
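
/*
 * Minimal device tree sketch for matching this driver (the unit
 * address, reg and interrupt values are placeholders, and the binding
 * defines further optional properties that are not shown here):
 *
 *	nand@d00d0000 {
 *		compatible = "marvell,armada370-nand";
 *		reg = <0xd00d0000 0x54>;
 *		interrupts = <0 84 4>;
 *	};
 */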
353 
354 static enum pxa3xx_nand_variant
355 pxa3xx_nand_get_variant(struct platform_device *pdev)
356 {
357         const struct of_device_id *of_id =
358                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
359         if (!of_id)
360                 return PXA3XX_NAND_VARIANT_PXA;
361         return (enum pxa3xx_nand_variant)of_id->data;
362 }
363 
364 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
365                                    const struct pxa3xx_nand_timing *t)
366 {
367         struct pxa3xx_nand_info *info = host->info_data;
368         unsigned long nand_clk = clk_get_rate(info->clk);
369         uint32_t ndtr0, ndtr1;
370 
371         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
372                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
373                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
374                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
375                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
376                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
377 
378         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
379                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
380                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
381 
382         info->ndtr0cs0 = ndtr0;
383         info->ndtr1cs0 = ndtr1;
384         nand_writel(info, NDTR0CS0, ndtr0);
385         nand_writel(info, NDTR1CS0, ndtr1);
386 }
387 
388 /*
389  * Set the data and OOB size, depending on the selected
390  * spare and ECC configuration.
391  * Only applicable to READ0, READOOB and PAGEPROG commands.
392  */
393 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
394                                 struct mtd_info *mtd)
395 {
396         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
397 
398         info->data_size = mtd->writesize;
399         if (!oob_enable)
400                 return;
401 
402         info->oob_size = info->spare_size;
403         if (!info->use_ecc)
404                 info->oob_size += info->ecc_size;
405 }
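
/*
 * Example, assuming NDCR_SPARE_EN is set and the 2 KiB Hamming
 * configuration chosen by pxa_ecc_init() below (spare_size = 40,
 * ecc_size = 24): a page read with hardware ECC enabled yields
 * data_size = 2048 and oob_size = 40, while the same read with ECC
 * disabled yields oob_size = 40 + 24 = 64.
 */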
406 
 407 /*
 408  * NOTE: ND_RUN must be set first and the command buffer written
 409  * afterwards, otherwise the controller does not start.
 410  * We enable all the interrupts at the same time and let
 411  * pxa3xx_nand_irq handle all the logic.
 412  */
413 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
414 {
415         uint32_t ndcr;
416 
417         ndcr = info->reg_ndcr;
418 
419         if (info->use_ecc) {
420                 ndcr |= NDCR_ECC_EN;
421                 if (info->ecc_bch)
422                         nand_writel(info, NDECCCTRL, 0x1);
423         } else {
424                 ndcr &= ~NDCR_ECC_EN;
425                 if (info->ecc_bch)
426                         nand_writel(info, NDECCCTRL, 0x0);
427         }
428 
429         if (info->use_dma)
430                 ndcr |= NDCR_DMA_EN;
431         else
432                 ndcr &= ~NDCR_DMA_EN;
433 
434         if (info->use_spare)
435                 ndcr |= NDCR_SPARE_EN;
436         else
437                 ndcr &= ~NDCR_SPARE_EN;
438 
439         ndcr |= NDCR_ND_RUN;
440 
441         /* clear status bits and run */
442         nand_writel(info, NDCR, 0);
443         nand_writel(info, NDSR, NDSR_MASK);
444         nand_writel(info, NDCR, ndcr);
445 }
446 
447 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
448 {
449         uint32_t ndcr;
450         int timeout = NAND_STOP_DELAY;
451 
 452         /* wait for the RUN bit in NDCR to clear */
453         ndcr = nand_readl(info, NDCR);
454         while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
455                 ndcr = nand_readl(info, NDCR);
456                 udelay(1);
457         }
458 
459         if (timeout <= 0) {
460                 ndcr &= ~NDCR_ND_RUN;
461                 nand_writel(info, NDCR, ndcr);
462         }
463         /* clear status bits */
464         nand_writel(info, NDSR, NDSR_MASK);
465 }
466 
467 static void __maybe_unused
468 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
469 {
470         uint32_t ndcr;
471 
472         ndcr = nand_readl(info, NDCR);
473         nand_writel(info, NDCR, ndcr & ~int_mask);
474 }
475 
476 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
477 {
478         uint32_t ndcr;
479 
480         ndcr = nand_readl(info, NDCR);
481         nand_writel(info, NDCR, ndcr | int_mask);
482 }
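
/*
 * Note the inverted sense of the NDCR interrupt bits: the low 12 bits
 * (NDCR_INT_MASK) are mask bits, so enable_int() clears them to unmask
 * the requested interrupts and disable_int() sets them to mask the
 * interrupts again.
 */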
483 
484 static void handle_data_pio(struct pxa3xx_nand_info *info)
485 {
486         unsigned int do_bytes = min(info->data_size, info->chunk_size);
487 
488         switch (info->state) {
489         case STATE_PIO_WRITING:
490                 __raw_writesl(info->mmio_base + NDDB,
491                               info->data_buff + info->data_buff_pos,
492                               DIV_ROUND_UP(do_bytes, 4));
493 
494                 if (info->oob_size > 0)
495                         __raw_writesl(info->mmio_base + NDDB,
496                                       info->oob_buff + info->oob_buff_pos,
497                                       DIV_ROUND_UP(info->oob_size, 4));
498                 break;
499         case STATE_PIO_READING:
500                 __raw_readsl(info->mmio_base + NDDB,
501                              info->data_buff + info->data_buff_pos,
502                              DIV_ROUND_UP(do_bytes, 4));
503 
504                 if (info->oob_size > 0)
505                         __raw_readsl(info->mmio_base + NDDB,
506                                      info->oob_buff + info->oob_buff_pos,
507                                      DIV_ROUND_UP(info->oob_size, 4));
508                 break;
509         default:
510                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
511                                 info->state);
512                 BUG();
513         }
514 
515         /* Update buffer pointers for multi-page read/write */
516         info->data_buff_pos += do_bytes;
517         info->oob_buff_pos += info->oob_size;
518         info->data_size -= do_bytes;
519 }
520 
521 #ifdef ARCH_HAS_DMA
522 static void start_data_dma(struct pxa3xx_nand_info *info)
523 {
524         struct pxa_dma_desc *desc = info->data_desc;
525         int dma_len = ALIGN(info->data_size + info->oob_size, 32);
526 
527         desc->ddadr = DDADR_STOP;
528         desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
529 
530         switch (info->state) {
531         case STATE_DMA_WRITING:
532                 desc->dsadr = info->data_buff_phys;
533                 desc->dtadr = info->mmio_phys + NDDB;
534                 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
535                 break;
536         case STATE_DMA_READING:
537                 desc->dtadr = info->data_buff_phys;
538                 desc->dsadr = info->mmio_phys + NDDB;
539                 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
540                 break;
541         default:
542                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
543                                 info->state);
544                 BUG();
545         }
546 
547         DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
548         DDADR(info->data_dma_ch) = info->data_desc_addr;
549         DCSR(info->data_dma_ch) |= DCSR_RUN;
550 }
551 
552 static void pxa3xx_nand_data_dma_irq(int channel, void *data)
553 {
554         struct pxa3xx_nand_info *info = data;
555         uint32_t dcsr;
556 
557         dcsr = DCSR(channel);
558         DCSR(channel) = dcsr;
559 
560         if (dcsr & DCSR_BUSERR) {
561                 info->retcode = ERR_DMABUSERR;
562         }
563 
564         info->state = STATE_DMA_DONE;
565         enable_int(info, NDCR_INT_MASK);
566         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
567 }
568 #else
569 static void start_data_dma(struct pxa3xx_nand_info *info)
570 {}
571 #endif
572 
573 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
574 {
575         struct pxa3xx_nand_info *info = devid;
576         unsigned int status, is_completed = 0, is_ready = 0;
577         unsigned int ready, cmd_done;
578 
579         if (info->cs == 0) {
580                 ready           = NDSR_FLASH_RDY;
581                 cmd_done        = NDSR_CS0_CMDD;
582         } else {
583                 ready           = NDSR_RDY;
584                 cmd_done        = NDSR_CS1_CMDD;
585         }
586 
587         status = nand_readl(info, NDSR);
588 
589         if (status & NDSR_UNCORERR)
590                 info->retcode = ERR_UNCORERR;
591         if (status & NDSR_CORERR) {
592                 info->retcode = ERR_CORERR;
593                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
594                     info->ecc_bch)
595                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
596                 else
597                         info->ecc_err_cnt = 1;
598 
599                 /*
600                  * Each chunk composing a page is corrected independently,
 601                  * and we need to store the maximum number of corrected bitflips
602                  * to return it to the MTD layer in ecc.read_page().
603                  */
604                 info->max_bitflips = max_t(unsigned int,
605                                            info->max_bitflips,
606                                            info->ecc_err_cnt);
607         }
608         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
 609                 /* decide whether to use DMA or PIO to transfer the data */
610                 if (info->use_dma) {
611                         disable_int(info, NDCR_INT_MASK);
612                         info->state = (status & NDSR_RDDREQ) ?
613                                       STATE_DMA_READING : STATE_DMA_WRITING;
614                         start_data_dma(info);
615                         goto NORMAL_IRQ_EXIT;
616                 } else {
617                         info->state = (status & NDSR_RDDREQ) ?
618                                       STATE_PIO_READING : STATE_PIO_WRITING;
619                         handle_data_pio(info);
620                 }
621         }
622         if (status & cmd_done) {
623                 info->state = STATE_CMD_DONE;
624                 is_completed = 1;
625         }
626         if (status & ready) {
627                 info->state = STATE_READY;
628                 is_ready = 1;
629         }
630 
631         if (status & NDSR_WRCMDREQ) {
632                 nand_writel(info, NDSR, NDSR_WRCMDREQ);
633                 status &= ~NDSR_WRCMDREQ;
634                 info->state = STATE_CMD_HANDLE;
635 
636                 /*
637                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
 638                  * must be loaded by writing either 12 or 16 bytes
 639                  * to NDCB0, four bytes at a time.
640                  *
641                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
642                  * but each NDCBx register can be read.
643                  */
644                 nand_writel(info, NDCB0, info->ndcb0);
645                 nand_writel(info, NDCB0, info->ndcb1);
646                 nand_writel(info, NDCB0, info->ndcb2);
647 
648                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
649                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
650                         nand_writel(info, NDCB0, info->ndcb3);
651         }
652 
653         /* clear NDSR to let the controller exit the IRQ */
654         nand_writel(info, NDSR, status);
655         if (is_completed)
656                 complete(&info->cmd_complete);
657         if (is_ready)
658                 complete(&info->dev_ready);
659 NORMAL_IRQ_EXIT:
660         return IRQ_HANDLED;
661 }
662 
663 static inline int is_buf_blank(uint8_t *buf, size_t len)
664 {
665         for (; len > 0; len--)
666                 if (*buf++ != 0xff)
667                         return 0;
668         return 1;
669 }
670 
671 static void set_command_address(struct pxa3xx_nand_info *info,
672                 unsigned int page_size, uint16_t column, int page_addr)
673 {
674         /* small page addr setting */
675         if (page_size < PAGE_CHUNK_SIZE) {
676                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
677                                 | (column & 0xFF);
678 
679                 info->ndcb2 = 0;
680         } else {
681                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
682                                 | (column & 0xFFFF);
683 
684                 if (page_addr & 0xFF0000)
685                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
686                 else
687                         info->ndcb2 = 0;
688         }
689 }
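
/*
 * Worked example (illustrative values): for a large-page device,
 * column = 0 and page_addr = 0x12345 give
 * ndcb1 = (0x2345 << 16) | 0x0000 = 0x23450000 and ndcb2 = 0x01,
 * i.e. the low 16 bits of the page address share NDCB1 with the
 * column address and the third row-address byte goes into NDCB2.
 */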
690 
691 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
692 {
693         struct pxa3xx_nand_host *host = info->host[info->cs];
694         struct mtd_info *mtd = host->mtd;
695 
 696         /* reset the data and OOB buffer positions before handling data */
697         info->buf_start         = 0;
698         info->buf_count         = 0;
699         info->oob_size          = 0;
700         info->data_buff_pos     = 0;
701         info->oob_buff_pos      = 0;
702         info->use_ecc           = 0;
703         info->use_spare         = 1;
704         info->retcode           = ERR_NONE;
705         info->ecc_err_cnt       = 0;
706         info->ndcb3             = 0;
707         info->need_wait         = 0;
708 
709         switch (command) {
710         case NAND_CMD_READ0:
711         case NAND_CMD_PAGEPROG:
712                 info->use_ecc = 1;
713         case NAND_CMD_READOOB:
714                 pxa3xx_set_datasize(info, mtd);
715                 break;
716         case NAND_CMD_PARAM:
717                 info->use_spare = 0;
718                 break;
719         default:
720                 info->ndcb1 = 0;
721                 info->ndcb2 = 0;
722                 break;
723         }
724 
725         /*
726          * If we are about to issue a read command, or about to set
727          * the write address, then clean the data buffer.
728          */
729         if (command == NAND_CMD_READ0 ||
730             command == NAND_CMD_READOOB ||
731             command == NAND_CMD_SEQIN) {
732 
733                 info->buf_count = mtd->writesize + mtd->oobsize;
734                 memset(info->data_buff, 0xFF, info->buf_count);
735         }
736 
737 }
738 
739 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
740                 int ext_cmd_type, uint16_t column, int page_addr)
741 {
742         int addr_cycle, exec_cmd;
743         struct pxa3xx_nand_host *host;
744         struct mtd_info *mtd;
745 
746         host = info->host[info->cs];
747         mtd = host->mtd;
748         addr_cycle = 0;
749         exec_cmd = 1;
750 
751         if (info->cs != 0)
752                 info->ndcb0 = NDCB0_CSEL;
753         else
754                 info->ndcb0 = 0;
755 
756         if (command == NAND_CMD_SEQIN)
757                 exec_cmd = 0;
758 
759         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
760                                     + host->col_addr_cycles);
761 
762         switch (command) {
763         case NAND_CMD_READOOB:
764         case NAND_CMD_READ0:
765                 info->buf_start = column;
766                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
767                                 | addr_cycle
768                                 | NAND_CMD_READ0;
769 
770                 if (command == NAND_CMD_READOOB)
771                         info->buf_start += mtd->writesize;
772 
773                 /*
774                  * Multiple page read needs an 'extended command type' field,
775                  * which is either naked-read or last-read according to the
776                  * state.
777                  */
778                 if (mtd->writesize == PAGE_CHUNK_SIZE) {
779                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
780                 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
781                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
782                                         | NDCB0_LEN_OVRD
783                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
784                         info->ndcb3 = info->chunk_size +
785                                       info->oob_size;
786                 }
787 
788                 set_command_address(info, mtd->writesize, column, page_addr);
789                 break;
790 
791         case NAND_CMD_SEQIN:
792 
793                 info->buf_start = column;
794                 set_command_address(info, mtd->writesize, 0, page_addr);
795 
796                 /*
797                  * Multiple page programming needs to execute the initial
798                  * SEQIN command that sets the page address.
799                  */
800                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
801                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
802                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
803                                 | addr_cycle
804                                 | command;
805                         /* No data transfer in this case */
806                         info->data_size = 0;
807                         exec_cmd = 1;
808                 }
809                 break;
810 
811         case NAND_CMD_PAGEPROG:
812                 if (is_buf_blank(info->data_buff,
813                                         (mtd->writesize + mtd->oobsize))) {
814                         exec_cmd = 0;
815                         break;
816                 }
817 
818                 /* Second command setting for large pages */
819                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
820                         /*
821                          * Multiple page write uses the 'extended command'
822                          * field. This can be used to issue a command dispatch
823                          * or a naked-write depending on the current stage.
824                          */
825                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
826                                         | NDCB0_LEN_OVRD
827                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
828                         info->ndcb3 = info->chunk_size +
829                                       info->oob_size;
830 
831                         /*
832                          * This is the command dispatch that completes a chunked
833                          * page program operation.
834                          */
835                         if (info->data_size == 0) {
836                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
837                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
838                                         | command;
839                                 info->ndcb1 = 0;
840                                 info->ndcb2 = 0;
841                                 info->ndcb3 = 0;
842                         }
843                 } else {
844                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
845                                         | NDCB0_AUTO_RS
846                                         | NDCB0_ST_ROW_EN
847                                         | NDCB0_DBC
848                                         | (NAND_CMD_PAGEPROG << 8)
849                                         | NAND_CMD_SEQIN
850                                         | addr_cycle;
851                 }
852                 break;
853 
854         case NAND_CMD_PARAM:
855                 info->buf_count = 256;
856                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
857                                 | NDCB0_ADDR_CYC(1)
858                                 | NDCB0_LEN_OVRD
859                                 | command;
860                 info->ndcb1 = (column & 0xFF);
861                 info->ndcb3 = 256;
862                 info->data_size = 256;
863                 break;
864 
865         case NAND_CMD_READID:
866                 info->buf_count = host->read_id_bytes;
867                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
868                                 | NDCB0_ADDR_CYC(1)
869                                 | command;
870                 info->ndcb1 = (column & 0xFF);
871 
872                 info->data_size = 8;
873                 break;
874         case NAND_CMD_STATUS:
875                 info->buf_count = 1;
876                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
877                                 | NDCB0_ADDR_CYC(1)
878                                 | command;
879 
880                 info->data_size = 8;
881                 break;
882 
883         case NAND_CMD_ERASE1:
884                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
885                                 | NDCB0_AUTO_RS
886                                 | NDCB0_ADDR_CYC(3)
887                                 | NDCB0_DBC
888                                 | (NAND_CMD_ERASE2 << 8)
889                                 | NAND_CMD_ERASE1;
890                 info->ndcb1 = page_addr;
891                 info->ndcb2 = 0;
892 
893                 break;
894         case NAND_CMD_RESET:
895                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
896                                 | command;
897 
898                 break;
899 
900         case NAND_CMD_ERASE2:
901                 exec_cmd = 0;
902                 break;
903 
904         default:
905                 exec_cmd = 0;
906                 dev_err(&info->pdev->dev, "non-supported command %x\n",
907                                 command);
908                 break;
909         }
910 
911         return exec_cmd;
912 }
913 
914 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
915                          int column, int page_addr)
916 {
917         struct pxa3xx_nand_host *host = mtd->priv;
918         struct pxa3xx_nand_info *info = host->info_data;
919         int ret, exec_cmd;
920 
921         /*
 922          * if this is an x16 device, then convert the input
923          * "byte" address into a "word" address appropriate
924          * for indexing a word-oriented device
925          */
926         if (info->reg_ndcr & NDCR_DWIDTH_M)
927                 column /= 2;
928 
929         /*
 930          * Different NAND chips may be hooked up to different chip
 931          * selects, so check whether the chip select has changed;
 932          * if so, reload the timing registers.
933          */
934         if (info->cs != host->cs) {
935                 info->cs = host->cs;
936                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
937                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
938         }
939 
940         prepare_start_command(info, command);
941 
942         info->state = STATE_PREPARED;
943         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
944 
945         if (exec_cmd) {
946                 init_completion(&info->cmd_complete);
947                 init_completion(&info->dev_ready);
948                 info->need_wait = 1;
949                 pxa3xx_nand_start(info);
950 
951                 ret = wait_for_completion_timeout(&info->cmd_complete,
952                                 CHIP_DELAY_TIMEOUT);
953                 if (!ret) {
 954                         dev_err(&info->pdev->dev, "Wait timed out!\n");
955                         /* Stop State Machine for next command cycle */
956                         pxa3xx_nand_stop(info);
957                 }
958         }
959         info->state = STATE_IDLE;
960 }
961 
962 static void nand_cmdfunc_extended(struct mtd_info *mtd,
963                                   const unsigned command,
964                                   int column, int page_addr)
965 {
966         struct pxa3xx_nand_host *host = mtd->priv;
967         struct pxa3xx_nand_info *info = host->info_data;
968         int ret, exec_cmd, ext_cmd_type;
969 
970         /*
 971          * if this is an x16 device, then convert the input
972          * "byte" address into a "word" address appropriate
973          * for indexing a word-oriented device
974          */
975         if (info->reg_ndcr & NDCR_DWIDTH_M)
976                 column /= 2;
977 
978         /*
 979          * Different NAND chips may be hooked up to different chip
 980          * selects, so check whether the chip select has changed;
 981          * if so, reload the timing registers.
982          */
983         if (info->cs != host->cs) {
984                 info->cs = host->cs;
985                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
986                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
987         }
988 
989         /* Select the extended command for the first command */
990         switch (command) {
991         case NAND_CMD_READ0:
992         case NAND_CMD_READOOB:
993                 ext_cmd_type = EXT_CMD_TYPE_MONO;
994                 break;
995         case NAND_CMD_SEQIN:
996                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
997                 break;
998         case NAND_CMD_PAGEPROG:
999                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1000                 break;
1001         default:
1002                 ext_cmd_type = 0;
1003                 break;
1004         }
1005 
1006         prepare_start_command(info, command);
1007 
1008         /*
1009          * Prepare the "is ready" completion before starting a command
1010          * transaction sequence. If the command is not executed the
1011          * completion will be completed, see below.
1012          *
1013          * We can do that inside the loop because the command variable
1014          * is invariant and thus so is the exec_cmd.
1015          */
1016         info->need_wait = 1;
1017         init_completion(&info->dev_ready);
1018         do {
1019                 info->state = STATE_PREPARED;
1020                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1021                                                column, page_addr);
1022                 if (!exec_cmd) {
1023                         info->need_wait = 0;
1024                         complete(&info->dev_ready);
1025                         break;
1026                 }
1027 
1028                 init_completion(&info->cmd_complete);
1029                 pxa3xx_nand_start(info);
1030 
1031                 ret = wait_for_completion_timeout(&info->cmd_complete,
1032                                 CHIP_DELAY_TIMEOUT);
1033                 if (!ret) {
1034                         dev_err(&info->pdev->dev, "Wait timed out!\n");
1035                         /* Stop State Machine for next command cycle */
1036                         pxa3xx_nand_stop(info);
1037                         break;
1038                 }
1039 
1040                 /* Check if the sequence is complete */
1041                 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1042                         break;
1043 
1044                 /*
1045                  * After a split program command sequence has issued
1046                  * the command dispatch, the command sequence is complete.
1047                  */
1048                 if (info->data_size == 0 &&
1049                     command == NAND_CMD_PAGEPROG &&
1050                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1051                         break;
1052 
1053                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1054                         /* Last read: issue a 'last naked read' */
1055                         if (info->data_size == info->chunk_size)
1056                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1057                         else
1058                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1059 
1060                 /*
1061                  * If a split program command has no more data to transfer,
1062                  * the command dispatch must be issued to complete it.
1063                  */
1064                 } else if (command == NAND_CMD_PAGEPROG &&
1065                            info->data_size == 0) {
1066                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1067                 }
1068         } while (1);
1069 
1070         info->state = STATE_IDLE;
1071 }
1072 
1073 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1074                 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1075 {
1076         chip->write_buf(mtd, buf, mtd->writesize);
1077         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1078 
1079         return 0;
1080 }
1081 
1082 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1083                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1084                 int page)
1085 {
1086         struct pxa3xx_nand_host *host = mtd->priv;
1087         struct pxa3xx_nand_info *info = host->info_data;
1088 
1089         chip->read_buf(mtd, buf, mtd->writesize);
1090         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1091 
1092         if (info->retcode == ERR_CORERR && info->use_ecc) {
1093                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1094 
1095         } else if (info->retcode == ERR_UNCORERR) {
1096                 /*
1097                  * For a blank page (all 0xff), the HW calculates its ECC as
1098                  * 0, which differs from the ECC information stored in the
1099                  * OOB area, so ignore such uncorrectable errors.
1100                  */
1101                 if (is_buf_blank(buf, mtd->writesize))
1102                         info->retcode = ERR_NONE;
1103                 else
1104                         mtd->ecc_stats.failed++;
1105         }
1106 
1107         return info->max_bitflips;
1108 }
1109 
1110 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1111 {
1112         struct pxa3xx_nand_host *host = mtd->priv;
1113         struct pxa3xx_nand_info *info = host->info_data;
1114         char retval = 0xFF;
1115 
1116         if (info->buf_start < info->buf_count)
1117                 /* Has a new command just been sent? */
1118                 retval = info->data_buff[info->buf_start++];
1119 
1120         return retval;
1121 }
1122 
1123 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1124 {
1125         struct pxa3xx_nand_host *host = mtd->priv;
1126         struct pxa3xx_nand_info *info = host->info_data;
1127         u16 retval = 0xFFFF;
1128 
1129         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1130                 retval = *((u16 *)(info->data_buff+info->buf_start));
1131                 info->buf_start += 2;
1132         }
1133         return retval;
1134 }
1135 
1136 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1137 {
1138         struct pxa3xx_nand_host *host = mtd->priv;
1139         struct pxa3xx_nand_info *info = host->info_data;
1140         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1141 
1142         memcpy(buf, info->data_buff + info->buf_start, real_len);
1143         info->buf_start += real_len;
1144 }
1145 
1146 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1147                 const uint8_t *buf, int len)
1148 {
1149         struct pxa3xx_nand_host *host = mtd->priv;
1150         struct pxa3xx_nand_info *info = host->info_data;
1151         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1152 
1153         memcpy(info->data_buff + info->buf_start, buf, real_len);
1154         info->buf_start += real_len;
1155 }
1156 
1157 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1158 {
1159         return;
1160 }
1161 
1162 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1163 {
1164         struct pxa3xx_nand_host *host = mtd->priv;
1165         struct pxa3xx_nand_info *info = host->info_data;
1166         int ret;
1167 
1168         if (info->need_wait) {
1169                 ret = wait_for_completion_timeout(&info->dev_ready,
1170                                 CHIP_DELAY_TIMEOUT);
1171                 info->need_wait = 0;
1172                 if (!ret) {
1173                         dev_err(&info->pdev->dev, "Ready wait timed out!\n");
1174                         return NAND_STATUS_FAIL;
1175                 }
1176         }
1177 
1178         /* the cmdfunc has already waited for the command to complete */
1179         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1180                 if (info->retcode == ERR_NONE)
1181                         return 0;
1182                 else
1183                         return NAND_STATUS_FAIL;
1184         }
1185 
1186         return NAND_STATUS_READY;
1187 }
1188 
1189 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1190                                     const struct pxa3xx_nand_flash *f)
1191 {
1192         struct platform_device *pdev = info->pdev;
1193         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1194         struct pxa3xx_nand_host *host = info->host[info->cs];
1195         uint32_t ndcr = 0x0; /* enable all interrupts */
1196 
1197         if (f->page_size != 2048 && f->page_size != 512) {
1198                 dev_err(&pdev->dev, "Only 2048 and 512 byte page sizes are supported\n");
1199                 return -EINVAL;
1200         }
1201 
1202         if (f->flash_width != 16 && f->flash_width != 8) {
1203                 dev_err(&pdev->dev, "Only 8 bit and 16 bit bus widths are supported\n");
1204                 return -EINVAL;
1205         }
1206 
1207         /* calculate flash information */
1208         host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1209 
1210         /* calculate addressing information */
1211         host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1212 
1213         if (f->num_blocks * f->page_per_block > 65536)
1214                 host->row_addr_cycles = 3;
1215         else
1216                 host->row_addr_cycles = 2;
1217 
1218         ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1219         ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1220         ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1221         ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1222         ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1223         ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1224 
1225         ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1226         ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1227 
1228         info->reg_ndcr = ndcr;
1229 
1230         pxa3xx_nand_set_timing(host, f->timing);
1231         return 0;
1232 }
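
/*
 * Worked example: for the builtin "256MiB 8-bit" entry (chip_id 0xdaec,
 * 2048 byte pages, 64 pages per block, 2048 blocks), this function sets
 * read_id_bytes = 4, col_addr_cycles = 2 and, because
 * 2048 blocks * 64 pages = 131072 > 65536, row_addr_cycles = 3.
 */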
1233 
1234 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1235 {
1236         /*
1237          * Index 0 is hard-coded here because keep_config is not supported
1238          * when more than one chip is attached to the controller.
1239          */
1240         struct pxa3xx_nand_host *host = info->host[0];
1241         uint32_t ndcr = nand_readl(info, NDCR);
1242 
1243         if (ndcr & NDCR_PAGE_SZ) {
1244                 /* Controller's FIFO size */
1245                 info->chunk_size = 2048;
1246                 host->read_id_bytes = 4;
1247         } else {
1248                 info->chunk_size = 512;
1249                 host->read_id_bytes = 2;
1250         }
1251 
1252         /* Set an initial chunk size */
1253         info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1254         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1255         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1256         return 0;
1257 }
1258 
1259 #ifdef ARCH_HAS_DMA
1260 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1261 {
1262         struct platform_device *pdev = info->pdev;
1263         int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
1264 
1265         if (use_dma == 0) {
1266                 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1267                 if (info->data_buff == NULL)
1268                         return -ENOMEM;
1269                 return 0;
1270         }
1271 
1272         info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
1273                                 &info->data_buff_phys, GFP_KERNEL);
1274         if (info->data_buff == NULL) {
1275                 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1276                 return -ENOMEM;
1277         }
1278 
1279         info->data_desc = (void *)info->data_buff + data_desc_offset;
1280         info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1281 
1282         info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1283                                 pxa3xx_nand_data_dma_irq, info);
1284         if (info->data_dma_ch < 0) {
1285                 dev_err(&pdev->dev, "failed to request data dma\n");
1286                 dma_free_coherent(&pdev->dev, info->buf_size,
1287                                 info->data_buff, info->data_buff_phys);
1288                 return info->data_dma_ch;
1289         }
1290 
1291         /*
1292          * Now that DMA buffers are allocated we turn on
1293          * DMA proper for I/O operations.
1294          */
1295         info->use_dma = 1;
1296         return 0;
1297 }
1298 
1299 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1300 {
1301         struct platform_device *pdev = info->pdev;
1302         if (info->use_dma) {
1303                 pxa_free_dma(info->data_dma_ch);
1304                 dma_free_coherent(&pdev->dev, info->buf_size,
1305                                   info->data_buff, info->data_buff_phys);
1306         } else {
1307                 kfree(info->data_buff);
1308         }
1309 }
1310 #else
1311 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1312 {
1313         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1314         if (info->data_buff == NULL)
1315                 return -ENOMEM;
1316         return 0;
1317 }
1318 
1319 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1320 {
1321         kfree(info->data_buff);
1322 }
1323 #endif
1324 
1325 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1326 {
1327         struct mtd_info *mtd;
1328         struct nand_chip *chip;
1329         int ret;
1330 
1331         mtd = info->host[info->cs]->mtd;
1332         chip = mtd->priv;
1333 
1334         /* try detection using the common default timing */
1335         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1336         if (ret)
1337                 return ret;
1338 
1339         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1340         ret = chip->waitfunc(mtd, chip);
1341         if (ret & NAND_STATUS_FAIL)
1342                 return -ENODEV;
1343 
1344         return 0;
1345 }
1346 
1347 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1348                         struct nand_ecc_ctrl *ecc,
1349                         int strength, int ecc_stepsize, int page_size)
1350 {
1351         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1352                 info->chunk_size = 2048;
1353                 info->spare_size = 40;
1354                 info->ecc_size = 24;
1355                 ecc->mode = NAND_ECC_HW;
1356                 ecc->size = 512;
1357                 ecc->strength = 1;
1358                 return 1;
1359 
1360         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1361                 info->chunk_size = 512;
1362                 info->spare_size = 8;
1363                 info->ecc_size = 8;
1364                 ecc->mode = NAND_ECC_HW;
1365                 ecc->size = 512;
1366                 ecc->strength = 1;
1367                 return 1;
1368 
1369         /*
1370          * Required ECC: 4-bit correction per 512 bytes
1371          * Select: 16-bit correction per 2048 bytes
1372          */
1373         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1374                 info->ecc_bch = 1;
1375                 info->chunk_size = 2048;
1376                 info->spare_size = 32;
1377                 info->ecc_size = 32;
1378                 ecc->mode = NAND_ECC_HW;
1379                 ecc->size = info->chunk_size;
1380                 ecc->layout = &ecc_layout_2KB_bch4bit;
1381                 ecc->strength = 16;
1382                 return 1;
1383 
1384         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1385                 info->ecc_bch = 1;
1386                 info->chunk_size = 2048;
1387                 info->spare_size = 32;
1388                 info->ecc_size = 32;
1389                 ecc->mode = NAND_ECC_HW;
1390                 ecc->size = info->chunk_size;
1391                 ecc->layout = &ecc_layout_4KB_bch4bit;
1392                 ecc->strength = 16;
1393                 return 1;
1394 
1395         /*
1396          * Required ECC: 8-bit correction per 512 bytes
1397          * Select: 16-bit correction per 1024 bytes
1398          */
1399         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1400                 info->ecc_bch = 1;
1401                 info->chunk_size = 1024;
1402                 info->spare_size = 0;
1403                 info->ecc_size = 32;
1404                 ecc->mode = NAND_ECC_HW;
1405                 ecc->size = info->chunk_size;
1406                 ecc->layout = &ecc_layout_4KB_bch8bit;
1407                 ecc->strength = 16;
1408                 return 1;
1409         }
1410         return 0;
1411 }
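
/*
 * Summary of the configurations selected by pxa_ecc_init() above:
 *
 *   strength/step    page size   chunk   spare   ecc   ecc_bch   ecc->strength
 *   1 bit / 512      2048        2048     40      24      0            1
 *   1 bit / 512       512         512      8       8      0            1
 *   4 bit / 512      2048        2048     32      32      1           16
 *   4 bit / 512      4096        2048     32      32      1           16
 *   8 bit / 512      4096        1024      0      32      1           16
 */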
1412 
1413 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1414 {
1415         struct pxa3xx_nand_host *host = mtd->priv;
1416         struct pxa3xx_nand_info *info = host->info_data;
1417         struct platform_device *pdev = info->pdev;
1418         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1419         struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1420         const struct pxa3xx_nand_flash *f = NULL;
1421         struct nand_chip *chip = mtd->priv;
1422         uint32_t id = -1;
1423         uint64_t chipsize;
1424         int i, ret, num;
1425         uint16_t ecc_strength, ecc_step;
1426 
1427         if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1428                 goto KEEP_CONFIG;
1429 
1430         ret = pxa3xx_nand_sensing(info);
1431         if (ret) {
1432                 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1433                          info->cs);
1434 
1435                 return ret;
1436         }
1437 
1438         chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1439         id = *((uint16_t *)(info->data_buff));
1440         if (id != 0)
1441                 dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
1442         else {
1443                 dev_warn(&info->pdev->dev,
1444                          "Read out ID 0, potential timing set wrong!!\n");
1445 
1446                 return -EINVAL;
1447         }
1448 
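             /*
              * Look up the READID result, first in the platform-provided
              * flash table and then in the builtin one (skipping
              * builtin_flash_types[0], hence the +1/-1 index arithmetic).
              */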
1449         num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1450         for (i = 0; i < num; i++) {
1451                 if (i < pdata->num_flash)
1452                         f = pdata->flash + i;
1453                 else
1454                         f = &builtin_flash_types[i - pdata->num_flash + 1];
1455 
1456                 /* find the chip in default list */
1457                 if (f->chip_id == id)
1458                         break;
1459         }
1460 
1461         if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
1462                 dev_err(&info->pdev->dev, "ERROR: flash not defined\n");
1463 
1464                 return -EINVAL;
1465         }
1466 
1467         ret = pxa3xx_nand_config_flash(info, f);
1468         if (ret) {
1469                 dev_err(&info->pdev->dev, "ERROR: flash configuration failed\n");
1470                 return ret;
1471         }
1472 
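             /*
              * Build a one-entry nand_flash_dev table describing the
              * matched flash so nand_scan_ident() below can use it.
              */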
1473         pxa3xx_flash_ids[0].name = f->name;
1474         pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1475         pxa3xx_flash_ids[0].pagesize = f->page_size;
1476         chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1477         pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1478         pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1479         if (f->flash_width == 16)
1480                 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1481         pxa3xx_flash_ids[1].name = NULL;
1482         def = pxa3xx_flash_ids;
1483 KEEP_CONFIG:
1484         if (info->reg_ndcr & NDCR_DWIDTH_M)
1485                 chip->options |= NAND_BUSWIDTH_16;
1486 
1487         /* Device detection must be done with ECC disabled */
1488         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1489                 nand_writel(info, NDECCCTRL, 0x0);
1490 
1491         if (nand_scan_ident(mtd, 1, def))
1492                 return -ENODEV;
1493 
1494         if (pdata->flash_bbt) {
1495                 /*
1496                  * We'll use a bad block table stored in-flash and don't
1497                  * allow writing the bad block marker to the flash.
1498                  */
1499                 chip->bbt_options |= NAND_BBT_USE_FLASH |
1500                                      NAND_BBT_NO_OOB_BBM;
1501                 chip->bbt_td = &bbt_main_descr;
1502                 chip->bbt_md = &bbt_mirror_descr;
1503         }
1504 
1505         /*
1506          * If the page size is bigger than the FIFO size, let's check
1507          * that we are given the right variant and then switch to the
1508          * extended (aka split) command handling.
1509          */
1510         if (mtd->writesize > PAGE_CHUNK_SIZE) {
1511                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1512                         chip->cmdfunc = nand_cmdfunc_extended;
1513                 } else {
1514                         dev_err(&info->pdev->dev,
1515                                 "unsupported page size on this variant\n");
1516                         return -ENODEV;
1517                 }
1518         }
1519 
1520         ecc_strength = chip->ecc_strength_ds;
1521         ecc_step = chip->ecc_step_ds;
1522 
1523         /* Set default ECC strength requirements on non-ONFI devices */
1524         if (ecc_strength < 1 && ecc_step < 1) {
1525                 ecc_strength = 1;
1526                 ecc_step = 512;
1527         }
1528 
1529         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1530                            ecc_step, mtd->writesize);
1531         if (!ret) {
1532                 dev_err(&info->pdev->dev,
1533                         "ECC strength %d at page size %d is not supported\n",
1534                         chip->ecc_strength_ds, mtd->writesize);
1535                 return -ENODEV;
1536         }
1537 
1538         /* calculate addressing information */
1539         if (mtd->writesize >= 2048)
1540                 host->col_addr_cycles = 2;
1541         else
1542                 host->col_addr_cycles = 1;
1543 
1544         /* release the initial buffer */
1545         kfree(info->data_buff);
1546 
1547         /* allocate the real data + oob buffer */
1548         info->buf_size = mtd->writesize + mtd->oobsize;
1549         ret = pxa3xx_nand_init_buff(info);
1550         if (ret)
1551                 return ret;
1552         info->oob_buff = info->data_buff + mtd->writesize;
1553 
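             /* Devices with more than 65536 pages need a third row address cycle */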
1554         if ((mtd->size >> chip->page_shift) > 65536)
1555                 host->row_addr_cycles = 3;
1556         else
1557                 host->row_addr_cycles = 2;
1558         return nand_scan_tail(mtd);
1559 }
1560 
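     /*
      * Allocate the controller state: one pxa3xx_nand_info plus an mtd_info
      * and a pxa3xx_nand_host per chip select, in a single allocation. Then
      * hook up the nand_chip callbacks and set up the clock, the optional
      * DMA request numbers, the MMIO region, a small buffer for initial
      * flash detection, and the interrupt handler.
      */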
1561 static int alloc_nand_resource(struct platform_device *pdev)
1562 {
1563         struct pxa3xx_nand_platform_data *pdata;
1564         struct pxa3xx_nand_info *info;
1565         struct pxa3xx_nand_host *host;
1566         struct nand_chip *chip = NULL;
1567         struct mtd_info *mtd;
1568         struct resource *r;
1569         int ret, irq, cs;
1570 
1571         pdata = dev_get_platdata(&pdev->dev);
1572         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1573                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1574         if (!info)
1575                 return -ENOMEM;
1576 
1577         info->pdev = pdev;
1578         info->variant = pxa3xx_nand_get_variant(pdev);
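             /*
              * The allocation above lays out the info structure followed by
              * num_cs pairs of (mtd_info, pxa3xx_nand_host); the host starts
              * with its nand_chip, which is why the same address is cast to
              * both types below.
              */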
1579         for (cs = 0; cs < pdata->num_cs; cs++) {
1580                 mtd = (struct mtd_info *)((uintptr_t)&info[1] +
1581                       (sizeof(*mtd) + sizeof(*host)) * cs);
1582                 chip = (struct nand_chip *)(&mtd[1]);
1583                 host = (struct pxa3xx_nand_host *)chip;
1584                 info->host[cs] = host;
1585                 host->mtd = mtd;
1586                 host->cs = cs;
1587                 host->info_data = info;
1588                 mtd->priv = host;
1589                 mtd->owner = THIS_MODULE;
1590 
1591                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1592                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1593                 chip->controller        = &info->controller;
1594                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1595                 chip->select_chip       = pxa3xx_nand_select_chip;
1596                 chip->read_word         = pxa3xx_nand_read_word;
1597                 chip->read_byte         = pxa3xx_nand_read_byte;
1598                 chip->read_buf          = pxa3xx_nand_read_buf;
1599                 chip->write_buf         = pxa3xx_nand_write_buf;
1600                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1601                 chip->cmdfunc           = nand_cmdfunc;
1602         }
1603 
1604         spin_lock_init(&chip->controller->lock);
1605         init_waitqueue_head(&chip->controller->wq);
1606         info->clk = devm_clk_get(&pdev->dev, NULL);
1607         if (IS_ERR(info->clk)) {
1608                 dev_err(&pdev->dev, "failed to get nand clock\n");
1609                 return PTR_ERR(info->clk);
1610         }
1611         ret = clk_prepare_enable(info->clk);
1612         if (ret < 0)
1613                 return ret;
1614 
1615         if (use_dma) {
1616                 /*
1617                  * This is a dirty hack to make this driver work from
1618                  * devicetree bindings. It can be removed once we have
1619                  * a proper DMA controller framework for DT.
1620                  */
1621                 if (pdev->dev.of_node &&
1622                     of_machine_is_compatible("marvell,pxa3xx")) {
1623                         info->drcmr_dat = 97;
1624                         info->drcmr_cmd = 99;
1625                 } else {
1626                         r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1627                         if (r == NULL) {
1628                                 dev_err(&pdev->dev,
1629                                         "no resource defined for data DMA\n");
1630                                 ret = -ENXIO;
1631                                 goto fail_disable_clk;
1632                         }
1633                         info->drcmr_dat = r->start;
1634 
1635                         r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1636                         if (r == NULL) {
1637                                 dev_err(&pdev->dev,
1638                                         "no resource defined for cmd DMA\n");
1639                                 ret = -ENXIO;
1640                                 goto fail_disable_clk;
1641                         }
1642                         info->drcmr_cmd = r->start;
1643                 }
1644         }
1645 
1646         irq = platform_get_irq(pdev, 0);
1647         if (irq < 0) {
1648                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1649                 ret = -ENXIO;
1650                 goto fail_disable_clk;
1651         }
1652 
1653         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1654         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1655         if (IS_ERR(info->mmio_base)) {
1656                 ret = PTR_ERR(info->mmio_base);
1657                 goto fail_disable_clk;
1658         }
1659         info->mmio_phys = r->start;
1660 
1661         /* Allocate a buffer to allow flash detection */
1662         info->buf_size = INIT_BUFFER_SIZE;
1663         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1664         if (info->data_buff == NULL) {
1665                 ret = -ENOMEM;
1666                 goto fail_disable_clk;
1667         }
1668 
1669         /* initialize all interrupts to be disabled */
1670         disable_int(info, NDSR_MASK);
1671 
1672         ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
1673         if (ret < 0) {
1674                 dev_err(&pdev->dev, "failed to request IRQ\n");
1675                 goto fail_free_buf;
1676         }
1677 
1678         platform_set_drvdata(pdev, info);
1679 
1680         return 0;
1681 
1682 fail_free_buf:
1683         free_irq(irq, info);
1684         kfree(info->data_buff);
1685 fail_disable_clk:
1686         clk_disable_unprepare(info->clk);
1687         return ret;
1688 }
1689 
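     /*
      * Tear down in roughly the reverse order of probing: release the
      * interrupt and buffers, disable the clock, and unregister each
      * chip select's mtd device.
      */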
1690 static int pxa3xx_nand_remove(struct platform_device *pdev)
1691 {
1692         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1693         struct pxa3xx_nand_platform_data *pdata;
1694         int irq, cs;
1695 
1696         if (!info)
1697                 return 0;
1698 
1699         pdata = dev_get_platdata(&pdev->dev);
1700 
1701         irq = platform_get_irq(pdev, 0);
1702         if (irq >= 0)
1703                 free_irq(irq, info);
1704         pxa3xx_nand_free_buff(info);
1705 
1706         clk_disable_unprepare(info->clk);
1707 
1708         for (cs = 0; cs < pdata->num_cs; cs++)
1709                 nand_release(info->host[cs]->mtd);
1710         return 0;
1711 }
1712 
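     /*
      * On device-tree probes, build the platform data from the properties
      * parsed below; on legacy (non-DT) probes the OF match fails and the
      * platform data supplied by the board code is left untouched.
      */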
1713 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1714 {
1715         struct pxa3xx_nand_platform_data *pdata;
1716         struct device_node *np = pdev->dev.of_node;
1717         const struct of_device_id *of_id =
1718                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1719 
1720         if (!of_id)
1721                 return 0;
1722 
1723         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1724         if (!pdata)
1725                 return -ENOMEM;
1726 
1727         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1728                 pdata->enable_arbiter = 1;
1729         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1730                 pdata->keep_config = 1;
1731         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1732         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1733 
1734         pdev->dev.platform_data = pdata;
1735 
1736         return 0;
1737 }
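     /*
      * For illustration only: a minimal sketch of a device-tree node using
      * the properties parsed above. The property names are taken from the
      * code (of_get_nand_on_flash_bbt() checks the generic "nand-on-flash-bbt"
      * property); the "compatible" string, register window and interrupt
      * number are assumptions for the example, not taken from this file.
      *
      *      nand@43100000 {
      *              compatible = "marvell,pxa3xx-nand";
      *              reg = <0x43100000 0x90>;
      *              interrupts = <45>;
      *              num-cs = <1>;
      *              marvell,nand-enable-arbiter;
      *              marvell,nand-keep-config;
      *              nand-on-flash-bbt;
      *      };
      */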
1738 
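     /*
      * Driver entry point: resolve the platform data (from DT if present),
      * allocate the controller resources, then scan and register an mtd
      * device for every configured chip select. Probing succeeds as long
      * as at least one chip select registers successfully.
      */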
1739 static int pxa3xx_nand_probe(struct platform_device *pdev)
1740 {
1741         struct pxa3xx_nand_platform_data *pdata;
1742         struct mtd_part_parser_data ppdata = {};
1743         struct pxa3xx_nand_info *info;
1744         int ret, cs, probe_success;
1745 
1746 #ifndef ARCH_HAS_DMA
1747         if (use_dma) {
1748                 use_dma = 0;
1749                 dev_warn(&pdev->dev,
1750                          "This platform can't do DMA on this device\n");
1751         }
1752 #endif
1753         ret = pxa3xx_nand_probe_dt(pdev);
1754         if (ret)
1755                 return ret;
1756 
1757         pdata = dev_get_platdata(&pdev->dev);
1758         if (!pdata) {
1759                 dev_err(&pdev->dev, "no platform data defined\n");
1760                 return -ENODEV;
1761         }
1762 
1763         ret = alloc_nand_resource(pdev);
1764         if (ret) {
1765                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1766                 return ret;
1767         }
1768 
1769         info = platform_get_drvdata(pdev);
1770         probe_success = 0;
1771         for (cs = 0; cs < pdata->num_cs; cs++) {
1772                 struct mtd_info *mtd = info->host[cs]->mtd;
1773 
1774                 /*
1775                  * The mtd name matches the one used in the 'mtdparts' kernel
1776                  * parameter. This name cannot be changed, otherwise the
1777                  * user's mtd partition configuration would break.
1778                  */
1779                 mtd->name = "pxa3xx_nand-0";
1780                 info->cs = cs;
1781                 ret = pxa3xx_nand_scan(mtd);
1782                 if (ret) {
1783                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1784                                 cs);
1785                         continue;
1786                 }
1787 
1788                 ppdata.of_node = pdev->dev.of_node;
1789                 ret = mtd_device_parse_register(mtd, NULL,
1790                                                 &ppdata, pdata->parts[cs],
1791                                                 pdata->nr_parts[cs]);
1792                 if (!ret)
1793                         probe_success = 1;
1794         }
1795 
1796         if (!probe_success) {
1797                 pxa3xx_nand_remove(pdev);
1798                 return -ENODEV;
1799         }
1800 
1801         return 0;
1802 }
1803 
1804 #ifdef CONFIG_PM
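     /*
      * Refuse to suspend while the controller is busy with a command;
      * otherwise suspend each chip select's mtd device.
      */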
1805 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1806 {
1807         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1808         struct pxa3xx_nand_platform_data *pdata;
1809         struct mtd_info *mtd;
1810         int cs;
1811 
1812         pdata = dev_get_platdata(&pdev->dev);
1813         if (info->state) {
1814                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1815                 return -EAGAIN;
1816         }
1817 
1818         for (cs = 0; cs < pdata->num_cs; cs++) {
1819                 mtd = info->host[cs]->mtd;
1820                 mtd_suspend(mtd);
1821         }
1822 
1823         return 0;
1824 }
1825 
1826 static int pxa3xx_nand_resume(struct platform_device *pdev)
1827 {
1828         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1829         struct pxa3xx_nand_platform_data *pdata;
1830         struct mtd_info *mtd;
1831         int cs;
1832 
1833         pdata = dev_get_platdata(&pdev->dev);
1834         /* We don't want to handle interrupts without the mtd routines being called */
1835         disable_int(info, NDCR_INT_MASK);
1836 
1837         /*
1838          * Directly set the chip select to an invalid value, so that
1839          * the driver resets the timing according to the current chip
1840          * select at the beginning of cmdfunc.
1841          */
1842         info->cs = 0xff;
1843 
1844         /*
1845          * As the spec says, NDSR is updated to 0x1800 when the
1846          * nand_clk is disabled/enabled.
1847          * To prevent this from damaging the driver's state machine,
1848          * clear all status bits before resuming.
1849          */
1850         nand_writel(info, NDSR, NDSR_MASK);
1851         for (cs = 0; cs < pdata->num_cs; cs++) {
1852                 mtd = info->host[cs]->mtd;
1853                 mtd_resume(mtd);
1854         }
1855 
1856         return 0;
1857 }
1858 #else
1859 #define pxa3xx_nand_suspend     NULL
1860 #define pxa3xx_nand_resume      NULL
1861 #endif
1862 
1863 static struct platform_driver pxa3xx_nand_driver = {
1864         .driver = {
1865                 .name   = "pxa3xx-nand",
1866                 .of_match_table = pxa3xx_nand_dt_ids,
1867         },
1868         .probe          = pxa3xx_nand_probe,
1869         .remove         = pxa3xx_nand_remove,
1870         .suspend        = pxa3xx_nand_suspend,
1871         .resume         = pxa3xx_nand_resume,
1872 };
1873 
1874 module_platform_driver(pxa3xx_nand_driver);
1875 
1876 MODULE_LICENSE("GPL");
1877 MODULE_DESCRIPTION("PXA3xx NAND controller driver");
1878 
