Version:  2.0.40 2.2.26 2.4.37 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6 4.7

Linux/drivers/mtd/nand/fsmc_nand.c

  1 /*
  2  * drivers/mtd/nand/fsmc_nand.c
  3  *
  4  * ST Microelectronics
  5  * Flexible Static Memory Controller (FSMC)
  6  * Driver for NAND portions
  7  *
  8  * Copyright © 2010 ST Microelectronics
  9  * Vipin Kumar <vipin.kumar@st.com>
 10  * Ashish Priyadarshi
 11  *
 12  * Based on drivers/mtd/nand/nomadik_nand.c
 13  *
 14  * This file is licensed under the terms of the GNU General Public
 15  * License version 2. This program is licensed "as is" without any
 16  * warranty of any kind, whether express or implied.
 17  */
 18 
 19 #include <linux/clk.h>
 20 #include <linux/completion.h>
 21 #include <linux/dmaengine.h>
 22 #include <linux/dma-direction.h>
 23 #include <linux/dma-mapping.h>
 24 #include <linux/err.h>
 25 #include <linux/init.h>
 26 #include <linux/module.h>
 27 #include <linux/resource.h>
 28 #include <linux/sched.h>
 29 #include <linux/types.h>
 30 #include <linux/mtd/mtd.h>
 31 #include <linux/mtd/nand.h>
 32 #include <linux/mtd/nand_ecc.h>
 33 #include <linux/platform_device.h>
 34 #include <linux/of.h>
 35 #include <linux/mtd/partitions.h>
 36 #include <linux/io.h>
 37 #include <linux/slab.h>
 38 #include <linux/mtd/fsmc.h>
 39 #include <linux/amba/bus.h>
 40 #include <mtd/mtd-abi.h>
 41 
 42 static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
 43                                    struct mtd_oob_region *oobregion)
 44 {
 45         struct nand_chip *chip = mtd_to_nand(mtd);
 46 
 47         if (section >= chip->ecc.steps)
 48                 return -ERANGE;
 49 
 50         oobregion->offset = (section * 16) + 2;
 51         oobregion->length = 3;
 52 
 53         return 0;
 54 }
 55 
 56 static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
 57                                     struct mtd_oob_region *oobregion)
 58 {
 59         struct nand_chip *chip = mtd_to_nand(mtd);
 60 
 61         if (section >= chip->ecc.steps)
 62                 return -ERANGE;
 63 
 64         oobregion->offset = (section * 16) + 8;
 65 
 66         if (section < chip->ecc.steps - 1)
 67                 oobregion->length = 8;
 68         else
 69                 oobregion->length = mtd->oobsize - oobregion->offset;
 70 
 71         return 0;
 72 }
 73 
/* OOB layout callbacks for the 1-bit (3 bytes / 512 bytes) HW ECC mode */
static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
        .ecc = fsmc_ecc1_ooblayout_ecc,
        .free = fsmc_ecc1_ooblayout_free,
};
 78 
 79 /*
 80  * ECC placement definitions in oobfree type format.
 81  * There are 13 bytes of ecc for every 512 byte block and it has to be read
 82  * consecutively and immediately after the 512 byte data block for hardware to
 83  * generate the error bit offsets in 512 byte data.
 84  */
 85 static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
 86                                    struct mtd_oob_region *oobregion)
 87 {
 88         struct nand_chip *chip = mtd_to_nand(mtd);
 89 
 90         if (section >= chip->ecc.steps)
 91                 return -ERANGE;
 92 
 93         oobregion->length = chip->ecc.bytes;
 94 
 95         if (!section && mtd->writesize <= 512)
 96                 oobregion->offset = 0;
 97         else
 98                 oobregion->offset = (section * 16) + 2;
 99 
100         return 0;
101 }
102 
103 static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
104                                     struct mtd_oob_region *oobregion)
105 {
106         struct nand_chip *chip = mtd_to_nand(mtd);
107 
108         if (section >= chip->ecc.steps)
109                 return -ERANGE;
110 
111         oobregion->offset = (section * 16) + 15;
112 
113         if (section < chip->ecc.steps - 1)
114                 oobregion->length = 3;
115         else
116                 oobregion->length = mtd->oobsize - oobregion->offset;
117 
118         return 0;
119 }
120 
/* OOB layout callbacks for the 8-bit BCH (13 bytes / 512 bytes) HW ECC mode */
static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
        .ecc = fsmc_ecc4_ooblayout_ecc,
        .free = fsmc_ecc4_ooblayout_free,
};
125 
/**
 * struct fsmc_nand_data - structure for FSMC NAND device state
 *
 * @pid:                Part ID on the AMBA PrimeCell format
 * @nand:               Chip related info for a NAND flash.
 * @partitions:         Partition info for a NAND Flash.
 * @nr_partitions:      Total number of partition of a NAND flash.
 *
 * @bank:               Bank number for probed device.
 * @dev:                Parent device.
 * @mode:               Access mode (polled or DMA).
 * @clk:                Clock structure for FSMC.
 *
 * @read_dma_chan:      DMA channel for read access
 * @write_dma_chan:     DMA channel for write access to NAND
 * @dma_access_complete: Completion structure
 *
 * @dev_timings:        NAND timings taken from platform data; NULL means
 *                      the driver's built-in defaults are used.
 *
 * @data_pa:            NAND Physical port for Data.
 * @data_va:            NAND port for Data.
 * @cmd_va:             NAND port for Command.
 * @addr_va:            NAND port for Address.
 * @regs_va:            FSMC regs base address.
 *
 * @select_chip:        Optional board hook asserting the chip select.
 */
struct fsmc_nand_data {
        u32                     pid;
        struct nand_chip        nand;
        struct mtd_partition    *partitions;
        unsigned int            nr_partitions;

        unsigned int            bank;
        struct device           *dev;
        enum access_mode        mode;
        struct clk              *clk;

        /* DMA related objects */
        struct dma_chan         *read_dma_chan;
        struct dma_chan         *write_dma_chan;
        struct completion       dma_access_complete;

        struct fsmc_nand_timings *dev_timings;

        dma_addr_t              data_pa;
        void __iomem            *data_va;
        void __iomem            *cmd_va;
        void __iomem            *addr_va;
        void __iomem            *regs_va;

        void                    (*select_chip)(uint32_t bank, uint32_t busw);
};
174 
/* Map an mtd_info back to its owning fsmc_nand_data via the embedded chip */
static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
{
        return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
}
179 
180 /* Assert CS signal based on chipnr */
181 static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
182 {
183         struct nand_chip *chip = mtd_to_nand(mtd);
184         struct fsmc_nand_data *host;
185 
186         host = mtd_to_fsmc(mtd);
187 
188         switch (chipnr) {
189         case -1:
190                 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
191                 break;
192         case 0:
193         case 1:
194         case 2:
195         case 3:
196                 if (host->select_chip)
197                         host->select_chip(chipnr,
198                                         chip->options & NAND_BUSWIDTH_16);
199                 break;
200 
201         default:
202                 dev_err(host->dev, "unsupported chip-select %d\n", chipnr);
203         }
204 }
205 
/*
 * fsmc_cmd_ctrl - For facilitating Hardware access
 * This routine allows hardware specific access to control-lines (ALE, CLE).
 * Command and address bytes are latched by writing through dedicated address
 * windows (cmd_va / addr_va); plain data I/O goes through data_va. nCE is
 * driven via the FSMC_ENABLE bit of the per-bank PC register.
 */
static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
        struct nand_chip *this = mtd_to_nand(mtd);
        struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
        void __iomem *regs = host->regs_va;
        unsigned int bank = host->bank;

        if (ctrl & NAND_CTRL_CHANGE) {
                u32 pc;

                /* Route subsequent I/O through the window matching CLE/ALE */
                if (ctrl & NAND_CLE) {
                        this->IO_ADDR_R = host->cmd_va;
                        this->IO_ADDR_W = host->cmd_va;
                } else if (ctrl & NAND_ALE) {
                        this->IO_ADDR_R = host->addr_va;
                        this->IO_ADDR_W = host->addr_va;
                } else {
                        this->IO_ADDR_R = host->data_va;
                        this->IO_ADDR_W = host->data_va;
                }

                /* Reflect the requested nCE state in the bank enable bit */
                pc = readl(FSMC_NAND_REG(regs, bank, PC));
                if (ctrl & NAND_NCE)
                        pc |= FSMC_ENABLE;
                else
                        pc &= ~FSMC_ENABLE;
                writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
        }

        /* Order the PC update before the command/address byte hits the bus */
        mb();

        if (cmd != NAND_CMD_NONE)
                writeb_relaxed(cmd, this->IO_ADDR_W);
}
244 
245 /*
246  * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
247  *
248  * This routine initializes timing parameters related to NAND memory access in
249  * FSMC registers
250  */
251 static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
252                            uint32_t busw, struct fsmc_nand_timings *timings)
253 {
254         uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
255         uint32_t tclr, tar, thiz, thold, twait, tset;
256         struct fsmc_nand_timings *tims;
257         struct fsmc_nand_timings default_timings = {
258                 .tclr   = FSMC_TCLR_1,
259                 .tar    = FSMC_TAR_1,
260                 .thiz   = FSMC_THIZ_1,
261                 .thold  = FSMC_THOLD_4,
262                 .twait  = FSMC_TWAIT_6,
263                 .tset   = FSMC_TSET_0,
264         };
265 
266         if (timings)
267                 tims = timings;
268         else
269                 tims = &default_timings;
270 
271         tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
272         tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
273         thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
274         thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
275         twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
276         tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
277 
278         if (busw)
279                 writel_relaxed(value | FSMC_DEVWID_16,
280                                 FSMC_NAND_REG(regs, bank, PC));
281         else
282                 writel_relaxed(value | FSMC_DEVWID_8,
283                                 FSMC_NAND_REG(regs, bank, PC));
284 
285         writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
286                         FSMC_NAND_REG(regs, bank, PC));
287         writel_relaxed(thiz | thold | twait | tset,
288                         FSMC_NAND_REG(regs, bank, COMM));
289         writel_relaxed(thiz | thold | twait | tset,
290                         FSMC_NAND_REG(regs, bank, ATTRIB));
291 }
292 
/*
 * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
 *
 * Clears FSMC_ECCPLEN_256 (presumably selecting the 512-byte ECC page
 * length — TODO confirm against the FSMC datasheet), then toggles
 * FSMC_ECCEN off and back on to restart the ECC computation. The @mode
 * argument (NAND_ECC_READ/WRITE) is not used by this controller.
 */
static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
        struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
        void __iomem *regs = host->regs_va;
        uint32_t bank = host->bank;

        writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
                        FSMC_NAND_REG(regs, bank, PC));
        /* Toggle ECCEN: clearing then setting it re-arms the engine */
        writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
                        FSMC_NAND_REG(regs, bank, PC));
        writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
                        FSMC_NAND_REG(regs, bank, PC));
}
309 
310 /*
311  * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
312  * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
313  * max of 8-bits)
314  */
315 static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
316                                 uint8_t *ecc)
317 {
318         struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
319         void __iomem *regs = host->regs_va;
320         uint32_t bank = host->bank;
321         uint32_t ecc_tmp;
322         unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
323 
324         do {
325                 if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
326                         break;
327                 else
328                         cond_resched();
329         } while (!time_after_eq(jiffies, deadline));
330 
331         if (time_after_eq(jiffies, deadline)) {
332                 dev_err(host->dev, "calculate ecc timed out\n");
333                 return -ETIMEDOUT;
334         }
335 
336         ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
337         ecc[0] = (uint8_t) (ecc_tmp >> 0);
338         ecc[1] = (uint8_t) (ecc_tmp >> 8);
339         ecc[2] = (uint8_t) (ecc_tmp >> 16);
340         ecc[3] = (uint8_t) (ecc_tmp >> 24);
341 
342         ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
343         ecc[4] = (uint8_t) (ecc_tmp >> 0);
344         ecc[5] = (uint8_t) (ecc_tmp >> 8);
345         ecc[6] = (uint8_t) (ecc_tmp >> 16);
346         ecc[7] = (uint8_t) (ecc_tmp >> 24);
347 
348         ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
349         ecc[8] = (uint8_t) (ecc_tmp >> 0);
350         ecc[9] = (uint8_t) (ecc_tmp >> 8);
351         ecc[10] = (uint8_t) (ecc_tmp >> 16);
352         ecc[11] = (uint8_t) (ecc_tmp >> 24);
353 
354         ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
355         ecc[12] = (uint8_t) (ecc_tmp >> 16);
356 
357         return 0;
358 }
359 
360 /*
361  * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
362  * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
363  * max of 1-bit)
364  */
365 static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
366                                 uint8_t *ecc)
367 {
368         struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
369         void __iomem *regs = host->regs_va;
370         uint32_t bank = host->bank;
371         uint32_t ecc_tmp;
372 
373         ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
374         ecc[0] = (uint8_t) (ecc_tmp >> 0);
375         ecc[1] = (uint8_t) (ecc_tmp >> 8);
376         ecc[2] = (uint8_t) (ecc_tmp >> 16);
377 
378         return 0;
379 }
380 
/*
 * Count the number of written (0) bits in buff, giving up early once the
 * count exceeds max_bits — callers only care whether the threshold is
 * crossed, not about the exact total beyond it.
 */
static int count_written_bits(uint8_t *buff, int size, int max_bits)
{
        int idx;
        int zero_bits = 0;

        for (idx = 0; idx < size && zero_bits <= max_bits; idx++)
                zero_bits += hweight8(~buff[idx]);

        return zero_bits;
}
394 
/* DMA engine completion callback: wake the waiter blocked in dma_xfer() */
static void dma_complete(void *param)
{
        struct fsmc_nand_data *host = param;

        complete(&host->dma_access_complete);
}
401 
402 static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
403                 enum dma_data_direction direction)
404 {
405         struct dma_chan *chan;
406         struct dma_device *dma_dev;
407         struct dma_async_tx_descriptor *tx;
408         dma_addr_t dma_dst, dma_src, dma_addr;
409         dma_cookie_t cookie;
410         unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
411         int ret;
412         unsigned long time_left;
413 
414         if (direction == DMA_TO_DEVICE)
415                 chan = host->write_dma_chan;
416         else if (direction == DMA_FROM_DEVICE)
417                 chan = host->read_dma_chan;
418         else
419                 return -EINVAL;
420 
421         dma_dev = chan->device;
422         dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
423 
424         if (direction == DMA_TO_DEVICE) {
425                 dma_src = dma_addr;
426                 dma_dst = host->data_pa;
427         } else {
428                 dma_src = host->data_pa;
429                 dma_dst = dma_addr;
430         }
431 
432         tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
433                         len, flags);
434         if (!tx) {
435                 dev_err(host->dev, "device_prep_dma_memcpy error\n");
436                 ret = -EIO;
437                 goto unmap_dma;
438         }
439 
440         tx->callback = dma_complete;
441         tx->callback_param = host;
442         cookie = tx->tx_submit(tx);
443 
444         ret = dma_submit_error(cookie);
445         if (ret) {
446                 dev_err(host->dev, "dma_submit_error %d\n", cookie);
447                 goto unmap_dma;
448         }
449 
450         dma_async_issue_pending(chan);
451 
452         time_left =
453         wait_for_completion_timeout(&host->dma_access_complete,
454                                 msecs_to_jiffies(3000));
455         if (time_left == 0) {
456                 dmaengine_terminate_all(chan);
457                 dev_err(host->dev, "wait_for_completion_timeout\n");
458                 ret = -ETIMEDOUT;
459                 goto unmap_dma;
460         }
461 
462         ret = 0;
463 
464 unmap_dma:
465         dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
466 
467         return ret;
468 }
469 
470 /*
471  * fsmc_write_buf - write buffer to chip
472  * @mtd:        MTD device structure
473  * @buf:        data buffer
474  * @len:        number of bytes to write
475  */
476 static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
477 {
478         int i;
479         struct nand_chip *chip = mtd_to_nand(mtd);
480 
481         if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
482                         IS_ALIGNED(len, sizeof(uint32_t))) {
483                 uint32_t *p = (uint32_t *)buf;
484                 len = len >> 2;
485                 for (i = 0; i < len; i++)
486                         writel_relaxed(p[i], chip->IO_ADDR_W);
487         } else {
488                 for (i = 0; i < len; i++)
489                         writeb_relaxed(buf[i], chip->IO_ADDR_W);
490         }
491 }
492 
493 /*
494  * fsmc_read_buf - read chip data into buffer
495  * @mtd:        MTD device structure
496  * @buf:        buffer to store date
497  * @len:        number of bytes to read
498  */
499 static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
500 {
501         int i;
502         struct nand_chip *chip = mtd_to_nand(mtd);
503 
504         if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
505                         IS_ALIGNED(len, sizeof(uint32_t))) {
506                 uint32_t *p = (uint32_t *)buf;
507                 len = len >> 2;
508                 for (i = 0; i < len; i++)
509                         p[i] = readl_relaxed(chip->IO_ADDR_R);
510         } else {
511                 for (i = 0; i < len; i++)
512                         buf[i] = readb_relaxed(chip->IO_ADDR_R);
513         }
514 }
515 
/*
 * fsmc_read_buf_dma - read chip data into buffer
 * @mtd:        MTD device structure
 * @buf:        buffer to store data
 * @len:        number of bytes to read
 *
 * NOTE(review): the dma_xfer() return value is discarded because the
 * ->read_buf() hook returns void, so a DMA failure is silent here.
 */
static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct fsmc_nand_data *host  = mtd_to_fsmc(mtd);

        dma_xfer(host, buf, len, DMA_FROM_DEVICE);
}
528 
/*
 * fsmc_write_buf_dma - write buffer to chip
 * @mtd:        MTD device structure
 * @buf:        data buffer
 * @len:        number of bytes to write
 *
 * NOTE(review): the dma_xfer() return value is discarded because the
 * ->write_buf() hook returns void, so a DMA failure is silent here.
 */
static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
                int len)
{
        struct fsmc_nand_data *host = mtd_to_fsmc(mtd);

        dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
542 
/*
 * fsmc_read_page_hwecc
 * @mtd:        mtd info structure
 * @chip:       nand chip info structure
 * @buf:        buffer to store read data
 * @oob_required:       caller expects OOB data read to chip->oob_poi
 * @page:       page number to read
 *
 * This routine is needed for fsmc version 8 as reading from NAND chip has to be
 * performed in a strict sequence as follows:
 * data(512 byte) -> ecc(13 byte)
 * After this read, fsmc hardware generates and reports error data bits(up to a
 * max of 8 bits)
 *
 * Returns the maximum number of bitflips corrected in any single ECC step,
 * or a negative errno if the OOB layout lookup fails.
 */
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
                                 uint8_t *buf, int oob_required, int page)
{
        int i, j, s, stat, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
        int eccsteps = chip->ecc.steps;
        uint8_t *p = buf;
        uint8_t *ecc_calc = chip->buffers->ecccalc;
        uint8_t *ecc_code = chip->buffers->ecccode;
        int off, len, group = 0;
        /*
         * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
         * end up reading 14 bytes (7 words) from oob. The local array is
         * to maintain word alignment
         */
        uint16_t ecc_oob[7];
        uint8_t *oob = (uint8_t *)&ecc_oob[0];
        unsigned int max_bitflips = 0;

        for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
                /* Read one data chunk with the HW ECC engine armed */
                chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
                chip->ecc.hwctl(mtd, NAND_ECC_READ);
                chip->read_buf(mtd, p, eccsize);

                /* Collect this step's ECC bytes from the OOB layout regions */
                for (j = 0; j < eccbytes;) {
                        struct mtd_oob_region oobregion;
                        int ret;

                        ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
                        if (ret)
                                return ret;

                        off = oobregion.offset;
                        len = oobregion.length;

                        /*
                         * length is intentionally kept a higher multiple of 2
                         * to read at least 13 bytes even in case of 16 bit NAND
                         * devices
                         */
                        if (chip->options & NAND_BUSWIDTH_16)
                                len = roundup(len, 2);

                        chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
                        chip->read_buf(mtd, oob + j, len);
                        j += len;
                }

                memcpy(&ecc_code[i], oob, chip->ecc.bytes);
                chip->ecc.calculate(mtd, p, &ecc_calc[i]);

                /* correct() returns bitflips fixed, or < 0 on uncorrectable */
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
                if (stat < 0) {
                        mtd->ecc_stats.failed++;
                } else {
                        mtd->ecc_stats.corrected += stat;
                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
                }
        }

        return max_bitflips;
}
619 
/*
 * fsmc_bch8_correct_data
 * @mtd:        mtd info structure
 * @dat:        buffer of read data
 * @read_ecc:   ecc read from device spare area
 * @calc_ecc:   ecc calculated from read data
 *
 * calc_ecc is a 104 bit information containing maximum of 8 error
 * offset informations of 13 bits each in 512 bytes of read data.
 *
 * Returns the number of bitflips corrected, or -EBADMSG when the data is
 * uncorrectable and not a freshly-erased page.
 */
static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
                             uint8_t *read_ecc, uint8_t *calc_ecc)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
        void __iomem *regs = host->regs_va;
        unsigned int bank = host->bank;
        uint32_t err_idx[8];
        uint32_t num_err, i;
        uint32_t ecc1, ecc2, ecc3, ecc4;

        /* Error count reported by the controller in STS bits 13:10 */
        num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;

        /* no bit flipping */
        if (likely(num_err == 0))
                return 0;

        /* too many errors */
        if (unlikely(num_err > 8)) {
                /*
                 * This is a temporary erase check. A newly erased page read
                 * would result in an ecc error because the oob data is also
                 * erased to FF and the calculated ecc for an FF data is not
                 * FF..FF.
                 * This is a workaround to skip performing correction in case
                 * data is FF..FF
                 *
                 * Logic:
                 * For every page, each bit written as 0 is counted until these
                 * number of bits are greater than 8 (the maximum correction
                 * capability of FSMC for each 512 + 13 bytes)
                 */

                int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
                int bits_data = count_written_bits(dat, chip->ecc.size, 8);

                if ((bits_ecc + bits_data) <= 8) {
                        /* Treat as erased: restore all-FF data */
                        if (bits_data)
                                memset(dat, 0xff, chip->ecc.size);
                        return bits_data;
                }

                return -EBADMSG;
        }

        /*
         * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
         * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
         *
         * calc_ecc is a 104 bit information containing maximum of 8 error
         * offset informations of 13 bits each. calc_ecc is copied into a
         * uint64_t array and error offset indexes are populated in err_idx
         * array
         */
        ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
        ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
        ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
        ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));

        /* Unpack eight 13-bit error offsets packed across the registers */
        err_idx[0] = (ecc1 >> 0) & 0x1FFF;
        err_idx[1] = (ecc1 >> 13) & 0x1FFF;
        err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
        err_idx[3] = (ecc2 >> 7) & 0x1FFF;
        err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
        err_idx[5] = (ecc3 >> 1) & 0x1FFF;
        err_idx[6] = (ecc3 >> 14) & 0x1FFF;
        err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);

        /*
         * NOTE(review): change_bit() on a uint32_t through an unsigned long *
         * assumes little-endian, 32-bit-compatible bit addressing — verify
         * on 64-bit and big-endian configurations.
         * Also note that i only advances for in-range offsets, so an
         * out-of-range entry re-toggles bits 0/1 of the same err_idx slot on
         * the next pass — confirm this matches the hardware's intent.
         */
        i = 0;
        while (num_err--) {
                change_bit(0, (unsigned long *)&err_idx[i]);
                change_bit(1, (unsigned long *)&err_idx[i]);

                if (err_idx[i] < chip->ecc.size * 8) {
                        change_bit(err_idx[i], (unsigned long *)dat);
                        i++;
                }
        }
        return i;
}
710 
/*
 * dma_request_channel() filter: accept any offered channel and stash the
 * platform-provided slave data in chan->private for the DMA driver.
 */
static bool filter(struct dma_chan *chan, void *slave)
{
        chan->private = slave;
        return true;
}
716 
717 #ifdef CONFIG_OF
718 static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
719                                      struct device_node *np)
720 {
721         struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
722         u32 val;
723         int ret;
724 
725         /* Set default NAND width to 8 bits */
726         pdata->width = 8;
727         if (!of_property_read_u32(np, "bank-width", &val)) {
728                 if (val == 2) {
729                         pdata->width = 16;
730                 } else if (val != 1) {
731                         dev_err(&pdev->dev, "invalid bank-width %u\n", val);
732                         return -EINVAL;
733                 }
734         }
735         if (of_get_property(np, "nand-skip-bbtscan", NULL))
736                 pdata->options = NAND_SKIP_BBTSCAN;
737 
738         pdata->nand_timings = devm_kzalloc(&pdev->dev,
739                                 sizeof(*pdata->nand_timings), GFP_KERNEL);
740         if (!pdata->nand_timings)
741                 return -ENOMEM;
742         ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
743                                                 sizeof(*pdata->nand_timings));
744         if (ret) {
745                 dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
746                 pdata->nand_timings = NULL;
747         }
748 
749         /* Set default NAND bank to 0 */
750         pdata->bank = 0;
751         if (!of_property_read_u32(np, "bank", &val)) {
752                 if (val > 3) {
753                         dev_err(&pdev->dev, "invalid bank %u\n", val);
754                         return -EINVAL;
755                 }
756                 pdata->bank = val;
757         }
758         return 0;
759 }
760 #else
/* Stub for !CONFIG_OF builds: device-tree configuration is unavailable */
static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
                                     struct device_node *np)
{
        return -ENOSYS;
}
766 #endif
767 
768 /*
769  * fsmc_nand_probe - Probe function
770  * @pdev:       platform device structure
771  */
772 static int __init fsmc_nand_probe(struct platform_device *pdev)
773 {
774         struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
775         struct device_node __maybe_unused *np = pdev->dev.of_node;
776         struct fsmc_nand_data *host;
777         struct mtd_info *mtd;
778         struct nand_chip *nand;
779         struct resource *res;
780         dma_cap_mask_t mask;
781         int ret = 0;
782         u32 pid;
783         int i;
784 
785         if (np) {
786                 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
787                 pdev->dev.platform_data = pdata;
788                 ret = fsmc_nand_probe_config_dt(pdev, np);
789                 if (ret) {
790                         dev_err(&pdev->dev, "no platform data\n");
791                         return -ENODEV;
792                 }
793         }
794 
795         if (!pdata) {
796                 dev_err(&pdev->dev, "platform data is NULL\n");
797                 return -EINVAL;
798         }
799 
800         /* Allocate memory for the device structure (and zero it) */
801         host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
802         if (!host)
803                 return -ENOMEM;
804 
805         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
806         host->data_va = devm_ioremap_resource(&pdev->dev, res);
807         if (IS_ERR(host->data_va))
808                 return PTR_ERR(host->data_va);
809 
810         host->data_pa = (dma_addr_t)res->start;
811 
812         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
813         host->addr_va = devm_ioremap_resource(&pdev->dev, res);
814         if (IS_ERR(host->addr_va))
815                 return PTR_ERR(host->addr_va);
816 
817         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
818         host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
819         if (IS_ERR(host->cmd_va))
820                 return PTR_ERR(host->cmd_va);
821 
822         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
823         host->regs_va = devm_ioremap_resource(&pdev->dev, res);
824         if (IS_ERR(host->regs_va))
825                 return PTR_ERR(host->regs_va);
826 
827         host->clk = clk_get(&pdev->dev, NULL);
828         if (IS_ERR(host->clk)) {
829                 dev_err(&pdev->dev, "failed to fetch block clock\n");
830                 return PTR_ERR(host->clk);
831         }
832 
833         ret = clk_prepare_enable(host->clk);
834         if (ret)
835                 goto err_clk_prepare_enable;
836 
837         /*
838          * This device ID is actually a common AMBA ID as used on the
839          * AMBA PrimeCell bus. However it is not a PrimeCell.
840          */
841         for (pid = 0, i = 0; i < 4; i++)
842                 pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
843         host->pid = pid;
844         dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
845                  "revision %02x, config %02x\n",
846                  AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
847                  AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
848 
849         host->bank = pdata->bank;
850         host->select_chip = pdata->select_bank;
851         host->partitions = pdata->partitions;
852         host->nr_partitions = pdata->nr_partitions;
853         host->dev = &pdev->dev;
854         host->dev_timings = pdata->nand_timings;
855         host->mode = pdata->mode;
856 
857         if (host->mode == USE_DMA_ACCESS)
858                 init_completion(&host->dma_access_complete);
859 
860         /* Link all private pointers */
861         mtd = nand_to_mtd(&host->nand);
862         nand = &host->nand;
863         nand_set_controller_data(nand, host);
864         nand_set_flash_node(nand, np);
865 
866         mtd->dev.parent = &pdev->dev;
867         nand->IO_ADDR_R = host->data_va;
868         nand->IO_ADDR_W = host->data_va;
869         nand->cmd_ctrl = fsmc_cmd_ctrl;
870         nand->chip_delay = 30;
871 
872         /*
873          * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
874          * can overwrite this value if the DT provides a different value.
875          */
876         nand->ecc.mode = NAND_ECC_HW;
877         nand->ecc.hwctl = fsmc_enable_hwecc;
878         nand->ecc.size = 512;
879         nand->options = pdata->options;
880         nand->select_chip = fsmc_select_chip;
881         nand->badblockbits = 7;
882         nand_set_flash_node(nand, np);
883 
884         if (pdata->width == FSMC_NAND_BW16)
885                 nand->options |= NAND_BUSWIDTH_16;
886 
887         switch (host->mode) {
888         case USE_DMA_ACCESS:
889                 dma_cap_zero(mask);
890                 dma_cap_set(DMA_MEMCPY, mask);
891                 host->read_dma_chan = dma_request_channel(mask, filter,
892                                 pdata->read_dma_priv);
893                 if (!host->read_dma_chan) {
894                         dev_err(&pdev->dev, "Unable to get read dma channel\n");
895                         goto err_req_read_chnl;
896                 }
897                 host->write_dma_chan = dma_request_channel(mask, filter,
898                                 pdata->write_dma_priv);
899                 if (!host->write_dma_chan) {
900                         dev_err(&pdev->dev, "Unable to get write dma channel\n");
901                         goto err_req_write_chnl;
902                 }
903                 nand->read_buf = fsmc_read_buf_dma;
904                 nand->write_buf = fsmc_write_buf_dma;
905                 break;
906 
907         default:
908         case USE_WORD_ACCESS:
909                 nand->read_buf = fsmc_read_buf;
910                 nand->write_buf = fsmc_write_buf;
911                 break;
912         }
913 
914         fsmc_nand_setup(host->regs_va, host->bank,
915                         nand->options & NAND_BUSWIDTH_16,
916                         host->dev_timings);
917 
918         if (AMBA_REV_BITS(host->pid) >= 8) {
919                 nand->ecc.read_page = fsmc_read_page_hwecc;
920                 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
921                 nand->ecc.correct = fsmc_bch8_correct_data;
922                 nand->ecc.bytes = 13;
923                 nand->ecc.strength = 8;
924         }
925 
926         /*
927          * Scan to find existence of the device
928          */
929         if (nand_scan_ident(mtd, 1, NULL)) {
930                 ret = -ENXIO;
931                 dev_err(&pdev->dev, "No NAND Device found!\n");
932                 goto err_scan_ident;
933         }
934 
935         if (AMBA_REV_BITS(host->pid) >= 8) {
936                 switch (mtd->oobsize) {
937                 case 16:
938                 case 64:
939                 case 128:
940                 case 224:
941                 case 256:
942                         break;
943                 default:
944                         dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
945                                  mtd->oobsize);
946                         ret = -EINVAL;
947                         goto err_probe;
948                 }
949 
950                 mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
951         } else {
952                 switch (nand->ecc.mode) {
953                 case NAND_ECC_HW:
954                         dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
955                         nand->ecc.calculate = fsmc_read_hwecc_ecc1;
956                         nand->ecc.correct = nand_correct_data;
957                         nand->ecc.bytes = 3;
958                         nand->ecc.strength = 1;
959                         break;
960 
961                 case NAND_ECC_SOFT:
962                         if (nand->ecc.algo == NAND_ECC_BCH) {
963                                 dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
964                                 break;
965                         }
966 
967                 default:
968                         dev_err(&pdev->dev, "Unsupported ECC mode!\n");
969                         goto err_probe;
970                 }
971 
972                 /*
973                  * Don't set layout for BCH4 SW ECC. This will be
974                  * generated later in nand_bch_init() later.
975                  */
976                 if (nand->ecc.mode == NAND_ECC_HW) {
977                         switch (mtd->oobsize) {
978                         case 16:
979                         case 64:
980                         case 128:
981                                 mtd_set_ooblayout(mtd,
982                                                   &fsmc_ecc1_ooblayout_ops);
983                                 break;
984                         default:
985                                 dev_warn(&pdev->dev,
986                                          "No oob scheme defined for oobsize %d\n",
987                                          mtd->oobsize);
988                                 ret = -EINVAL;
989                                 goto err_probe;
990                         }
991                 }
992         }
993 
994         /* Second stage of scan to fill MTD data-structures */
995         if (nand_scan_tail(mtd)) {
996                 ret = -ENXIO;
997                 goto err_probe;
998         }
999 
1000         /*
1001          * The partition information can is accessed by (in the same precedence)
1002          *
1003          * command line through Bootloader,
1004          * platform data,
1005          * default partition information present in driver.
1006          */
1007         /*
1008          * Check for partition info passed
1009          */
1010         mtd->name = "nand";
1011         ret = mtd_device_register(mtd, host->partitions, host->nr_partitions);
1012         if (ret)
1013                 goto err_probe;
1014 
1015         platform_set_drvdata(pdev, host);
1016         dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
1017         return 0;
1018 
1019 err_probe:
1020 err_scan_ident:
1021         if (host->mode == USE_DMA_ACCESS)
1022                 dma_release_channel(host->write_dma_chan);
1023 err_req_write_chnl:
1024         if (host->mode == USE_DMA_ACCESS)
1025                 dma_release_channel(host->read_dma_chan);
1026 err_req_read_chnl:
1027         clk_disable_unprepare(host->clk);
1028 err_clk_prepare_enable:
1029         clk_put(host->clk);
1030         return ret;
1031 }
1032 
1033 /*
1034  * Clean up routine
1035  */
1036 static int fsmc_nand_remove(struct platform_device *pdev)
1037 {
1038         struct fsmc_nand_data *host = platform_get_drvdata(pdev);
1039 
1040         if (host) {
1041                 nand_release(nand_to_mtd(&host->nand));
1042 
1043                 if (host->mode == USE_DMA_ACCESS) {
1044                         dma_release_channel(host->write_dma_chan);
1045                         dma_release_channel(host->read_dma_chan);
1046                 }
1047                 clk_disable_unprepare(host->clk);
1048                 clk_put(host->clk);
1049         }
1050 
1051         return 0;
1052 }
1053 
1054 #ifdef CONFIG_PM_SLEEP
1055 static int fsmc_nand_suspend(struct device *dev)
1056 {
1057         struct fsmc_nand_data *host = dev_get_drvdata(dev);
1058         if (host)
1059                 clk_disable_unprepare(host->clk);
1060         return 0;
1061 }
1062 
1063 static int fsmc_nand_resume(struct device *dev)
1064 {
1065         struct fsmc_nand_data *host = dev_get_drvdata(dev);
1066         if (host) {
1067                 clk_prepare_enable(host->clk);
1068                 fsmc_nand_setup(host->regs_va, host->bank,
1069                                 host->nand.options & NAND_BUSWIDTH_16,
1070                                 host->dev_timings);
1071         }
1072         return 0;
1073 }
1074 #endif
1075 
1076 static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
1077 
#ifdef CONFIG_OF
/*
 * Device tree match table: this driver binds to both the SPEAr600 and
 * the ST-Ericsson compatible strings for the FSMC NAND controller.
 */
static const struct of_device_id fsmc_nand_id_table[] = {
	{ .compatible = "st,spear600-fsmc-nand" },
	{ .compatible = "stericsson,fsmc-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
#endif
1086 
/*
 * No .probe callback here: the driver is registered through
 * module_platform_driver_probe() below, which hands fsmc_nand_probe
 * to platform_driver_probe() directly (probe runs once at init time).
 */
static struct platform_driver fsmc_nand_driver = {
	.remove = fsmc_nand_remove,
	.driver = {
		.name = "fsmc-nand",
		.of_match_table = of_match_ptr(fsmc_nand_id_table),
		.pm = &fsmc_nand_pm_ops,
	},
};
1095 
/* Register the driver; the device is not hot-pluggable, so probe once. */
module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
1101 
