Version:  2.0.40 2.2.26 2.4.37 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17

Linux/drivers/mmc/host/dw_mmc.c

  1 /*
  2  * Synopsys DesignWare Multimedia Card Interface driver
  3  *  (Based on NXP driver for lpc 31xx)
  4  *
  5  * Copyright (C) 2009 NXP Semiconductors
  6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
  7  *
  8  * This program is free software; you can redistribute it and/or modify
  9  * it under the terms of the GNU General Public License as published by
 10  * the Free Software Foundation; either version 2 of the License, or
 11  * (at your option) any later version.
 12  */
 13 
 14 #include <linux/blkdev.h>
 15 #include <linux/clk.h>
 16 #include <linux/debugfs.h>
 17 #include <linux/device.h>
 18 #include <linux/dma-mapping.h>
 19 #include <linux/err.h>
 20 #include <linux/init.h>
 21 #include <linux/interrupt.h>
 22 #include <linux/ioport.h>
 23 #include <linux/module.h>
 24 #include <linux/platform_device.h>
 25 #include <linux/seq_file.h>
 26 #include <linux/slab.h>
 27 #include <linux/stat.h>
 28 #include <linux/delay.h>
 29 #include <linux/irq.h>
 30 #include <linux/mmc/host.h>
 31 #include <linux/mmc/mmc.h>
 32 #include <linux/mmc/sdio.h>
 33 #include <linux/mmc/dw_mmc.h>
 34 #include <linux/bitops.h>
 35 #include <linux/regulator/consumer.h>
 36 #include <linux/workqueue.h>
 37 #include <linux/of.h>
 38 #include <linux/of_gpio.h>
 39 #include <linux/mmc/slot-gpio.h>
 40 
 41 #include "dw_mmc.h"
 42 
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
/* Values stored in host->dir_status to record the transfer direction */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* Minimum transfer size (bytes) worth the DMA setup overhead */
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
 57 
#ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt status bits; used to clear IDSTS at init */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/*
 * Hardware descriptor for the controller's internal DMA controller
 * (IDMAC), used in chained-descriptor mode (des3 links to the next
 * descriptor; only buffer 1 is used for data).
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable completion interrupt for this desc */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 is a chain pointer, not a buffer */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor is owned by the DMA engine */

	u32		des1;	/* Buffer sizes */
	/* Buffer 1 size lives in bits [12:0]; bits [25:13] (buffer 2) kept */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
 83 
/*
 * Tuning block pattern for a 4-bit bus (presumably the standard
 * SD/eMMC tuning pattern sent during CMD19/CMD21 tuning — the
 * consumer is outside this chunk, so confirm against the tuning code).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
 94 
/*
 * Tuning block pattern for an 8-bit bus — the 4-bit pattern with each
 * nibble widened to a byte (presumably per the eMMC CMD21 spec; the
 * consumer is outside this chunk, so confirm against the tuning code).
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
113 
114 static bool dw_mci_reset(struct dw_mci *host);
115 
116 #if defined(CONFIG_DEBUG_FS)
117 static int dw_mci_req_show(struct seq_file *s, void *v)
118 {
119         struct dw_mci_slot *slot = s->private;
120         struct mmc_request *mrq;
121         struct mmc_command *cmd;
122         struct mmc_command *stop;
123         struct mmc_data *data;
124 
125         /* Make sure we get a consistent snapshot */
126         spin_lock_bh(&slot->host->lock);
127         mrq = slot->mrq;
128 
129         if (mrq) {
130                 cmd = mrq->cmd;
131                 data = mrq->data;
132                 stop = mrq->stop;
133 
134                 if (cmd)
135                         seq_printf(s,
136                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137                                    cmd->opcode, cmd->arg, cmd->flags,
138                                    cmd->resp[0], cmd->resp[1], cmd->resp[2],
139                                    cmd->resp[2], cmd->error);
140                 if (data)
141                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
142                                    data->bytes_xfered, data->blocks,
143                                    data->blksz, data->flags, data->error);
144                 if (stop)
145                         seq_printf(s,
146                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
147                                    stop->opcode, stop->arg, stop->flags,
148                                    stop->resp[0], stop->resp[1], stop->resp[2],
149                                    stop->resp[2], stop->error);
150         }
151 
152         spin_unlock_bh(&slot->host->lock);
153 
154         return 0;
155 }
156 
/* debugfs open hook: bind dw_mci_req_show to the slot stored in i_private */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
161 
/* File operations for the per-slot debugfs "req" file */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
169 
170 static int dw_mci_regs_show(struct seq_file *s, void *v)
171 {
172         seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
173         seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
174         seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
175         seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
176         seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
177         seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
178 
179         return 0;
180 }
181 
/* debugfs open hook: bind dw_mci_regs_show to the host stored in i_private */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
186 
/* File operations for the debugfs "regs" register-dump file */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
194 
/*
 * Create the per-slot debugfs entries under the mmc host's debugfs root:
 * "regs" (register dump), "req" (current request) and raw views of the
 * host state machine ("state", "pending_events", "completed_events").
 * Failure is non-fatal; only an error message is logged.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	/* No debugfs root for this host: silently skip */
	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
235 #endif /* defined(CONFIG_DEBUG_FS) */
236 
237 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
238 {
239         struct mmc_data *data;
240         struct dw_mci_slot *slot = mmc_priv(mmc);
241         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
242         u32 cmdr;
243         cmd->error = -EINPROGRESS;
244 
245         cmdr = cmd->opcode;
246 
247         if (cmd->opcode == MMC_STOP_TRANSMISSION ||
248             cmd->opcode == MMC_GO_IDLE_STATE ||
249             cmd->opcode == MMC_GO_INACTIVE_STATE ||
250             (cmd->opcode == SD_IO_RW_DIRECT &&
251              ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
252                 cmdr |= SDMMC_CMD_STOP;
253         else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
254                 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
255 
256         if (cmd->flags & MMC_RSP_PRESENT) {
257                 /* We expect a response, so set this bit */
258                 cmdr |= SDMMC_CMD_RESP_EXP;
259                 if (cmd->flags & MMC_RSP_136)
260                         cmdr |= SDMMC_CMD_RESP_LONG;
261         }
262 
263         if (cmd->flags & MMC_RSP_CRC)
264                 cmdr |= SDMMC_CMD_RESP_CRC;
265 
266         data = cmd->data;
267         if (data) {
268                 cmdr |= SDMMC_CMD_DAT_EXP;
269                 if (data->flags & MMC_DATA_STREAM)
270                         cmdr |= SDMMC_CMD_STRM_MODE;
271                 if (data->flags & MMC_DATA_WRITE)
272                         cmdr |= SDMMC_CMD_DAT_WR;
273         }
274 
275         if (drv_data && drv_data->prepare_command)
276                 drv_data->prepare_command(slot->host, &cmdr);
277 
278         return cmdr;
279 }
280 
281 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
282 {
283         struct mmc_command *stop;
284         u32 cmdr;
285 
286         if (!cmd->data)
287                 return 0;
288 
289         stop = &host->stop_abort;
290         cmdr = cmd->opcode;
291         memset(stop, 0, sizeof(struct mmc_command));
292 
293         if (cmdr == MMC_READ_SINGLE_BLOCK ||
294             cmdr == MMC_READ_MULTIPLE_BLOCK ||
295             cmdr == MMC_WRITE_BLOCK ||
296             cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
297                 stop->opcode = MMC_STOP_TRANSMISSION;
298                 stop->arg = 0;
299                 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
300         } else if (cmdr == SD_IO_RW_EXTENDED) {
301                 stop->opcode = SD_IO_RW_DIRECT;
302                 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
303                              ((cmd->arg >> 28) & 0x7);
304                 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
305         } else {
306                 return 0;
307         }
308 
309         cmdr = stop->opcode | SDMMC_CMD_STOP |
310                 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
311 
312         return cmdr;
313 }
314 
/* Latch the command into host->cmd and start it on the controller. */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	/* Argument must be visible to the device before the start bit is set */
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
328 
/*
 * Issue the stop command for a data transfer: the request's own stop
 * command if present, otherwise the pre-built host->stop_abort, using
 * the command word cached in host->stop_cmdr.
 */
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}
334 
335 /* DMA interface functions */
/* Abort any in-flight DMA and mark the data transfer as complete. */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
346 
347 static int dw_mci_get_dma_dir(struct mmc_data *data)
348 {
349         if (data->flags & MMC_DATA_WRITE)
350                 return DMA_TO_DEVICE;
351         else
352                 return DMA_FROM_DEVICE;
353 }
354 
355 #ifdef CONFIG_MMC_DW_IDMAC
356 static void dw_mci_dma_cleanup(struct dw_mci *host)
357 {
358         struct mmc_data *data = host->data;
359 
360         if (data)
361                 if (!data->host_cookie)
362                         dma_unmap_sg(host->dev,
363                                      data->sg,
364                                      data->sg_len,
365                                      dw_mci_get_dma_dir(data));
366 }
367 
/* Soft-reset the internal DMA controller via the BMOD register. */
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
375 
/* Halt the internal DMAC: detach it from the controller, then reset it. */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
392 
/*
 * IDMAC transfer-complete handler: unmap the buffers and, if the data
 * request is still alive, flag the transfer done and kick the tasklet.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
410 
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist.
 * Every descriptor is chained (CH) with its completion interrupt
 * suppressed (DIC) and handed to hardware (OWN); the first/last
 * descriptors are then tagged FD/LD, and the last one has chaining
 * and interrupt suppression cleared so the final interrupt fires.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/*
	 * Set last descriptor.  NOTE(review): the byte-offset arithmetic
	 * below only lands on element (i - 1) if host->sg_cpu is a void *
	 * — confirm against struct dw_mci.
	 */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	/* Descriptors must be in memory before the DMA engine is poked */
	wmb();
}
442 
/* Program the descriptor ring for the current data and start the IDMAC. */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Make sure the control write lands before enabling the engine */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
464 
/*
 * One-time IDMAC setup: link the descriptors in host->sg_cpu into a
 * forward-chained ring, reset the engine, unmask only the interrupts
 * we care about and point the hardware at the ring.  Returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
492 
/* DMA operations backed by the controller's internal DMAC (IDMAC) */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
500 #endif /* CONFIG_MMC_DW_IDMAC */
501 
502 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
503                                    struct mmc_data *data,
504                                    bool next)
505 {
506         struct scatterlist *sg;
507         unsigned int i, sg_len;
508 
509         if (!next && data->host_cookie)
510                 return data->host_cookie;
511 
512         /*
513          * We don't do DMA on "complex" transfers, i.e. with
514          * non-word-aligned buffers or lengths. Also, we don't bother
515          * with all the DMA setup overhead for short transfers.
516          */
517         if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
518                 return -EINVAL;
519 
520         if (data->blksz & 3)
521                 return -EINVAL;
522 
523         for_each_sg(data->sg, sg, data->sg_len, i) {
524                 if (sg->offset & 3 || sg->length & 3)
525                         return -EINVAL;
526         }
527 
528         sg_len = dma_map_sg(host->dev,
529                             data->sg,
530                             data->sg_len,
531                             dw_mci_get_dma_dir(data));
532         if (sg_len == 0)
533                 return -EINVAL;
534 
535         if (next)
536                 data->host_cookie = sg_len;
537 
538         return sg_len;
539 }
540 
541 static void dw_mci_pre_req(struct mmc_host *mmc,
542                            struct mmc_request *mrq,
543                            bool is_first_req)
544 {
545         struct dw_mci_slot *slot = mmc_priv(mmc);
546         struct mmc_data *data = mrq->data;
547 
548         if (!slot->host->use_dma || !data)
549                 return;
550 
551         if (data->host_cookie) {
552                 data->host_cookie = 0;
553                 return;
554         }
555 
556         if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
557                 data->host_cookie = 0;
558 }
559 
560 static void dw_mci_post_req(struct mmc_host *mmc,
561                             struct mmc_request *mrq,
562                             int err)
563 {
564         struct dw_mci_slot *slot = mmc_priv(mmc);
565         struct mmc_data *data = mrq->data;
566 
567         if (!slot->host->use_dma || !data)
568                 return;
569 
570         if (data->host_cookie)
571                 dma_unmap_sg(slot->host->dev,
572                              data->sg,
573                              data->sg_len,
574                              dw_mci_get_dma_dir(data));
575         data->host_cookie = 0;
576 }
577 
/*
 * Tune the FIFOTH register (DMA burst size MSIZE plus RX/TX watermarks)
 * for the given block size, picking the largest burst that evenly
 * divides both the block depth and the TX watermark complement.
 *
 * Improvements: the burst-size table is now static const (it was being
 * rebuilt on the stack on every call), the index uses ARRAY_SIZE, and
 * the garbled trailing comment is repaired.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	/* Burst sizes selectable by MSIZE index 0..7 */
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	unsigned int blksz = data->blksz;
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
618 
/*
 * Program the card read threshold (CDTHRCTL) for a read transfer.
 * Only enabled for HS200/SDR104 timings and only when a whole block
 * fits in the FIFO; otherwise the threshold is disabled.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	/* This helper is only meaningful for reads */
	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	/* A block larger than the FIFO cannot use the threshold */
	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
649 
/*
 * Try to set up the data transfer via DMA.  Returns 0 on success; a
 * negative value means the caller must fall back to PIO.  On success
 * the RX/TX FIFO interrupts are masked and the DMA engine is started.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		/* Mapping refused (unaligned/short transfer): stop the engine */
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
696 
/*
 * Set up host state for a data transfer and submit it, preferring DMA
 * and falling back to PIO (sg_miter + FIFO interrupts) when DMA setup
 * fails.  Also records the direction in host->dir_status and manages
 * prev_blksz so FIFOTH is only reprogrammed when the block size changes.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	/* Only one data transfer may be in flight at a time */
	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback: walk the scatterlist with an atomic miter */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale FIFO events, then unmask RX/TX interrupts */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
751 
/*
 * Send a controller-internal command (e.g. a clock-update command) and
 * busy-wait up to 500 ms for the hardware to clear the start bit.  A
 * timeout is only logged; the caller is not informed.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	/* Argument must be visible before the start bit is set */
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		/* Hardware clears SDMMC_CMD_START once the command is taken */
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
771 
/*
 * Program the card clock and bus width for a slot.  A zero clock gates
 * the clock off; otherwise the divider is recomputed only when the
 * requested speed changed (or @force_clkinit is set), with the CIU
 * informed via a clock-update command around every clock register
 * change, as the controller requires.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		/* Gate the card clock off */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; 0 means bypass (bus_hz == clock) */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective clock actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock dividor */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
835 
/*
 * Begin executing @cmd for @slot: reset per-request host bookkeeping,
 * program the data registers if there is a data phase, start the
 * command, and pre-compute the stop/abort command word for later use.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	/* Fresh request: clear all per-request state */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* Maximum hardware timeout; byte count and block size */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* Data setup must complete before the command is started */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	/* Cache the stop command word for the data-phase completion path */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
880 
881 static void dw_mci_start_request(struct dw_mci *host,
882                                  struct dw_mci_slot *slot)
883 {
884         struct mmc_request *mrq = slot->mrq;
885         struct mmc_command *cmd;
886 
887         cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
888         __dw_mci_start_request(host, slot, cmd);
889 }
890 
891 /* must be called with host->lock held */
892 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
893                                  struct mmc_request *mrq)
894 {
895         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
896                  host->state);
897 
898         slot->mrq = mrq;
899 
900         if (host->state == STATE_IDLE) {
901                 host->state = STATE_SENDING_CMD;
902                 dw_mci_start_request(host, slot);
903         } else {
904                 list_add_tail(&slot->queue_node, &host->queue);
905         }
906 }
907 
908 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
909 {
910         struct dw_mci_slot *slot = mmc_priv(mmc);
911         struct dw_mci *host = slot->host;
912 
913         WARN_ON(slot->mrq);
914 
915         /*
916          * The check for card presence and queueing of the request must be
917          * atomic, otherwise the card could be removed in between and the
918          * request wouldn't fail until another card was inserted.
919          */
920         spin_lock_bh(&host->lock);
921 
922         if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
923                 spin_unlock_bh(&host->lock);
924                 mrq->cmd->error = -ENOMEDIUM;
925                 mmc_request_done(mmc, mrq);
926                 return;
927         }
928 
929         dw_mci_queue_request(host, slot, mrq);
930 
931         spin_unlock_bh(&host->lock);
932 }
933 
934 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
935 {
936         struct dw_mci_slot *slot = mmc_priv(mmc);
937         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
938         u32 regs;
939 
940         switch (ios->bus_width) {
941         case MMC_BUS_WIDTH_4:
942                 slot->ctype = SDMMC_CTYPE_4BIT;
943                 break;
944         case MMC_BUS_WIDTH_8:
945                 slot->ctype = SDMMC_CTYPE_8BIT;
946                 break;
947         default:
948                 /* set default 1 bit mode */
949                 slot->ctype = SDMMC_CTYPE_1BIT;
950         }
951 
952         regs = mci_readl(slot->host, UHS_REG);
953 
954         /* DDR mode set */
955         if (ios->timing == MMC_TIMING_MMC_DDR52)
956                 regs |= ((0x1 << slot->id) << 16);
957         else
958                 regs &= ~((0x1 << slot->id) << 16);
959 
960         mci_writel(slot->host, UHS_REG, regs);
961         slot->host->timing = ios->timing;
962 
963         /*
964          * Use mirror of ios->clock to prevent race with mmc
965          * core ios update when finding the minimum.
966          */
967         slot->clock = ios->clock;
968 
969         if (drv_data && drv_data->set_ios)
970                 drv_data->set_ios(slot->host, ios);
971 
972         /* Slot specific timing and width adjustment */
973         dw_mci_setup_bus(slot, false);
974 
975         switch (ios->power_mode) {
976         case MMC_POWER_UP:
977                 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
978                 regs = mci_readl(slot->host, PWREN);
979                 regs |= (1 << slot->id);
980                 mci_writel(slot->host, PWREN, regs);
981                 break;
982         case MMC_POWER_OFF:
983                 regs = mci_readl(slot->host, PWREN);
984                 regs &= ~(1 << slot->id);
985                 mci_writel(slot->host, PWREN, regs);
986                 break;
987         default:
988                 break;
989         }
990 }
991 
992 static int dw_mci_get_ro(struct mmc_host *mmc)
993 {
994         int read_only;
995         struct dw_mci_slot *slot = mmc_priv(mmc);
996         int gpio_ro = mmc_gpio_get_ro(mmc);
997 
998         /* Use platform get_ro function, else try on board write protect */
999         if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
1000                         (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
1001                 read_only = 0;
1002         else if (!IS_ERR_VALUE(gpio_ro))
1003                 read_only = gpio_ro;
1004         else
1005                 read_only =
1006                         mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1007 
1008         dev_dbg(&mmc->class_dev, "card is %s\n",
1009                 read_only ? "read-only" : "read-write");
1010 
1011         return read_only;
1012 }
1013 
1014 static int dw_mci_get_cd(struct mmc_host *mmc)
1015 {
1016         int present;
1017         struct dw_mci_slot *slot = mmc_priv(mmc);
1018         struct dw_mci_board *brd = slot->host->pdata;
1019         struct dw_mci *host = slot->host;
1020         int gpio_cd = mmc_gpio_get_cd(mmc);
1021 
1022         /* Use platform get_cd function, else try onboard card detect */
1023         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1024                 present = 1;
1025         else if (!IS_ERR_VALUE(gpio_cd))
1026                 present = gpio_cd;
1027         else
1028                 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1029                         == 0 ? 1 : 0;
1030 
1031         spin_lock_bh(&host->lock);
1032         if (present) {
1033                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1034                 dev_dbg(&mmc->class_dev, "card is present\n");
1035         } else {
1036                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1037                 dev_dbg(&mmc->class_dev, "card is not present\n");
1038         }
1039         spin_unlock_bh(&host->lock);
1040 
1041         return present;
1042 }
1043 
1044 /*
1045  * Disable lower power mode.
1046  *
1047  * Low power mode will stop the card clock when idle.  According to the
1048  * description of the CLKENA register we should disable low power mode
1049  * for SDIO cards if we need SDIO interrupts to work.
1050  *
1051  * This function is fast if low power mode is already disabled.
1052  */
1053 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1054 {
1055         struct dw_mci *host = slot->host;
1056         u32 clk_en_a;
1057         const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1058 
1059         clk_en_a = mci_readl(host, CLKENA);
1060 
1061         if (clk_en_a & clken_low_pwr) {
1062                 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1063                 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1064                              SDMMC_CMD_PRV_DAT_WAIT, 0);
1065         }
1066 }
1067 
1068 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1069 {
1070         struct dw_mci_slot *slot = mmc_priv(mmc);
1071         struct dw_mci *host = slot->host;
1072         u32 int_mask;
1073 
1074         /* Enable/disable Slot Specific SDIO interrupt */
1075         int_mask = mci_readl(host, INTMASK);
1076         if (enb) {
1077                 /*
1078                  * Turn off low power mode if it was enabled.  This is a bit of
1079                  * a heavy operation and we disable / enable IRQs a lot, so
1080                  * we'll leave low power mode disabled and it will get
1081                  * re-enabled again in dw_mci_setup_bus().
1082                  */
1083                 dw_mci_disable_low_power(slot);
1084 
1085                 mci_writel(host, INTMASK,
1086                            (int_mask | SDMMC_INT_SDIO(slot->id)));
1087         } else {
1088                 mci_writel(host, INTMASK,
1089                            (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1090         }
1091 }
1092 
1093 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1094 {
1095         struct dw_mci_slot *slot = mmc_priv(mmc);
1096         struct dw_mci *host = slot->host;
1097         const struct dw_mci_drv_data *drv_data = host->drv_data;
1098         struct dw_mci_tuning_data tuning_data;
1099         int err = -ENOSYS;
1100 
1101         if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1102                 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1103                         tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1104                         tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1105                 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1106                         tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1107                         tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1108                 } else {
1109                         return -EINVAL;
1110                 }
1111         } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1112                 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1113                 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1114         } else {
1115                 dev_err(host->dev,
1116                         "Undefined command(%d) for tuning\n", opcode);
1117                 return -EINVAL;
1118         }
1119 
1120         if (drv_data && drv_data->execute_tuning)
1121                 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1122         return err;
1123 }
1124 
/* Host controller operations handed to the MMC core via mmc->ops. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};
1135 
/*
 * Finish the current request and, still under host->lock, start the
 * next queued slot (if any).  The lock is released only across
 * mmc_request_done() — see the __releases/__acquires annotations.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	/* Both command and data must be retired before ending the request. */
	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: make it current and start it. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* Drop the lock while notifying the core of completion. */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1164 
1165 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1166 {
1167         u32 status = host->cmd_status;
1168 
1169         host->cmd_status = 0;
1170 
1171         /* Read the response from the card (up to 16 bytes) */
1172         if (cmd->flags & MMC_RSP_PRESENT) {
1173                 if (cmd->flags & MMC_RSP_136) {
1174                         cmd->resp[3] = mci_readl(host, RESP0);
1175                         cmd->resp[2] = mci_readl(host, RESP1);
1176                         cmd->resp[1] = mci_readl(host, RESP2);
1177                         cmd->resp[0] = mci_readl(host, RESP3);
1178                 } else {
1179                         cmd->resp[0] = mci_readl(host, RESP0);
1180                         cmd->resp[1] = 0;
1181                         cmd->resp[2] = 0;
1182                         cmd->resp[3] = 0;
1183                 }
1184         }
1185 
1186         if (status & SDMMC_INT_RTO)
1187                 cmd->error = -ETIMEDOUT;
1188         else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1189                 cmd->error = -EILSEQ;
1190         else if (status & SDMMC_INT_RESP_ERR)
1191                 cmd->error = -EIO;
1192         else
1193                 cmd->error = 0;
1194 
1195         if (cmd->error) {
1196                 /* newer ip versions need a delay between retries */
1197                 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1198                         mdelay(20);
1199         }
1200 
1201         return cmd->error;
1202 }
1203 
1204 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1205 {
1206         u32 status = host->data_status;
1207 
1208         if (status & DW_MCI_DATA_ERROR_FLAGS) {
1209                 if (status & SDMMC_INT_DRTO) {
1210                         data->error = -ETIMEDOUT;
1211                 } else if (status & SDMMC_INT_DCRC) {
1212                         data->error = -EILSEQ;
1213                 } else if (status & SDMMC_INT_EBE) {
1214                         if (host->dir_status ==
1215                                 DW_MCI_SEND_STATUS) {
1216                                 /*
1217                                  * No data CRC status was returned.
1218                                  * The number of bytes transferred
1219                                  * will be exaggerated in PIO mode.
1220                                  */
1221                                 data->bytes_xfered = 0;
1222                                 data->error = -ETIMEDOUT;
1223                         } else if (host->dir_status ==
1224                                         DW_MCI_RECV_STATUS) {
1225                                 data->error = -EIO;
1226                         }
1227                 } else {
1228                         /* SDMMC_INT_SBE is included */
1229                         data->error = -EIO;
1230                 }
1231 
1232                 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1233 
1234                 /*
1235                  * After an error, there may be data lingering
1236                  * in the FIFO
1237                  */
1238                 dw_mci_reset(host);
1239         } else {
1240                 data->bytes_xfered = data->blocks * data->blksz;
1241                 data->error = 0;
1242         }
1243 
1244         return data->error;
1245 }
1246 
/*
 * Bottom-half state machine driving a request through its phases:
 * command -> data -> busy -> stop.  Completion/error events are posted
 * into host->pending_events by the interrupt handler; everything here
 * runs under host->lock.  The loop re-evaluates until the state stops
 * advancing, and the switch cases deliberately fall through so one pass
 * can retire several phases at once.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			/* Wait for the ISR to flag command completion. */
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now issue the data command. */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* Data command failed: abort the transfer. */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* CMD23 pre-set: no stop needed. */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer to wind down. */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1383 
1384 /* push final bytes to part_buf, only use during push */
1385 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1386 {
1387         memcpy((void *)&host->part_buf, buf, cnt);
1388         host->part_buf_count = cnt;
1389 }
1390 
1391 /* append bytes to part_buf, only use during push */
1392 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1393 {
1394         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1395         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1396         host->part_buf_count += cnt;
1397         return cnt;
1398 }
1399 
1400 /* pull first bytes from part_buf, only use during pull */
1401 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1402 {
1403         cnt = min(cnt, (int)host->part_buf_count);
1404         if (cnt) {
1405                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1406                        cnt);
1407                 host->part_buf_count -= cnt;
1408                 host->part_buf_start += cnt;
1409         }
1410         return cnt;
1411 }
1412 
1413 /* pull final bytes from the part_buf, assuming it's just been filled */
1414 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1415 {
1416         memcpy(buf, &host->part_buf, cnt);
1417         host->part_buf_start = cnt;
1418         host->part_buf_count = (1 << host->data_shift) - cnt;
1419 }
1420 
/*
 * PIO write path for a 16-bit FIFO: feed @cnt bytes from @buf into the
 * data FIFO, buffering any trailing odd byte in host->part_buf16 until
 * it can be flushed as a full word (or at end of transfer).
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			/* part_buf now holds a full word: flush it */
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		 /* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
1471 
/*
 * PIO read path for a 16-bit FIFO: drain @cnt bytes from the data FIFO
 * into @buf, stashing a trailing partial word in host->part_buf16 via
 * dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* odd trailing byte: read one more word and buffer it */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1503 
/*
 * PIO write path for a 32-bit FIFO: feed @cnt bytes from @buf into the
 * data FIFO, buffering any trailing partial word in host->part_buf32
 * until it can be flushed as a full word (or at end of transfer).
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			/* part_buf now holds a full word: flush it */
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		 /* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1554 
/*
 * PIO read path for a 32-bit FIFO: drain @cnt bytes from the data FIFO
 * into @buf, stashing a trailing partial word in host->part_buf32 via
 * dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* trailing partial word: read one more and buffer it */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1586 
/*
 * PIO write path for a 64-bit FIFO: feed @cnt bytes from @buf into the
 * data FIFO, buffering any trailing partial word in host->part_buf
 * until it can be flushed as a full word (or at end of transfer).
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			/* part_buf now holds a full word: flush it */
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1638 
/*
 * Drain the data FIFO into @buf using 64-bit reads (used when HCON
 * reports a 64-bit host data width).  Any trailing partial word is
 * parked in host->part_buf for dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* bounce through an aligned buffer when @buf is not 8-byte aligned */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* read one final full word; leftover bytes stay in part_buf */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1670 
1671 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1672 {
1673         int len;
1674 
1675         /* get remaining partial bytes */
1676         len = dw_mci_pull_part_bytes(host, buf, cnt);
1677         if (unlikely(len == cnt))
1678                 return;
1679         buf += len;
1680         cnt -= len;
1681 
1682         /* get the rest of the data */
1683         host->pull_data(host, buf, cnt);
1684 }
1685 
/*
 * PIO read path: drain the controller FIFO into the request's
 * scatterlist through host->sg_miter.
 * @dto: true when invoked for the Data Transfer Over interrupt; then we
 *	 keep reading while the FIFO still reports data.
 * Runs in interrupt context; sets EVENT_XFER_COMPLETE once the
 * scatterlist is exhausted.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available: FIFO words scaled to the host
			 * data width, plus any buffered partial word */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* sample MINTSTS before acking RXDR so the loop condition
		 * sees interrupts raised while we were draining */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* current segment fully consumed; advance so the next
		 * invocation starts on a fresh segment */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* make host->sg = NULL visible before the event bit */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1739 
/*
 * PIO write path: push data from the request's scatterlist into the
 * controller FIFO until the FIFO fills or the scatterlist runs out.
 * Runs in interrupt context on TXDR; sets EVENT_XFER_COMPLETE once the
 * scatterlist is exhausted.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free FIFO space in bytes, minus any partial word
			 * already buffered in part_buf */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* sample MINTSTS before acking TXDR so the loop condition
		 * sees interrupts raised while we were filling the FIFO */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* current segment fully consumed; advance so the next
		 * invocation starts on a fresh segment */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* make host->sg = NULL visible before the event bit */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1793 
/*
 * Latch the command-phase interrupt status (first recorded status wins)
 * and kick the state-machine tasklet with EVENT_CMD_COMPLETE.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* publish cmd_status before the event bit the tasklet tests */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1804 
/*
 * Top-level interrupt handler.  Decodes MINTSTS, acknowledges each
 * handled source in RINTSTS, records command/data status for the
 * tasklet, and services PIO, card-detect, SDIO and (when configured)
 * internal-DMAC interrupts.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		/* STATUS bits [29:17] hold the FIFO count; a non-empty FIFO
		 * with nothing pending means the DTO interrupt was lost */
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			/* publish cmd_status before the event bit */
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			/* publish data_status before the event bit */
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* final FIFO drain for this transfer */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			/* card insert/remove: defer handling to workqueue */
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1898 
/*
 * Workqueue handler for card-detect events.  For each slot, while the
 * observed presence differs from the last recorded state, tear down any
 * in-flight or queued request with -ENOMEDIUM under host->lock, reset
 * the controller on removal, and finally notify the MMC core through
 * mmc_detect_change().
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request is active on the hardware:
					 * unwind according to FSM state */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request was only queued: fail it
					 * and complete it directly */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					/* drop the lock across the completion
					 * callback to avoid recursion */
					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0)
				dw_mci_reset(host);

			spin_unlock_bh(&host->lock);

			/* re-sample in case the state changed meanwhile */
			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1978 
1979 #ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	/* NOTE(review): for_each_child_of_node() takes a reference on the
	 * node returned below, and the callers here never of_node_put() it
	 * — confirm whether the reference leak matters in this context */
	for_each_child_of_node(dev->of_node, np) {
		/* match the child whose "reg" cell equals the slot id */
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}
1999 
/*
 * Deprecated per-slot device-tree properties mapped onto slot quirk
 * flags; scanned by dw_mci_of_get_slot_quirks().
 */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* DT property name */
	int id;		/* corresponding DW_MCI_SLOT_QUIRK_* flag */
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2009 
2010 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2011 {
2012         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2013         int quirks = 0;
2014         int idx;
2015 
2016         /* get quirks */
2017         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2018                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2019                         dev_warn(dev, "Slot quirk %s is deprecated\n",
2020                                         of_slot_quirks[idx].quirk);
2021                         quirks |= of_slot_quirks[idx].id;
2022                 }
2023 
2024         return quirks;
2025 }
2026 #else /* CONFIG_OF */
/* !CONFIG_OF stub: no device tree, so no slot quirks */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
/* !CONFIG_OF stub: no device tree, so no slot node to find */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
2035 #endif /* CONFIG_OF */
2036 
2037 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2038 {
2039         struct mmc_host *mmc;
2040         struct dw_mci_slot *slot;
2041         const struct dw_mci_drv_data *drv_data = host->drv_data;
2042         int ctrl_id, ret;
2043         u32 freq[2];
2044 
2045         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2046         if (!mmc)
2047                 return -ENOMEM;
2048 
2049         slot = mmc_priv(mmc);
2050         slot->id = id;
2051         slot->mmc = mmc;
2052         slot->host = host;
2053         host->slot[id] = slot;
2054 
2055         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2056 
2057         mmc->ops = &dw_mci_ops;
2058         if (of_property_read_u32_array(host->dev->of_node,
2059                                        "clock-freq-min-max", freq, 2)) {
2060                 mmc->f_min = DW_MCI_FREQ_MIN;
2061                 mmc->f_max = DW_MCI_FREQ_MAX;
2062         } else {
2063                 mmc->f_min = freq[0];
2064                 mmc->f_max = freq[1];
2065         }
2066 
2067         mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2068 
2069         if (host->pdata->caps)
2070                 mmc->caps = host->pdata->caps;
2071 
2072         if (host->pdata->pm_caps)
2073                 mmc->pm_caps = host->pdata->pm_caps;
2074 
2075         if (host->dev->of_node) {
2076                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2077                 if (ctrl_id < 0)
2078                         ctrl_id = 0;
2079         } else {
2080                 ctrl_id = to_platform_device(host->dev)->id;
2081         }
2082         if (drv_data && drv_data->caps)
2083                 mmc->caps |= drv_data->caps[ctrl_id];
2084 
2085         if (host->pdata->caps2)
2086                 mmc->caps2 = host->pdata->caps2;
2087 
2088         mmc_of_parse(mmc);
2089 
2090         if (host->pdata->blk_settings) {
2091                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2092                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2093                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2094                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2095                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2096         } else {
2097                 /* Useful defaults if platform data is unset. */
2098 #ifdef CONFIG_MMC_DW_IDMAC
2099                 mmc->max_segs = host->ring_size;
2100                 mmc->max_blk_size = 65536;
2101                 mmc->max_blk_count = host->ring_size;
2102                 mmc->max_seg_size = 0x1000;
2103                 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2104 #else
2105                 mmc->max_segs = 64;
2106                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2107                 mmc->max_blk_count = 512;
2108                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2109                 mmc->max_seg_size = mmc->max_req_size;
2110 #endif /* CONFIG_MMC_DW_IDMAC */
2111         }
2112 
2113         if (dw_mci_get_cd(mmc))
2114                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2115         else
2116                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2117 
2118         ret = mmc_add_host(mmc);
2119         if (ret)
2120                 goto err_setup_bus;
2121 
2122 #if defined(CONFIG_DEBUG_FS)
2123         dw_mci_init_debugfs(slot);
2124 #endif
2125 
2126         /* Card initially undetected */
2127         slot->last_detect_state = 0;
2128 
2129         return 0;
2130 
2131 err_setup_bus:
2132         mmc_free_host(mmc);
2133         return -EINVAL;
2134 }
2135 
/*
 * Unregister and free the mmc_host for slot @id; counterpart of
 * dw_mci_init_slot().  The host must be removed before it is freed.
 */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
2143 
/*
 * Probe for a usable DMA backend and initialize it, falling back to
 * PIO (host->use_dma = 0) on any failure.  The scatter-gather
 * descriptor page is device-managed, so the error paths need no
 * explicit free.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	/* a usable backend must provide the full set of callbacks */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
2184 
2185 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2186 {
2187         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2188         u32 ctrl;
2189 
2190         ctrl = mci_readl(host, CTRL);
2191         ctrl |= reset;
2192         mci_writel(host, CTRL, ctrl);
2193 
2194         /* wait till resets clear */
2195         do {
2196                 ctrl = mci_readl(host, CTRL);
2197                 if (!(ctrl & reset))
2198                         return true;
2199         } while (time_before(jiffies, timeout));
2200 
2201         dev_err(host->dev,
2202                 "Timeout resetting block (ctrl reset %#x)\n",
2203                 ctrl & reset);
2204 
2205         return false;
2206 }
2207 
/*
 * Full controller recovery: reset CIU/FIFO (and DMA when in use),
 * clear raw interrupt status, wait for any outstanding dma_req, and
 * reprogram the clock via SDMMC_CMD_UPD_CLK.  Returns true when the
 * controller came back cleanly.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Reseting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;
			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to "
					"clear during reset\n", __func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev, "%s: fifo/dma reset bits didn't "
				"clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers  */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
2277 
2278 #ifdef CONFIG_OF
/*
 * Host-level device-tree properties mapped onto DW_MCI_QUIRK_* flags;
 * scanned by dw_mci_parse_dt().
 */
static struct dw_mci_of_quirks {
	char *quirk;	/* DT property name */
	int id;		/* corresponding DW_MCI_QUIRK_* flag */
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk	= "disable-wp",
		.id	= DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};
2291 
/*
 * Build a dw_mci_board from the device tree: slot count, quirk flags,
 * FIFO depth, card-detect delay and bus clock, plus any variant parse
 * hook.  Returns the devm-allocated pdata or an ERR_PTR().
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	/* optional; pdata->detect_delay_ms stays 0 when absent */
	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* give the variant driver a chance to parse its own properties */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
2340 
2341 #else /* CONFIG_OF */
/* !CONFIG_OF stub: without DT, pdata must come from the platform */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
2346 #endif /* CONFIG_OF */
2347 
2348 int dw_mci_probe(struct dw_mci *host)
2349 {
2350         const struct dw_mci_drv_data *drv_data = host->drv_data;
2351         int width, i, ret = 0;
2352         u32 fifo_size;
2353         int init_slots = 0;
2354 
2355         if (!host->pdata) {
2356                 host->pdata = dw_mci_parse_dt(host);
2357                 if (IS_ERR(host->pdata)) {
2358                         dev_err(host->dev, "platform data not available\n");
2359                         return -EINVAL;
2360                 }
2361         }
2362 
2363         if (host->pdata->num_slots > 1) {
2364                 dev_err(host->dev,
2365                         "Platform data must supply num_slots.\n");
2366                 return -ENODEV;
2367         }
2368 
2369         host->biu_clk = devm_clk_get(host->dev, "biu");
2370         if (IS_ERR(host->biu_clk)) {
2371                 dev_dbg(host->dev, "biu clock not available\n");
2372         } else {
2373                 ret = clk_prepare_enable(host->biu_clk);
2374                 if (ret) {
2375                         dev_err(host->dev, "failed to enable biu clock\n");
2376                         return ret;
2377                 }
2378         }
2379 
2380         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2381         if (IS_ERR(host->ciu_clk)) {
2382                 dev_dbg(host->dev, "ciu clock not available\n");
2383                 host->bus_hz = host->pdata->bus_hz;
2384         } else {
2385                 ret = clk_prepare_enable(host->ciu_clk);
2386                 if (ret) {
2387                         dev_err(host->dev, "failed to enable ciu clock\n");
2388                         goto err_clk_biu;
2389                 }
2390 
2391                 if (host->pdata->bus_hz) {
2392                         ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2393                         if (ret)
2394                                 dev_warn(host->dev,
2395                                          "Unable to set bus rate to %uHz\n",
2396                                          host->pdata->bus_hz);
2397                 }
2398                 host->bus_hz = clk_get_rate(host->ciu_clk);
2399         }
2400 
2401         if (!host->bus_hz) {
2402                 dev_err(host->dev,
2403                         "Platform data must supply bus speed\n");
2404                 ret = -ENODEV;
2405                 goto err_clk_ciu;
2406         }
2407 
2408         if (drv_data && drv_data->init) {
2409                 ret = drv_data->init(host);
2410                 if (ret) {
2411                         dev_err(host->dev,
2412                                 "implementation specific init failed\n");
2413                         goto err_clk_ciu;
2414                 }
2415         }
2416 
2417         if (drv_data && drv_data->setup_clock) {
2418                 ret = drv_data->setup_clock(host);
2419                 if (ret) {
2420                         dev_err(host->dev,
2421                                 "implementation specific clock setup failed\n");
2422                         goto err_clk_ciu;
2423                 }
2424         }
2425 
2426         host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2427         if (IS_ERR(host->vmmc)) {
2428                 ret = PTR_ERR(host->vmmc);
2429                 if (ret == -EPROBE_DEFER)
2430                         goto err_clk_ciu;
2431 
2432                 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2433                 host->vmmc = NULL;
2434         } else {
2435                 ret = regulator_enable(host->vmmc);
2436                 if (ret) {
2437                         if (ret != -EPROBE_DEFER)
2438                                 dev_err(host->dev,
2439                                         "regulator_enable fail: %d\n", ret);
2440                         goto err_clk_ciu;
2441                 }
2442         }
2443 
2444         host->quirks = host->pdata->quirks;
2445 
2446         spin_lock_init(&host->lock);
2447         INIT_LIST_HEAD(&host->queue);
2448 
2449         /*
2450          * Get the host data width - this assumes that HCON has been set with
2451          * the correct values.
2452          */
2453         i = (mci_readl(host, HCON) >> 7) & 0x7;
2454         if (!i) {
2455                 host->push_data = dw_mci_push_data16;
2456                 host->pull_data = dw_mci_pull_data16;
2457                 width = 16;
2458                 host->data_shift = 1;
2459         } else if (i == 2) {
2460                 host->push_data = dw_mci_push_data64;
2461                 host->pull_data = dw_mci_pull_data64;
2462                 width = 64;
2463                 host->data_shift = 3;
2464         } else {
2465                 /* Check for a reserved value, and warn if it is */
2466                 WARN((i != 1),
2467                      "HCON reports a reserved host data width!\n"
2468                      "Defaulting to 32-bit access.\n");
2469                 host->push_data = dw_mci_push_data32;
2470                 host->pull_data = dw_mci_pull_data32;
2471                 width = 32;
2472                 host->data_shift = 2;
2473         }
2474 
2475         /* Reset all blocks */
2476         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
2477                 return -ENODEV;
2478 
2479         host->dma_ops = host->pdata->dma_ops;
2480         dw_mci_init_dma(host);
2481 
2482         /* Clear the interrupts for the host controller */
2483         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2484         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2485 
2486         /* Put in max timeout */
2487         mci_writel(host, TMOUT, 0xFFFFFFFF);
2488 
2489         /*
2490          * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
2491          *                          Tx Mark = fifo_size / 2 DMA Size = 8
2492          */
2493         if (!host->pdata->fifo_depth) {
2494                 /*
2495                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2496                  * have been overwritten by the bootloader, just like we're
2497                  * about to do, so if you know the value for your hardware, you
2498                  * should put it in the platform data.
2499                  */
2500                 fifo_size = mci_readl(host, FIFOTH);
2501                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2502         } else {
2503                 fifo_size = host->pdata->fifo_depth;
2504         }
2505         host->fifo_depth = fifo_size;
2506         host->fifoth_val =
2507                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2508         mci_writel(host, FIFOTH, host->fifoth_val);
2509 
2510         /* disable clock to CIU */
2511         mci_writel(host, CLKENA, 0);
2512         mci_writel(host, CLKSRC, 0);
2513 
2514         /*
2515          * In 2.40a spec, Data offset is changed.
2516          * Need to check the version-id and set data-offset for DATA register.
2517          */
2518         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2519         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2520 
2521         if (host->verid < DW_MMC_240A)
2522                 host->data_offset = DATA_OFFSET;
2523         else
2524                 host->data_offset = DATA_240A_OFFSET;
2525 
2526         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2527         host->card_workqueue = alloc_workqueue("dw-mci-card",
2528                         WQ_MEM_RECLAIM, 1);
2529         if (!host->card_workqueue) {
2530                 ret = -ENOMEM;
2531                 goto err_dmaunmap;
2532         }
2533         INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2534         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2535                                host->irq_flags, "dw-mci", host);
2536         if (ret)
2537                 goto err_workqueue;
2538 
2539         if (host->pdata->num_slots)
2540                 host->num_slots = host->pdata->num_slots;
2541         else
2542                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2543 
2544         /*
2545          * Enable interrupts for command done, data over, data empty, card det,
2546          * receive ready and error such as transmit, receive timeout, crc error
2547          */
2548         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2549         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2550                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2551                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2552         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2553 
2554         dev_info(host->dev, "DW MMC controller at irq %d, "
2555                  "%d bit host data width, "
2556                  "%u deep fifo\n",
2557                  host->irq, width, fifo_size);
2558 
2559         /* We need at least one slot to succeed */
2560         for (i = 0; i < host->num_slots; i++) {
2561                 ret = dw_mci_init_slot(host, i);
2562                 if (ret)
2563                         dev_dbg(host->dev, "slot %d init failed\n", i);
2564                 else
2565                         init_slots++;
2566         }
2567 
2568         if (init_slots) {
2569                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2570         } else {
2571                 dev_dbg(host->dev, "attempted to initialize %d slots, "
2572                                         "but failed on all\n", host->num_slots);
2573                 goto err_workqueue;
2574         }
2575 
2576         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2577                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2578 
2579         return 0;
2580 
2581 err_workqueue:
2582         destroy_workqueue(host->card_workqueue);
2583 
2584 err_dmaunmap:
2585         if (host->use_dma && host->dma_ops->exit)
2586                 host->dma_ops->exit(host);
2587         if (host->vmmc)
2588                 regulator_disable(host->vmmc);
2589 
2590 err_clk_ciu:
2591         if (!IS_ERR(host->ciu_clk))
2592                 clk_disable_unprepare(host->ciu_clk);
2593 
2594 err_clk_biu:
2595         if (!IS_ERR(host->biu_clk))
2596                 clk_disable_unprepare(host->biu_clk);
2597 
2598         return ret;
2599 }
2600 EXPORT_SYMBOL(dw_mci_probe);
2601 
2602 void dw_mci_remove(struct dw_mci *host)
2603 {
2604         int i;
2605 
2606         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2607         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2608 
2609         for (i = 0; i < host->num_slots; i++) {
2610                 dev_dbg(host->dev, "remove slot %d\n", i);
2611                 if (host->slot[i])
2612                         dw_mci_cleanup_slot(host->slot[i], i);
2613         }
2614 
2615         /* disable clock to CIU */
2616         mci_writel(host, CLKENA, 0);
2617         mci_writel(host, CLKSRC, 0);
2618 
2619         destroy_workqueue(host->card_workqueue);
2620 
2621         if (host->use_dma && host->dma_ops->exit)
2622                 host->dma_ops->exit(host);
2623 
2624         if (host->vmmc)
2625                 regulator_disable(host->vmmc);
2626 
2627         if (!IS_ERR(host->ciu_clk))
2628                 clk_disable_unprepare(host->ciu_clk);
2629 
2630         if (!IS_ERR(host->biu_clk))
2631                 clk_disable_unprepare(host->biu_clk);
2632 }
2633 EXPORT_SYMBOL(dw_mci_remove);
2634 
2635 
2636 
2637 #ifdef CONFIG_PM_SLEEP
2638 /*
2639  * TODO: we should probably disable the clock to the card in the suspend path.
2640  */
2641 int dw_mci_suspend(struct dw_mci *host)
2642 {
2643         if (host->vmmc)
2644                 regulator_disable(host->vmmc);
2645 
2646         return 0;
2647 }
2648 EXPORT_SYMBOL(dw_mci_suspend);
2649 
2650 int dw_mci_resume(struct dw_mci *host)
2651 {
2652         int i, ret;
2653 
2654         if (host->vmmc) {
2655                 ret = regulator_enable(host->vmmc);
2656                 if (ret) {
2657                         dev_err(host->dev,
2658                                 "failed to enable regulator: %d\n", ret);
2659                         return ret;
2660                 }
2661         }
2662 
2663         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
2664                 ret = -ENODEV;
2665                 return ret;
2666         }
2667 
2668         if (host->use_dma && host->dma_ops->init)
2669                 host->dma_ops->init(host);
2670 
2671         /*
2672          * Restore the initial value at FIFOTH register
2673          * And Invalidate the prev_blksz with zero
2674          */
2675         mci_writel(host, FIFOTH, host->fifoth_val);
2676         host->prev_blksz = 0;
2677 
2678         /* Put in max timeout */
2679         mci_writel(host, TMOUT, 0xFFFFFFFF);
2680 
2681         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2682         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2683                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2684                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2685         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2686 
2687         for (i = 0; i < host->num_slots; i++) {
2688                 struct dw_mci_slot *slot = host->slot[i];
2689                 if (!slot)
2690                         continue;
2691                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2692                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2693                         dw_mci_setup_bus(slot, true);
2694                 }
2695         }
2696         return 0;
2697 }
2698 EXPORT_SYMBOL(dw_mci_resume);
2699 #endif /* CONFIG_PM_SLEEP */
2700 
/*
 * Module load hook: only prints a banner.  Host registration happens in
 * dw_mci_probe(), called by the platform/PCI glue drivers.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
2706 
/*
 * Module unload hook: intentionally empty -- per-host teardown is done
 * in dw_mci_remove(), invoked from the glue drivers' remove paths.
 */
static void __exit dw_mci_exit(void)
{
}
2710 
module_init(dw_mci_init);
module_exit(dw_mci_exit);

/* Module metadata. */
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");
2718 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us