Applies to kernel versions: 2.0.40, 2.2.26, 2.4.37, 3.13 - 3.19, 4.0 - 4.10

File: linux/drivers/ata/pata_octeon_cf.c

  1 /*
  2  * Driver for the Octeon bootbus compact flash.
  3  *
  4  * This file is subject to the terms and conditions of the GNU General Public
  5  * License.  See the file "COPYING" in the main directory of this archive
  6  * for more details.
  7  *
  8  * Copyright (C) 2005 - 2012 Cavium Inc.
  9  * Copyright (C) 2008 Wind River Systems
 10  */
 11 
 12 #include <linux/kernel.h>
 13 #include <linux/module.h>
 14 #include <linux/libata.h>
 15 #include <linux/hrtimer.h>
 16 #include <linux/slab.h>
 17 #include <linux/irq.h>
 18 #include <linux/of.h>
 19 #include <linux/of_platform.h>
 20 #include <linux/platform_device.h>
 21 #include <scsi/scsi_host.h>
 22 
 23 #include <asm/byteorder.h>
 24 #include <asm/octeon/octeon.h>
 25 
 26 /*
 27  * The Octeon bootbus compact flash interface is connected in at least
 28  * 3 different configurations on various evaluation boards:
 29  *
 30  * -- 8  bits no irq, no DMA
 31  * -- 16 bits no irq, no DMA
 32  * -- 16 bits True IDE mode with DMA, but no irq.
 33  *
 34  * In the last case the DMA engine can generate an interrupt when the
 35  * transfer is complete.  For the first two cases only PIO is supported.
 36  *
 37  */
 38 
#define DRV_NAME        "pata_octeon_cf"
#define DRV_VERSION     "2.2"

/* Poll interval in nS. */
#define OCTEON_CF_BUSY_POLL_INTERVAL 500000

/* Byte offsets of the bootbus DMA engine registers from cf_port->dma_base. */
#define DMA_CFG 0
#define DMA_TIM 0x20
#define DMA_INT 0x38
#define DMA_INT_EN 0x50
 49 
/* Per-port driver state, kept in ata_port->private_data. */
struct octeon_cf_port {
	struct hrtimer delayed_finish;	/* re-polls BSY/DRQ after the DMA-done irq */
	struct ata_port *ap;		/* back-pointer to the owning ATA port */
	int dma_finished;		/* nonzero once the DMA engine has stopped */
	void		*c0;		/* CS0 mapping — set in probe (not visible here); TODO confirm */
	unsigned int cs0;		/* bootbus chip-select of the primary region */
	unsigned int cs1;		/* second chip-select, used only in True IDE mode */
	bool is_true_ide;		/* 16-bit True IDE wiring (DMA capable) */
	u64 dma_base;			/* mapped base address of the bootbus DMA engine */
};
 60 
/* SCSI host template: PIO-only defaults from libata. */
static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};

/* DMA is disabled unless the user passes enable_dma=1 (read-only sysfs). */
static int enable_dma;
module_param(enable_dma, int, 0444);
MODULE_PARM_DESC(enable_dma,
		 "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
 69 
/**
 * ns_to_tim_reg - convert a nanosecond duration to a bootbus timing value
 * @tim_mult:	timing multiplier the target register is programmed with
 * @nsecs:	desired duration in nanoseconds
 *
 * Returns the number of io-clock periods (scaled down by @tim_mult)
 * needed to cover @nsecs, rounded up.
 */
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
	unsigned int val;

	/*
	 * Compute # of eclock periods to get desired duration in
	 * nanoseconds.
	 */
	val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
			  1000 * tim_mult);

	return val;
}
 87 
/**
 * octeon_cf_set_boot_reg_cfg - program a bootbus region configuration
 * @cs:		bootbus chip-select number to configure
 * @multiplier:	requested timing multiplier (8, 4, 2; anything else -> 1x)
 *
 * Translates @multiplier into the hardware tim_mult encoding (which is
 * not a linear mapping) and writes a plain, non-multiplexed region
 * configuration with the region enabled.
 */
static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
{
	union cvmx_mio_boot_reg_cfgx reg_cfg;
	unsigned int tim_mult;

	/* The hardware encoding of the multiplier is not ordinal. */
	switch (multiplier) {
	case 8:
		tim_mult = 3;
		break;
	case 4:
		tim_mult = 0;
		break;
	case 2:
		tim_mult = 2;
		break;
	default:
		tim_mult = 1;
		break;
	}

	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
	reg_cfg.s.tim_mult = tim_mult;	/* Timing multiplier */
	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
	reg_cfg.s.we_ext = 0;	/* No write enable extension */
	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
	reg_cfg.s.en = 1;	/* Enable this region */
	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}
120 
/**
 * octeon_cf_set_piomode - program bootbus timing for the selected PIO mode
 * @ap:		ATA port information
 * @dev:	ATA device
 *
 * Called after libata determines the needed PIO mode.  Converts the
 * libata-computed ATA timing (t1/t2/t2i etc.) into bootbus region
 * configuration and timing register values, using a clock divisor
 * chosen so the timing fields cannot overflow.
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int T;	/* one timing tick, in picoseconds */
	struct ata_timing timing;

	unsigned int div;
	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t1;
	int t2;
	int t2i;

	/*
	 * A divisor value of four will overflow the timing fields at
	 * clock rates greater than 800MHz
	 */
	if (octeon_get_io_clock_rate() <= 800000000)
		div = 4;
	else
		div = 8;
	T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());

	/* ata_timing_compute() returns nonzero only on invalid input. */
	BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));

	/* Register fields use minus-one notation, hence the decrements. */
	t1 = timing.setup;
	if (t1)
		t1--;
	t2 = timing.active;
	if (t2)
		t2--;
	t2i = timing.act8b;
	if (t2i)
		t2i--;

	trh = ns_to_tim_reg(div, 20);
	if (trh)
		trh--;

	/* Whatever is left of the cycle after active/setup/hold is pause. */
	pause = (int)timing.cycle - (int)timing.active -
		(int)timing.setup - trh;
	if (pause < 0)
		pause = 0;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
	if (cf_port->is_true_ide)
		/* True IDE mode, program both chip selects.  */
		octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);


	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(div, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
	if (cf_port->is_true_ide)
		/* True IDE mode, program both chip selects.  */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
			       reg_tim.u64);
}
221 
/**
 * octeon_cf_set_dmamode - program bootbus DMA timing for the selected mode
 * @ap:		ATA port information
 * @dev:	ATA device
 *
 * Converts the libata timing for dev->dma_mode into the bootbus DMA
 * timing register.  All values are in nanoseconds except where noted.
 */
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_pin_defs pin_defs;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;
	int c;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0	= timing->cycle;
	Td	= timing->active;
	Tkr	= timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	/* NOTE(review): assumes the io clock is fast enough that this
	 * cannot underflow below zero — verify for low clock rates. */
	pause = 25 - dma_arq * 1000 /
		(octeon_get_io_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);

	pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);

	/* DMA channel number. */
	c = (cf_port->dma_base & 8) >> 3;

	/* Invert the polarity if the default is 0*/
	dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI, C.F. spec. says 0, but Sony CF card requires
	 * more, we use 20 nS.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/*  writes only */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
292 
293 /**
294  * Handle an 8 bit I/O request.
295  *
296  * @dev:        Device to access
297  * @buffer:     Data buffer
298  * @buflen:     Length of the buffer.
299  * @rw:         True to write.
300  */
301 static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
302                                          unsigned char *buffer,
303                                          unsigned int buflen,
304                                          int rw)
305 {
306         struct ata_port *ap             = dev->link->ap;
307         void __iomem *data_addr         = ap->ioaddr.data_addr;
308         unsigned long words;
309         int count;
310 
311         words = buflen;
312         if (rw) {
313                 count = 16;
314                 while (words--) {
315                         iowrite8(*buffer, data_addr);
316                         buffer++;
317                         /*
318                          * Every 16 writes do a read so the bootbus
319                          * FIFO doesn't fill up.
320                          */
321                         if (--count == 0) {
322                                 ioread8(ap->ioaddr.altstatus_addr);
323                                 count = 16;
324                         }
325                 }
326         } else {
327                 ioread8_rep(data_addr, buffer, words);
328         }
329         return buflen;
330 }
331 
/**
 * octeon_cf_data_xfer16 - handle a 16 bit PIO data transfer
 * @dev:	Device to access
 * @buffer:	Data buffer
 * @buflen:	Length of the buffer.
 * @rw:		True to write.
 *
 * Returns the number of bytes transferred (always @buflen).
 */
static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap		= dev->link->ap;
	void __iomem *data_addr		= ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		/* The device always moves whole words; bounce the odd
		 * byte through an aligned buffer. */
		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}
	return buflen;
}
386 
/**
 * octeon_cf_tf_read16 - read the taskfile for 16bit non-True IDE only
 * @ap:	ATA port
 * @tf:	taskfile to fill in
 *
 * In this wiring the taskfile registers appear as 16-bit words starting
 * at ioaddr.data_addr, two 8-bit registers packed per word.
 */
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
	u16 blob;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 0xc);
	tf->feature = blob >> 8;

	blob = __raw_readw(base + 2);
	tf->nsect = blob & 0xff;
	tf->lbal = blob >> 8;

	blob = __raw_readw(base + 4);
	tf->lbam = blob & 0xff;
	tf->lbah = blob >> 8;

	blob = __raw_readw(base + 6);
	tf->device = blob & 0xff;
	tf->command = blob >> 8;

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ap->ioaddr.ctl_addr)) {
			/* Select the high-order bytes, read them, then
			 * restore the normal control value. */
			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);

			blob = __raw_readw(base + 0xc);
			tf->hob_feature = blob >> 8;

			blob = __raw_readw(base + 2);
			tf->hob_nsect = blob & 0xff;
			tf->hob_lbal = blob >> 8;

			blob = __raw_readw(base + 4);
			tf->hob_lbam = blob & 0xff;
			tf->hob_lbah = blob >> 8;

			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			WARN_ON(1);
		}
	}
}
433 
434 static u8 octeon_cf_check_status16(struct ata_port *ap)
435 {
436         u16 blob;
437         void __iomem *base = ap->ioaddr.data_addr;
438 
439         blob = __raw_readw(base + 6);
440         return blob >> 8;
441 }
442 
/**
 * octeon_cf_softreset16 - perform an ATA soft reset, 16bit non-True IDE only
 * @link:	ATA link to reset
 * @classes:	resulting device class (one device only on this bus)
 * @deadline:	jiffies deadline for the reset to complete
 *
 * Pulses SRST through the device-control register (word at offset 0xe
 * from the data address) and classifies the single attached device.
 * Returns 0 on success or a negative errno if the reset times out.
 */
static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	DPRINTK("about to softreset\n");
	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
469 
/**
 * octeon_cf_tf_load16 - load the taskfile, 16bit non-True IDE only
 * @ap:	ATA port
 * @tf:	taskfile to load
 *
 * The device_addr is not loaded here; it is written together with the
 * command in octeon_cf_exec_command16, since the two must go out in a
 * single combined 16-bit write.
 */
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	/* LBA48: write the high-order register bytes first. */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}
	ata_wait_idle(ap);
}
510 
511 
/* There is only one device on this bus, so selection is a no-op. */
static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
{
}
517 
/*
 * Issue ATA command to host controller.  The device_addr is also sent
 * as it must be written in a combined write with the command.
 */
static void octeon_cf_exec_command16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;
	u16 blob;

	/* Low byte carries the device register (if requested), high
	 * byte carries the command; both go out in one 16-bit write. */
	if (tf->flags & ATA_TFLAG_DEVICE) {
		VPRINTK("device 0x%X\n", tf->device);
		blob = tf->device;
	} else {
		blob = 0;
	}

	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
	blob |= (tf->command << 8);
	__raw_writew(blob, base + 6);


	ata_wait_idle(ap);
}
543 
/* No-op hook used for sff_irq_on/sff_irq_clear: no irq lines to manage. */
static void octeon_cf_ata_port_noaction(struct ata_port *ap)
{
}
547 
/*
 * Prepare for a DMA transfer: reset the scatterlist cursor and the
 * completion flag, then issue the r/w command to the device.  The DMA
 * engine itself is started later by octeon_cf_dma_start().
 */
static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct octeon_cf_port *cf_port;

	cf_port = ap->private_data;
	DPRINTK("ENTER\n");
	/* issue r/w command */
	qc->cursg = qc->sg;
	cf_port->dma_finished = 0;
	ap->ops->sff_exec_command(ap, &qc->tf);
	DPRINTK("EXIT\n");
}
561 
/**
 * octeon_cf_dma_start - start a DMA transfer that was already setup
 * @qc:	Information about the DMA
 *
 * Programs the bootbus DMA engine for the current scatterlist entry
 * (qc->cursg), enables the DMA-done interrupt, and kicks off the
 * transfer.  Called once per scatterlist entry; the interrupt handler
 * advances cursg and calls this again for the next entry.
 */
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_port *cf_port = qc->ap->private_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	VPRINTK("%d scatterlists\n", qc->n_elem);

	/* Get the scatter list entry we need to DMA into */
	sg = qc->cursg;
	BUG_ON(!sg);

	/*
	 * Clear the DMA complete status.
	 */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);

	/* Enable the interrupt.  */
	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);

	/* Set the direction of the DMA */
	mio_boot_dma_cfg.u64 = 0;
#ifdef __LITTLE_ENDIAN
	mio_boot_dma_cfg.s.endian = 1;
#endif
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ. Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors. Instead of stopping and restarting the DMA, we'll
	 * let the hardware do it. If the DMA is really stopped early
	 * due to an error condition, a later timeout will force us to
	 * stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16bit words and minus one notation */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* We need to swap the high and low bytes of every 16 bits */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	VPRINTK("%s %d bytes address=%p\n",
		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);

	cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
622 
/**
 * octeon_cf_dma_finished - finish a completed DMA transfer
 * @ap:	ATA port the transfer ran on
 * @qc:	queued command that owned the transfer
 *
 * Checks the DMA engine for an incomplete transfer, stops and quiesces
 * the engine, then hands the final device status to the libata HSM.
 * Returns 1 if the completion was handled, 0 if the port was not in
 * the final HSM state (nothing to do).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);


	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	/* A fully-drained engine reads back size == all-ones. */
	dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete.  */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine.  */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);

	/* Disable the interrupt.  */
	dma_int.u64 = 0;
	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);

	/* Clear the DMA complete status */
	dma_int.s.done = 1;
	cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}
674 
/*
 * Check if any queued commands have more DMAs, if so start the next
 * transfer, else do end of transfer handling.
 */
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct octeon_cf_port *cf_port;
	int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* hsm_task_state and dma_finished are protected by host->lock. */
	spin_lock_irqsave(&host->lock, flags);

	DPRINTK("ENTER\n");
	for (i = 0; i < host->n_ports; i++) {
		u8 status;
		struct ata_port *ap;
		struct ata_queued_cmd *qc;
		union cvmx_mio_boot_dma_intx dma_int;
		union cvmx_mio_boot_dma_cfgx dma_cfg;

		ap = host->ports[i];
		cf_port = ap->private_data;

		dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
		dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);

		qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
			continue;

		/* DMA done and engine disabled: this entry is finished. */
		if (dma_int.s.done && !dma_cfg.s.en) {
			if (!sg_is_last(qc->cursg)) {
				/* More scatterlist entries: start the next one. */
				qc->cursg = sg_next(qc->cursg);
				handled = 1;
				octeon_cf_dma_start(qc);
				continue;
			} else {
				cf_port->dma_finished = 1;
			}
		}
		if (!cf_port->dma_finished)
			continue;
		status = ioread8(ap->ioaddr.altstatus_addr);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			/*
			 * We are busy, try to handle it later.  This
			 * is the DMA finished interrupt, and it could
			 * take a little while for the card to be
			 * ready for more commands.
			 */
			/* Clear DMA irq. */
			dma_int.u64 = 0;
			dma_int.s.done = 1;
			cvmx_write_csr(cf_port->dma_base + DMA_INT,
				       dma_int.u64);
			hrtimer_start_range_ns(&cf_port->delayed_finish,
					       ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
					       OCTEON_CF_BUSY_POLL_INTERVAL / 5,
					       HRTIMER_MODE_REL);
			handled = 1;
		} else {
			handled |= octeon_cf_dma_finished(ap, qc);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
	DPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}
746 
/*
 * hrtimer callback: the DMA-done interrupt fired while the card was
 * still busy, so poll the alternate status until BSY/DRQ clear, then
 * finish the transfer.  Restarts itself while the card remains busy.
 */
static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
{
	struct octeon_cf_port *cf_port = container_of(hrt,
						      struct octeon_cf_port,
						      delayed_finish);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;
	enum hrtimer_restart rv = HRTIMER_NORESTART;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously.  The hsm_task_state is
	 * protected by host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		hrtimer_forward_now(hrt,
				    ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
		rv = HRTIMER_RESTART;
		goto out;
	}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rv;
}
784 
785 static void octeon_cf_dev_config(struct ata_device *dev)
786 {
787         /*
788          * A maximum of 2^20 - 1 16 bit transfers are possible with
789          * the bootbus DMA.  So we need to throttle max_sectors to
790          * (2^12 - 1 == 4095) to assure that this can never happen.
791          */
792         dev->max_sectors = min(dev->max_sectors, 4095U);
793 }
794 
/*
 * We don't do ATAPI DMA so return 0.
 */
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 0;
}
802 
/*
 * Issue a queued command: DMA commands are driven by our own DMA
 * engine, everything else falls through to the standard SFF path.
 * Returns 0 on success (libata qc_issue convention).
 */
static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		octeon_cf_dma_setup(qc);	    /* set up dma */
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		/* check_atapi_dma() rejects ATAPI DMA, so reaching
		 * here is a driver bug. */
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}
827 
/*
 * Port operations.  Starts from the generic SFF ops; the 16-bit
 * non-True-IDE handlers (tf_load16 etc.) are patched in at probe time
 * when that wiring is detected — only the always-applicable hooks are
 * set here.
 */
static struct ata_port_operations octeon_cf_ops = {
	.inherits		= &ata_sff_port_ops,
	.check_atapi_dma	= octeon_cf_check_atapi_dma,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= octeon_cf_qc_issue,
	.sff_dev_select		= octeon_cf_dev_select,
	.sff_irq_on		= octeon_cf_ata_port_noaction,
	.sff_irq_clear		= octeon_cf_ata_port_noaction,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= octeon_cf_set_piomode,
	.set_dmamode		= octeon_cf_set_dmamode,
	.dev_config		= octeon_cf_dev_config,
};
841 
842 static int octeon_cf_probe(struct platform_device *pdev)
843 {
844         struct resource *res_cs0, *res_cs1;
845 
846         bool is_16bit;
847         const __be32 *cs_num;
848         struct property *reg_prop;
849         int n_addr, n_size, reg_len;
850         struct device_node *node;
851         const void *prop;
852         void __iomem *cs0;
853         void __iomem *cs1 = NULL;
854         struct ata_host *host;
855         struct ata_port *ap;
856         int irq = 0;
857         irq_handler_t irq_handler = NULL;
858         void __iomem *base;
859         struct octeon_cf_port *cf_port;
860         int rv = -ENOMEM;
861 
862 
863         node = pdev->dev.of_node;
864         if (node == NULL)
865                 return -EINVAL;
866 
867         cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL);
868         if (!cf_port)
869                 return -ENOMEM;
870 
871         cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);
872 
873         prop = of_get_property(node, "cavium,bus-width", NULL);
874         if (prop)
875                 is_16bit = (be32_to_cpup(prop) == 16);
876         else
877                 is_16bit = false;
878 
879         n_addr = of_n_addr_cells(node);
880         n_size = of_n_size_cells(node);
881 
882         reg_prop = of_find_property(node, "reg", &reg_len);
883         if (!reg_prop || reg_len < sizeof(__be32))
884                 return -EINVAL;
885 
886         cs_num = reg_prop->value;
887         cf_port->cs0 = be32_to_cpup(cs_num);
888 
889         if (cf_port->is_true_ide) {
890                 struct device_node *dma_node;
891                 dma_node = of_parse_phandle(node,
892                                             "cavium,dma-engine-handle", 0);
893                 if (dma_node) {
894                         struct platform_device *dma_dev;
895                         dma_dev = of_find_device_by_node(dma_node);
896                         if (dma_dev) {
897                                 struct resource *res_dma;
898                                 int i;
899                                 res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
900                                 if (!res_dma) {
901                                         of_node_put(dma_node);
902                                         return -EINVAL;
903                                 }
904                                 cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
905                                                                          resource_size(res_dma));
906                                 if (!cf_port->dma_base) {
907                                         of_node_put(dma_node);
908                                         return -EINVAL;
909                                 }
910 
911                                 irq_handler = octeon_cf_interrupt;
912                                 i = platform_get_irq(dma_dev, 0);
913                                 if (i > 0)
914                                         irq = i;
915                         }
916                         of_node_put(dma_node);
917                 }
918                 res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
919                 if (!res_cs1)
920                         return -EINVAL;
921 
922                 cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
923                                            resource_size(res_cs1));
924                 if (!cs1)
925                         return rv;
926 
927                 if (reg_len < (n_addr + n_size + 1) * sizeof(__be32))
928                         return -EINVAL;
929 
930                 cs_num += n_addr + n_size;
931                 cf_port->cs1 = be32_to_cpup(cs_num);
932         }
933 
934         res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
935         if (!res_cs0)
936                 return -EINVAL;
937 
938         cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
939                                    resource_size(res_cs0));
940         if (!cs0)
941                 return rv;
942 
943         /* allocate host */
944         host = ata_host_alloc(&pdev->dev, 1);
945         if (!host)
946                 return rv;
947 
948         ap = host->ports[0];
949         ap->private_data = cf_port;
950         pdev->dev.platform_data = cf_port;
951         cf_port->ap = ap;
952         ap->ops = &octeon_cf_ops;
953         ap->pio_mask = ATA_PIO6;
954         ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
955 
956         if (!is_16bit) {
957                 base = cs0 + 0x800;
958                 ap->ioaddr.cmd_addr     = base;
959                 ata_sff_std_ports(&ap->ioaddr);
960 
961                 ap->ioaddr.altstatus_addr = base + 0xe;
962                 ap->ioaddr.ctl_addr     = base + 0xe;
963                 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
964         } else if (cf_port->is_true_ide) {
965                 base = cs0;
966                 ap->ioaddr.cmd_addr     = base + (ATA_REG_CMD << 1) + 1;
967                 ap->ioaddr.data_addr    = base + (ATA_REG_DATA << 1);
968                 ap->ioaddr.error_addr   = base + (ATA_REG_ERR << 1) + 1;
969                 ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1;
970                 ap->ioaddr.nsect_addr   = base + (ATA_REG_NSECT << 1) + 1;
971                 ap->ioaddr.lbal_addr    = base + (ATA_REG_LBAL << 1) + 1;
972                 ap->ioaddr.lbam_addr    = base + (ATA_REG_LBAM << 1) + 1;
973                 ap->ioaddr.lbah_addr    = base + (ATA_REG_LBAH << 1) + 1;
974                 ap->ioaddr.device_addr  = base + (ATA_REG_DEVICE << 1) + 1;
975                 ap->ioaddr.status_addr  = base + (ATA_REG_STATUS << 1) + 1;
976                 ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1;
977                 ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
978                 ap->ioaddr.ctl_addr     = cs1 + (6 << 1) + 1;
979                 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
980 
981                 ap->mwdma_mask  = enable_dma ? ATA_MWDMA4 : 0;
982 
983                 /* True IDE mode needs a timer to poll for not-busy.  */
984                 hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
985                              HRTIMER_MODE_REL);
986                 cf_port->delayed_finish.function = octeon_cf_delayed_finish;
987         } else {
988                 /* 16 bit but not True IDE */
989                 base = cs0 + 0x800;
990                 octeon_cf_ops.sff_data_xfer     = octeon_cf_data_xfer16;
991                 octeon_cf_ops.softreset         = octeon_cf_softreset16;
992                 octeon_cf_ops.sff_check_status  = octeon_cf_check_status16;
993                 octeon_cf_ops.sff_tf_read       = octeon_cf_tf_read16;
994                 octeon_cf_ops.sff_tf_load       = octeon_cf_tf_load16;
995                 octeon_cf_ops.sff_exec_command  = octeon_cf_exec_command16;
996 
997                 ap->ioaddr.data_addr    = base + ATA_REG_DATA;
998                 ap->ioaddr.nsect_addr   = base + ATA_REG_NSECT;
999                 ap->ioaddr.lbal_addr    = base + ATA_REG_LBAL;
1000                 ap->ioaddr.ctl_addr     = base + 0xe;
1001                 ap->ioaddr.altstatus_addr = base + 0xe;
1002         }
1003         cf_port->c0 = ap->ioaddr.ctl_addr;
1004 
1005         rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1006         if (rv)
1007                 return rv;
1008 
1009         ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
1010 
1011         dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
1012                  is_16bit ? 16 : 8,
1013                  cf_port->is_true_ide ? ", True IDE" : "");
1014 
1015         return ata_host_activate(host, irq, irq_handler,
1016                                  IRQF_SHARED, &octeon_cf_sht);
1017 }
1018 
/*
 * Shutdown hook: quiesce the boot bus DMA engine and soft-reset the CF
 * device so a warm reboot finds it in a sane state.  Only does work in
 * True IDE/DMA mode (dma_base is set by octeon_cf_probe() in that case
 * only; it is zero otherwise since cf_port is kzalloc'd).
 */
static void octeon_cf_shutdown(struct device *dev)
{
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;

	struct octeon_cf_port *cf_port = dev_get_platdata(dev);

	if (cf_port->dma_base) {
		/* Stop and clear the dma engine.  */
		dma_cfg.u64 = 0;
		dma_cfg.s.size = -1;
		cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);

		/* Disable the interrupt.  */
		dma_int.u64 = 0;
		cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);

		/* Clear the DMA complete status */
		dma_int.s.done = 1;
		cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);

		/*
		 * Pulse ATA_SRST through the device control register
		 * (cf_port->c0 == ioaddr.ctl_addr) to reset the device.
		 * NOTE(review): the 20us/100ms delays look empirical --
		 * confirm against ATA soft-reset timing requirements.
		 */
		__raw_writeb(0, cf_port->c0);
		udelay(20);
		__raw_writeb(ATA_SRST, cf_port->c0);
		udelay(20);
		__raw_writeb(0, cf_port->c0);
		mdelay(100);
	}
}
1048 
1049 static struct of_device_id octeon_cf_match[] = {
1050         {
1051                 .compatible = "cavium,ebt3000-compact-flash",
1052         },
1053         {},
1054 };
1055 MODULE_DEVICE_TABLE(of, octeon_cf_match);
1056 
/*
 * Platform driver glue.  There is no .remove handler and no
 * module_exit() in this file, so the driver is not meant to be unbound
 * or unloaded; octeon_cf_shutdown() quiesces the hardware at system
 * shutdown instead.
 */
static struct platform_driver octeon_cf_driver = {
	.probe		= octeon_cf_probe,
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = octeon_cf_match,
		.shutdown = octeon_cf_shutdown
	},
};
1065 
1066 static int __init octeon_cf_init(void)
1067 {
1068         return platform_driver_register(&octeon_cf_driver);
1069 }
1070 
1071 
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

/* No module_exit(): the driver does not support being unloaded. */
module_init(octeon_cf_init);
1079 

/* This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us */