
Linux/drivers/ata/sata_dwc_460ex.c

  1 /*
  2  * drivers/ata/sata_dwc_460ex.c
  3  *
  4  * Synopsys DesignWare Cores (DWC) SATA host driver
  5  *
  6  * Author: Mark Miesfeld <mmiesfeld@amcc.com>
  7  *
  8  * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
  9  * Copyright 2008 DENX Software Engineering
 10  *
 11  * Based on versions provided by AMCC and Synopsys which are:
 12  *          Copyright 2006 Applied Micro Circuits Corporation
 13  *          COPYRIGHT (C) 2005  SYNOPSYS, INC.  ALL RIGHTS RESERVED
 14  *
 15  * This program is free software; you can redistribute  it and/or modify it
 16  * under  the terms of  the GNU General  Public License as published by the
 17  * Free Software Foundation;  either version 2 of the  License, or (at your
 18  * option) any later version.
 19  */
 20 
 21 #ifdef CONFIG_SATA_DWC_DEBUG
 22 #define DEBUG
 23 #endif
 24 
 25 #ifdef CONFIG_SATA_DWC_VDEBUG
 26 #define VERBOSE_DEBUG
 27 #define DEBUG_NCQ
 28 #endif
 29 
 30 #include <linux/kernel.h>
 31 #include <linux/module.h>
 32 #include <linux/device.h>
 33 #include <linux/of_address.h>
 34 #include <linux/of_irq.h>
 35 #include <linux/of_platform.h>
 36 #include <linux/platform_device.h>
 37 #include <linux/libata.h>
 38 #include <linux/slab.h>
 39 #include "libata.h"
 40 
 41 #include <scsi/scsi_host.h>
 42 #include <scsi/scsi_cmnd.h>
 43 
 44 /* These two are defined in "libata.h" */
 45 #undef  DRV_NAME
 46 #undef  DRV_VERSION
 47 
 48 #define DRV_NAME        "sata-dwc"
 49 #define DRV_VERSION     "1.3"
 50 
 51 /* SATA DMA driver Globals */
 52 #define DMA_NUM_CHANS           1
 53 #define DMA_NUM_CHAN_REGS       8
 54 
 55 /* SATA DMA Register definitions */
  56 #define AHB_DMA_BRST_DFLT       64      /* 64 bytes = 16 32-bit data items burst length */
 57 
 58 struct dmareg {
 59         u32 low;                /* Low bits 0-31 */
 60         u32 high;               /* High bits 32-63 */
 61 };
 62 
 63 /* DMA Per Channel registers */
 64 struct dma_chan_regs {
 65         struct dmareg sar;      /* Source Address */
 66         struct dmareg dar;      /* Destination address */
 67         struct dmareg llp;      /* Linked List Pointer */
 68         struct dmareg ctl;      /* Control */
 69         struct dmareg sstat;    /* Source Status not implemented in core */
  70         struct dmareg dstat;    /* Destination Status not implemented in core */
  71         struct dmareg sstatar;  /* Source Status Address not impl in core */
  72         struct dmareg dstatar;  /* Destination Status Address not impl in core */
 73         struct dmareg cfg;      /* Config */
 74         struct dmareg sgr;      /* Source Gather */
 75         struct dmareg dsr;      /* Destination Scatter */
 76 };
 77 
 78 /* Generic Interrupt Registers */
 79 struct dma_interrupt_regs {
 80         struct dmareg tfr;      /* Transfer Interrupt */
 81         struct dmareg block;    /* Block Interrupt */
 82         struct dmareg srctran;  /* Source Transfer Interrupt */
 83         struct dmareg dsttran;  /* Dest Transfer Interrupt */
 84         struct dmareg error;    /* Error */
 85 };
 86 
 87 struct ahb_dma_regs {
 88         struct dma_chan_regs    chan_regs[DMA_NUM_CHAN_REGS];
 89         struct dma_interrupt_regs interrupt_raw;        /* Raw Interrupt */
 90         struct dma_interrupt_regs interrupt_status;     /* Interrupt Status */
 91         struct dma_interrupt_regs interrupt_mask;       /* Interrupt Mask */
 92         struct dma_interrupt_regs interrupt_clear;      /* Interrupt Clear */
 93         struct dmareg           statusInt;      /* Interrupt combined*/
 94         struct dmareg           rq_srcreg;      /* Src Trans Req */
 95         struct dmareg           rq_dstreg;      /* Dst Trans Req */
 96         struct dmareg           rq_sgl_srcreg;  /* Sngl Src Trans Req*/
 97         struct dmareg           rq_sgl_dstreg;  /* Sngl Dst Trans Req*/
 98         struct dmareg           rq_lst_srcreg;  /* Last Src Trans Req*/
 99         struct dmareg           rq_lst_dstreg;  /* Last Dst Trans Req*/
100         struct dmareg           dma_cfg;                /* DMA Config */
101         struct dmareg           dma_chan_en;            /* DMA Channel Enable*/
102         struct dmareg           dma_id;                 /* DMA ID */
103         struct dmareg           dma_test;               /* DMA Test */
104         struct dmareg           res1;                   /* reserved */
105         struct dmareg           res2;                   /* reserved */
106         /*
107          * DMA Comp Params
108          * Param 6 = dma_param[0], Param 5 = dma_param[1],
109          * Param 4 = dma_param[2] ...
110          */
111         struct dmareg           dma_params[6];
112 };
113 
114 /* Data structure for linked list item */
115 struct lli {
116         u32             sar;            /* Source Address */
117         u32             dar;            /* Destination address */
118         u32             llp;            /* Linked List Pointer */
119         struct dmareg   ctl;            /* Control */
120         struct dmareg   dstat;          /* Destination Status */
121 };
122 
123 enum {
124         SATA_DWC_DMAC_LLI_SZ =  (sizeof(struct lli)),
125         SATA_DWC_DMAC_LLI_NUM = 256,
126         SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \
127                                         SATA_DWC_DMAC_LLI_NUM),
128         SATA_DWC_DMAC_TWIDTH_BYTES = 4,
129         SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \
130                                                 SATA_DWC_DMAC_TWIDTH_BYTES),
131 };
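     /*
      * Sizing, for reference (assuming the compiler adds no padding to
      * struct lli, which packs three u32s plus two 8-byte struct dmareg):
      * one LLI entry is 28 bytes, so a per-command LLI table occupies
      * 28 * 256 = 7168 bytes, and a single LLI block can describe at most
      * 0x800 items * 4 bytes = 8192 bytes of data.
      */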
132 
133 /* DMA Register Operation Bits */
134 enum {
135         DMA_EN  =               0x00000001, /* Enable AHB DMA */
136         DMA_CTL_LLP_SRCEN =     0x10000000, /* Blk chain enable Src */
137         DMA_CTL_LLP_DSTEN =     0x08000000, /* Blk chain enable Dst */
138 };
139 
 140 #define DMA_CTL_BLK_TS(size)    ((size) & 0x00000FFF)   /* Blk Transfer size */
 141 #define DMA_CHANNEL(ch)         (0x00000001 << (ch))    /* Select channel */
 142         /* Enable channel */
 143 #define DMA_ENABLE_CHAN(ch)     ((0x00000001 << (ch)) |                 \
 144                                  ((0x00000001 << (ch)) << 8))
 145         /* Disable channel */
 146 #define DMA_DISABLE_CHAN(ch)    (0x00000000 | ((0x00000001 << (ch)) << 8))
147         /* Transfer Type & Flow Controller */
148 #define DMA_CTL_TTFC(type)      (((type) & 0x7) << 20)
149 #define DMA_CTL_SMS(num)        (((num) & 0x3) << 25) /* Src Master Select */
150 #define DMA_CTL_DMS(num)        (((num) & 0x3) << 23)/* Dst Master Select */
151         /* Src Burst Transaction Length */
152 #define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14)
153         /* Dst Burst Transaction Length */
154 #define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11)
155         /* Source Transfer Width */
156 #define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4)
157         /* Destination Transfer Width */
158 #define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1)
159 
160 /* Assign HW handshaking interface (x) to destination / source peripheral */
161 #define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
162 #define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
163 #define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5)
164 #define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
165 
166 /*
167  * This define is used to set block chaining disabled in the control low
 168  * register.  It is already in little endian format so it can be &'d directly.
169  * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
170  */
171 enum {
172         DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7,
173         DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */
174         DMA_CTL_TTFC_M2P_PER =  0x00000003, /* Mem to per, peripheral cntr */
175         DMA_CTL_SINC_INC =      0x00000000, /* Source Address Increment */
176         DMA_CTL_SINC_DEC =      0x00000200,
177         DMA_CTL_SINC_NOCHANGE = 0x00000400,
178         DMA_CTL_DINC_INC =      0x00000000, /* Destination Address Increment */
179         DMA_CTL_DINC_DEC =      0x00000080,
180         DMA_CTL_DINC_NOCHANGE = 0x00000100,
181         DMA_CTL_INT_EN =        0x00000001, /* Interrupt Enable */
182 
183 /* Channel Configuration Register high bits */
184         DMA_CFG_FCMOD_REQ =     0x00000001, /* Flow Control - request based */
185         DMA_CFG_PROTCTL =       (0x00000003 << 2),/* Protection Control */
186 
187 /* Channel Configuration Register low bits */
188         DMA_CFG_RELD_DST =      0x80000000, /* Reload Dest / Src Addr */
189         DMA_CFG_RELD_SRC =      0x40000000,
190         DMA_CFG_HS_SELSRC =     0x00000800, /* Software handshake Src/ Dest */
191         DMA_CFG_HS_SELDST =     0x00000400,
192         DMA_CFG_FIFOEMPTY =     (0x00000001 << 9), /* FIFO Empty bit */
193 
194 /* Channel Linked List Pointer Register */
195         DMA_LLP_AHBMASTER1 =    0,      /* List Master Select */
196         DMA_LLP_AHBMASTER2 =    1,
197 
198         SATA_DWC_MAX_PORTS = 1,
199 
200         SATA_DWC_SCR_OFFSET = 0x24,
201         SATA_DWC_REG_OFFSET = 0x64,
202 };
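     /*
      * Register map note, inferred from the two offsets above: relative to
      * the SATA core's base address the SCR block (SStatus/SError/SControl)
      * is expected at +0x24 and the vendor-specific registers described by
      * struct sata_dwc_regs below at +0x64.
      */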
203 
204 /* DWC SATA Registers */
205 struct sata_dwc_regs {
206         u32 fptagr;             /* 1st party DMA tag */
207         u32 fpbor;              /* 1st party DMA buffer offset */
208         u32 fptcr;              /* 1st party DMA Xfr count */
209         u32 dmacr;              /* DMA Control */
210         u32 dbtsr;              /* DMA Burst Transac size */
211         u32 intpr;              /* Interrupt Pending */
212         u32 intmr;              /* Interrupt Mask */
213         u32 errmr;              /* Error Mask */
214         u32 llcr;               /* Link Layer Control */
215         u32 phycr;              /* PHY Control */
216         u32 physr;              /* PHY Status */
217         u32 rxbistpd;           /* Recvd BIST pattern def register */
218         u32 rxbistpd1;          /* Recvd BIST data dword1 */
219         u32 rxbistpd2;          /* Recvd BIST pattern data dword2 */
220         u32 txbistpd;           /* Trans BIST pattern def register */
221         u32 txbistpd1;          /* Trans BIST data dword1 */
222         u32 txbistpd2;          /* Trans BIST data dword2 */
223         u32 bistcr;             /* BIST Control Register */
224         u32 bistfctr;           /* BIST FIS Count Register */
225         u32 bistsr;             /* BIST Status Register */
226         u32 bistdecr;           /* BIST Dword Error count register */
227         u32 res[15];            /* Reserved locations */
228         u32 testr;              /* Test Register */
229         u32 versionr;           /* Version Register */
230         u32 idr;                /* ID Register */
231         u32 unimpl[192];        /* Unimplemented */
232         u32 dmadr[256]; /* FIFO Locations in DMA Mode */
233 };
234 
235 enum {
236         SCR_SCONTROL_DET_ENABLE =       0x00000001,
237         SCR_SSTATUS_DET_PRESENT =       0x00000001,
238         SCR_SERROR_DIAG_X       =       0x04000000,
239 /* DWC SATA Register Operations */
240         SATA_DWC_TXFIFO_DEPTH   =       0x01FF,
241         SATA_DWC_RXFIFO_DEPTH   =       0x01FF,
242         SATA_DWC_DMACR_TMOD_TXCHEN =    0x00000004,
243         SATA_DWC_DMACR_TXCHEN   = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
244         SATA_DWC_DMACR_RXCHEN   = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
245         SATA_DWC_DMACR_TXRXCH_CLEAR =   SATA_DWC_DMACR_TMOD_TXCHEN,
246         SATA_DWC_INTPR_DMAT     =       0x00000001,
247         SATA_DWC_INTPR_NEWFP    =       0x00000002,
248         SATA_DWC_INTPR_PMABRT   =       0x00000004,
249         SATA_DWC_INTPR_ERR      =       0x00000008,
250         SATA_DWC_INTPR_NEWBIST  =       0x00000010,
251         SATA_DWC_INTPR_IPF      =       0x10000000,
252         SATA_DWC_INTMR_DMATM    =       0x00000001,
253         SATA_DWC_INTMR_NEWFPM   =       0x00000002,
254         SATA_DWC_INTMR_PMABRTM  =       0x00000004,
255         SATA_DWC_INTMR_ERRM     =       0x00000008,
256         SATA_DWC_INTMR_NEWBISTM =       0x00000010,
257         SATA_DWC_LLCR_SCRAMEN   =       0x00000001,
258         SATA_DWC_LLCR_DESCRAMEN =       0x00000002,
259         SATA_DWC_LLCR_RPDEN     =       0x00000004,
 260 /* These are all the error bits; zeros are reserved fields. */
261         SATA_DWC_SERROR_ERR_BITS =      0x0FFF0F03
262 };
263 
264 #define SATA_DWC_SCR0_SPD_GET(v)        (((v) >> 4) & 0x0000000F)
265 #define SATA_DWC_DMACR_TX_CLEAR(v)      (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
266                                                  SATA_DWC_DMACR_TMOD_TXCHEN)
267 #define SATA_DWC_DMACR_RX_CLEAR(v)      (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
268                                                  SATA_DWC_DMACR_TMOD_TXCHEN)
269 #define SATA_DWC_DBTSR_MWR(size)        (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
270 #define SATA_DWC_DBTSR_MRD(size)        ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
271                                                  << 16)
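     /*
      * Worked example with AHB_DMA_BRST_DFLT from above: a 64-byte burst
      * gives SATA_DWC_DBTSR_MWR(64) = 64/4 = 16 and SATA_DWC_DBTSR_MRD(64)
      * = 16 << 16, i.e. a DBTSR value of 0x00100010 -- 16 dwords for both
      * the write and read directions (this is what sata_dwc_port_start()
      * programs below).
      */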
272 struct sata_dwc_device {
273         struct device           *dev;           /* generic device struct */
274         struct ata_probe_ent    *pe;            /* ptr to probe-ent */
275         struct ata_host         *host;
276         u8                      *reg_base;
277         struct sata_dwc_regs    *sata_dwc_regs; /* DW Synopsys SATA specific */
278         int                     irq_dma;
279 };
280 
281 #define SATA_DWC_QCMD_MAX       32
282 
283 struct sata_dwc_device_port {
284         struct sata_dwc_device  *hsdev;
285         int                     cmd_issued[SATA_DWC_QCMD_MAX];
286         struct lli              *llit[SATA_DWC_QCMD_MAX];  /* DMA LLI table */
287         dma_addr_t              llit_dma[SATA_DWC_QCMD_MAX];
288         u32                     dma_chan[SATA_DWC_QCMD_MAX];
289         int                     dma_pending[SATA_DWC_QCMD_MAX];
290 };
291 
292 /*
293  * Commonly used DWC SATA driver Macros
294  */
295 #define HSDEV_FROM_HOST(host)  ((struct sata_dwc_device *)\
296                                         (host)->private_data)
297 #define HSDEV_FROM_AP(ap)  ((struct sata_dwc_device *)\
298                                         (ap)->host->private_data)
299 #define HSDEVP_FROM_AP(ap)   ((struct sata_dwc_device_port *)\
300                                         (ap)->private_data)
301 #define HSDEV_FROM_QC(qc)       ((struct sata_dwc_device *)\
302                                         (qc)->ap->host->private_data)
303 #define HSDEV_FROM_HSDEVP(p)    ((struct sata_dwc_device *)\
 304                                                 (p)->hsdev)
305 
306 enum {
307         SATA_DWC_CMD_ISSUED_NOT         = 0,
308         SATA_DWC_CMD_ISSUED_PEND        = 1,
309         SATA_DWC_CMD_ISSUED_EXEC        = 2,
310         SATA_DWC_CMD_ISSUED_NODATA      = 3,
311 
312         SATA_DWC_DMA_PENDING_NONE       = 0,
313         SATA_DWC_DMA_PENDING_TX         = 1,
314         SATA_DWC_DMA_PENDING_RX         = 2,
315 };
316 
317 struct sata_dwc_host_priv {
318         void    __iomem  *scr_addr_sstatus;
 319         u32     sata_dwc_sactive_issued;
 320         u32     sata_dwc_sactive_queued;
321         u32     dma_interrupt_count;
322         struct  ahb_dma_regs    *sata_dma_regs;
323         struct  device  *dwc_dev;
324         int     dma_channel;
325 };
326 struct sata_dwc_host_priv host_pvt;
327 /*
328  * Prototypes
329  */
330 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
331 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
332                                 u32 check_status);
333 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
334 static void sata_dwc_port_stop(struct ata_port *ap);
335 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
336 static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
337 static void dma_dwc_exit(struct sata_dwc_device *hsdev);
338 static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
339                               struct lli *lli, dma_addr_t dma_lli,
340                               void __iomem *addr, int dir);
341 static void dma_dwc_xfer_start(int dma_ch);
342 
343 static const char *get_prot_descript(u8 protocol)
344 {
345         switch ((enum ata_tf_protocols)protocol) {
346         case ATA_PROT_NODATA:
347                 return "ATA no data";
348         case ATA_PROT_PIO:
349                 return "ATA PIO";
350         case ATA_PROT_DMA:
351                 return "ATA DMA";
352         case ATA_PROT_NCQ:
353                 return "ATA NCQ";
354         case ATAPI_PROT_NODATA:
355                 return "ATAPI no data";
356         case ATAPI_PROT_PIO:
357                 return "ATAPI PIO";
358         case ATAPI_PROT_DMA:
359                 return "ATAPI DMA";
360         default:
361                 return "unknown";
362         }
363 }
364 
365 static const char *get_dma_dir_descript(int dma_dir)
366 {
367         switch ((enum dma_data_direction)dma_dir) {
368         case DMA_BIDIRECTIONAL:
369                 return "bidirectional";
370         case DMA_TO_DEVICE:
371                 return "to device";
372         case DMA_FROM_DEVICE:
373                 return "from device";
374         default:
375                 return "none";
376         }
377 }
378 
379 static void sata_dwc_tf_dump(struct ata_taskfile *tf)
380 {
381         dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
382                 "0x%lx device: %x\n", tf->command,
383                 get_prot_descript(tf->protocol), tf->flags, tf->device);
384         dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
385                 "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
386                  tf->lbam, tf->lbah);
387         dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x "
388                 "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
389                 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
390                 tf->hob_lbah);
391 }
392 
393 /*
394  * Function: get_burst_length_encode
395  * arguments: datalength: length in bytes of data
396  * returns value to be programmed in register corresponding to data length
 397  * This value is the burst size (MSIZE) encoding for the number of 32-bit items
398  */
399 static  int get_burst_length_encode(int datalength)
400 {
401         int items = datalength >> 2;    /* div by 4 to get lword count */
402 
403         if (items >= 64)
404                 return 5;
405 
406         if (items >= 32)
407                 return 4;
408 
409         if (items >= 16)
410                 return 3;
411 
412         if (items >= 8)
413                 return 2;
414 
415         if (items >= 4)
416                 return 1;
417 
418         return 0;
419 }
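     /*
      * Example: the default AHB_DMA_BRST_DFLT of 64 bytes is 16 32-bit
      * items, so this returns 3.  The returned value is presumably the
      * DW AHB DMAC burst-size (MSIZE) encoding (0 = 1 item, 1 = 4, 2 = 8,
      * 3 = 16, 4 = 32, 5 = 64), which DMA_CTL_SRC_MSIZE()/DMA_CTL_DST_MSIZE()
      * then shift into the channel control word in map_sg_to_lli().
      */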
420 
421 static  void clear_chan_interrupts(int c)
422 {
423         out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low),
424                  DMA_CHANNEL(c));
425         out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low),
426                  DMA_CHANNEL(c));
427         out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low),
428                  DMA_CHANNEL(c));
429         out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low),
430                  DMA_CHANNEL(c));
431         out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low),
432                  DMA_CHANNEL(c));
433 }
434 
435 /*
436  * Function: dma_request_channel
437  * arguments: None
438  * returns channel number if available else -1
439  * This function assigns the next available DMA channel from the list to the
440  * requester
441  */
442 static int dma_request_channel(void)
443 {
444         /* Check if the channel is not currently in use */
445         if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &
446                 DMA_CHANNEL(host_pvt.dma_channel)))
447                 return host_pvt.dma_channel;
448         dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n",
449                 __func__, host_pvt.dma_channel);
450         return -1;
451 }
452 
453 /*
454  * Function: dma_dwc_interrupt
 455  * arguments: irq, hsdev_instance
 456  * returns IRQ status (irqreturn_t)
457  * Interrupt Handler for DW AHB SATA DMA
458  */
459 static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
460 {
461         int chan;
462         u32 tfr_reg, err_reg;
463         unsigned long flags;
464         struct sata_dwc_device *hsdev = hsdev_instance;
465         struct ata_host *host = (struct ata_host *)hsdev->host;
466         struct ata_port *ap;
467         struct sata_dwc_device_port *hsdevp;
468         u8 tag = 0;
469         unsigned int port = 0;
470 
471         spin_lock_irqsave(&host->lock, flags);
472         ap = host->ports[port];
473         hsdevp = HSDEVP_FROM_AP(ap);
474         tag = ap->link.active_tag;
475 
 476         tfr_reg =
 477                 in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr.low));
 478         err_reg =
 479                 in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.low));
480 
481         dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
482                 tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
483 
484         chan = host_pvt.dma_channel;
485         if (chan >= 0) {
486                 /* Check for end-of-transfer interrupt. */
487                 if (tfr_reg & DMA_CHANNEL(chan)) {
488                         /*
489                          * Each DMA command produces 2 interrupts.  Only
490                          * complete the command after both interrupts have been
491                          * seen. (See sata_dwc_isr())
492                          */
493                         host_pvt.dma_interrupt_count++;
494                         sata_dwc_clear_dmacr(hsdevp, tag);
495 
496                         if (hsdevp->dma_pending[tag] ==
497                             SATA_DWC_DMA_PENDING_NONE) {
498                                 dev_err(ap->dev, "DMA not pending eot=0x%08x "
499                                         "err=0x%08x tag=0x%02x pending=%d\n",
500                                         tfr_reg, err_reg, tag,
501                                         hsdevp->dma_pending[tag]);
502                         }
503 
504                         if ((host_pvt.dma_interrupt_count % 2) == 0)
505                                 sata_dwc_dma_xfer_complete(ap, 1);
506 
507                         /* Clear the interrupt */
 508                         out_le32(&(host_pvt.sata_dma_regs->
 509                                         interrupt_clear.tfr.low),
510                                  DMA_CHANNEL(chan));
511                 }
512 
513                 /* Check for error interrupt. */
514                 if (err_reg & DMA_CHANNEL(chan)) {
515                         /* TODO Need error handler ! */
516                         dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
517                                 err_reg);
518 
519                         /* Clear the interrupt. */
 520                         out_le32(&(host_pvt.sata_dma_regs->
 521                                         interrupt_clear.error.low),
522                                  DMA_CHANNEL(chan));
523                 }
524         }
525         spin_unlock_irqrestore(&host->lock, flags);
526         return IRQ_HANDLED;
527 }
528 
529 /*
530  * Function: dma_request_interrupts
531  * arguments: hsdev
532  * returns status
533  * This function registers ISR for a particular DMA channel interrupt
534  */
535 static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
536 {
537         int retval = 0;
538         int chan = host_pvt.dma_channel;
539 
540         if (chan >= 0) {
541                 /* Unmask error interrupt */
542                 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
543                          DMA_ENABLE_CHAN(chan));
544 
545                 /* Unmask end-of-transfer interrupt */
546                 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low,
547                          DMA_ENABLE_CHAN(chan));
548         }
549 
550         retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev);
551         if (retval) {
552                 dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n",
553                 __func__, irq);
554                 return -ENODEV;
555         }
556 
557         /* Mark this interrupt as requested */
558         hsdev->irq_dma = irq;
559         return 0;
560 }
561 
562 /*
563  * Function: map_sg_to_lli
 564  * The Synopsys driver has a comment proposing that better performance
565  * is possible by only enabling interrupts on the last item in the linked list.
566  * However, it seems that could be a problem if an error happened on one of the
567  * first items.  The transfer would halt, but no error interrupt would occur.
568  * Currently this function sets interrupts enabled for each linked list item:
569  * DMA_CTL_INT_EN.
570  */
571 static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
572                         struct lli *lli, dma_addr_t dma_lli,
573                         void __iomem *dmadr_addr, int dir)
574 {
575         int i, idx = 0;
576         int fis_len = 0;
577         dma_addr_t next_llp;
578         int bl;
579         int sms_val, dms_val;
580 
581         sms_val = 0;
582         dms_val = 1 + host_pvt.dma_channel;
583         dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
584                 " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
585                 (u32)dmadr_addr);
586 
587         bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
588 
589         for (i = 0; i < num_elems; i++, sg++) {
590                 u32 addr, offset;
591                 u32 sg_len, len;
592 
593                 addr = (u32) sg_dma_address(sg);
594                 sg_len = sg_dma_len(sg);
595 
596                 dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
597                         "=%d\n", __func__, i, addr, sg_len);
598 
599                 while (sg_len) {
600                         if (idx >= SATA_DWC_DMAC_LLI_NUM) {
601                                 /* The LLI table is not large enough. */
602                                 dev_err(host_pvt.dwc_dev, "LLI table overrun "
603                                 "(idx=%d)\n", idx);
604                                 break;
605                         }
606                         len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
607                                 SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
608 
609                         offset = addr & 0xffff;
610                         if ((offset + sg_len) > 0x10000)
611                                 len = 0x10000 - offset;
612 
613                         /*
614                          * Make sure a LLI block is not created that will span
615                          * 8K max FIS boundary.  If the block spans such a FIS
616                          * boundary, there is a chance that a DMA burst will
617                          * cross that boundary -- this results in an error in
618                          * the host controller.
619                          */
620                         if (fis_len + len > 8192) {
621                                 dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
622                                         "%d(0x%x) len=%d(0x%x)\n", fis_len,
623                                          fis_len, len, len);
624                                 len = 8192 - fis_len;
625                                 fis_len = 0;
626                         } else {
627                                 fis_len += len;
628                         }
629                         if (fis_len == 8192)
630                                 fis_len = 0;
631 
632                         /*
633                          * Set DMA addresses and lower half of control register
634                          * based on direction.
635                          */
636                         if (dir == DMA_FROM_DEVICE) {
637                                 lli[idx].dar = cpu_to_le32(addr);
638                                 lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
639 
640                                 lli[idx].ctl.low = cpu_to_le32(
641                                         DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
642                                         DMA_CTL_SMS(sms_val) |
643                                         DMA_CTL_DMS(dms_val) |
644                                         DMA_CTL_SRC_MSIZE(bl) |
645                                         DMA_CTL_DST_MSIZE(bl) |
646                                         DMA_CTL_SINC_NOCHANGE |
647                                         DMA_CTL_SRC_TRWID(2) |
648                                         DMA_CTL_DST_TRWID(2) |
649                                         DMA_CTL_INT_EN |
650                                         DMA_CTL_LLP_SRCEN |
651                                         DMA_CTL_LLP_DSTEN);
652                         } else {        /* DMA_TO_DEVICE */
653                                 lli[idx].sar = cpu_to_le32(addr);
654                                 lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
655 
656                                 lli[idx].ctl.low = cpu_to_le32(
657                                         DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
658                                         DMA_CTL_SMS(dms_val) |
659                                         DMA_CTL_DMS(sms_val) |
660                                         DMA_CTL_SRC_MSIZE(bl) |
661                                         DMA_CTL_DST_MSIZE(bl) |
662                                         DMA_CTL_DINC_NOCHANGE |
663                                         DMA_CTL_SRC_TRWID(2) |
664                                         DMA_CTL_DST_TRWID(2) |
665                                         DMA_CTL_INT_EN |
666                                         DMA_CTL_LLP_SRCEN |
667                                         DMA_CTL_LLP_DSTEN);
668                         }
669 
670                         dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
671                                 "0x%08x val: 0x%08x\n", __func__,
672                                 len, DMA_CTL_BLK_TS(len / 4));
673 
674                         /* Program the LLI CTL high register */
 675                         lli[idx].ctl.high =
 676                                 cpu_to_le32(DMA_CTL_BLK_TS(len / 4));
677 
678                         /* Program the next pointer.  The next pointer must be
679                          * the physical address, not the virtual address.
680                          */
 681                         next_llp = (dma_lli +
 682                                     ((idx + 1) * sizeof(struct lli)));
683 
684                         /* The last 2 bits encode the list master select. */
685                         next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
686 
687                         lli[idx].llp = cpu_to_le32(next_llp);
688                         idx++;
689                         sg_len -= len;
690                         addr += len;
691                 }
692         }
693 
694         /*
695          * The last next ptr has to be zero and the last control low register
696          * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
697          * and destination enable) set back to 0 (disabled.) This is what tells
698          * the core that this is the last item in the linked list.
699          */
700         if (idx) {
701                 lli[idx-1].llp = 0x00000000;
702                 lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
703 
704                 /* Flush cache to memory */
705                 dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
706                                DMA_BIDIRECTIONAL);
707         }
708 
709         return idx;
710 }
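     /*
      * Illustration with hypothetical numbers: a single 12 KiB scatterlist
      * element read from the device is split by the 8192-byte FIS limit
      * above into two LLIs -- 8192 bytes (ctl.high block size 2048 items)
      * followed by 4096 bytes (1024 items).  Only the second, final LLI
      * gets llp = 0 and its LLP_SRCEN/LLP_DSTEN bits cleared.
      */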
711 
712 /*
713  * Function: dma_dwc_xfer_start
714  * arguments: Channel number
715  * Return : None
716  * Enables the DMA channel
717  */
718 static void dma_dwc_xfer_start(int dma_ch)
719 {
720         /* Enable the DMA channel */
721         out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low),
722                  in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) |
723                  DMA_ENABLE_CHAN(dma_ch));
724 }
725 
726 static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
727                               struct lli *lli, dma_addr_t dma_lli,
728                               void __iomem *addr, int dir)
729 {
730         int dma_ch;
731         int num_lli;
732         /* Acquire DMA channel */
733         dma_ch = dma_request_channel();
734         if (dma_ch == -1) {
735                 dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
736                          __func__);
737                 return -EAGAIN;
738         }
739 
740         /* Convert SG list to linked list of items (LLIs) for AHB DMA */
741         num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir);
742 
743         dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:"
 744                 " 0x%08x addr: %p lli count: %d\n", __func__, sg, num_elems,
745                  lli, (u32)dma_lli, addr, num_lli);
746 
747         clear_chan_interrupts(dma_ch);
748 
749         /* Program the CFG register. */
750         out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
751                  DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) |
752                  DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
753         out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low),
754                  DMA_CFG_HW_CH_PRIOR(dma_ch));
755 
756         /* Program the address of the linked list */
757         out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
758                  DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
759 
760         /* Program the CTL register with src enable / dst enable */
761         out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
762                  DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
763         return dma_ch;
764 }
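     /*
      * Expected call sequence (the callers live later in this file,
      * presumably in the qc prepare and bmdma start paths):
      * dma_dwc_xfer_setup() builds the LLI chain and programs the channel
      * registers, and dma_dwc_xfer_start() is then called with the returned
      * channel number to set the channel-enable bit once the ATA command
      * has been issued.
      */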
765 
766 /*
767  * Function: dma_dwc_exit
768  * arguments: None
769  * returns status
770  * This function exits the SATA DMA driver
771  */
772 static void dma_dwc_exit(struct sata_dwc_device *hsdev)
773 {
774         dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
775         if (host_pvt.sata_dma_regs) {
776                 iounmap(host_pvt.sata_dma_regs);
777                 host_pvt.sata_dma_regs = NULL;
778         }
779 
780         if (hsdev->irq_dma) {
781                 free_irq(hsdev->irq_dma, hsdev);
782                 hsdev->irq_dma = 0;
783         }
784 }
785 
786 /*
787  * Function: dma_dwc_init
788  * arguments: hsdev
789  * returns status
790  * This function initializes the SATA DMA driver
791  */
792 static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
793 {
794         int err;
795 
796         err = dma_request_interrupts(hsdev, irq);
797         if (err) {
798                 dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
799                         " %d\n", __func__, err);
800                 return err;
801         }
802 
 803         /* Enable DMA */
804         out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN);
805 
806         dev_notice(host_pvt.dwc_dev, "DMA initialized\n");
 807         dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n",
 808                 host_pvt.sata_dma_regs);
809 
810         return 0;
811 }
812 
813 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
814 {
815         if (scr > SCR_NOTIFICATION) {
816                 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
817                         __func__, scr);
818                 return -EINVAL;
819         }
820 
821         *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
 822         dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
823                 __func__, link->ap->print_id, scr, *val);
824 
825         return 0;
826 }
827 
828 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
829 {
 830         dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
831                 __func__, link->ap->print_id, scr, val);
832         if (scr > SCR_NOTIFICATION) {
833                 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
834                          __func__, scr);
835                 return -EINVAL;
836         }
837         out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
838 
839         return 0;
840 }
841 
842 static u32 core_scr_read(unsigned int scr)
843 {
 844         return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +
 845                         (scr * 4));
846 }
847 
848 static void core_scr_write(unsigned int scr, u32 val)
849 {
850         out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4),
851                 val);
852 }
853 
854 static void clear_serror(void)
855 {
856         u32 val;
857         val = core_scr_read(SCR_ERROR);
858         core_scr_write(SCR_ERROR, val);
859 
860 }
861 
862 static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
863 {
864         out_le32(&hsdev->sata_dwc_regs->intpr,
865                  in_le32(&hsdev->sata_dwc_regs->intpr));
866 }
867 
868 static u32 qcmd_tag_to_mask(u8 tag)
869 {
870         return 0x00000001 << (tag & 0x1f);
871 }
872 
873 /* See ahci.c */
874 static void sata_dwc_error_intr(struct ata_port *ap,
875                                 struct sata_dwc_device *hsdev, uint intpr)
876 {
877         struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
878         struct ata_eh_info *ehi = &ap->link.eh_info;
879         unsigned int err_mask = 0, action = 0;
880         struct ata_queued_cmd *qc;
881         u32 serror;
882         u8 status, tag;
883         u32 err_reg;
884 
885         ata_ehi_clear_desc(ehi);
886 
887         serror = core_scr_read(SCR_ERROR);
888         status = ap->ops->sff_check_status(ap);
889 
 890         err_reg =
 891                 in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.low));
892         tag = ap->link.active_tag;
893 
894         dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
895                 "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
896                 __func__, serror, intpr, status, host_pvt.dma_interrupt_count,
897                 hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);
898 
899         /* Clear error register and interrupt bit */
900         clear_serror();
901         clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
902 
903         /* This is the only error happening now.  TODO check for exact error */
904 
905         err_mask |= AC_ERR_HOST_BUS;
906         action |= ATA_EH_RESET;
907 
908         /* Pass this on to EH */
909         ehi->serror |= serror;
910         ehi->action |= action;
911 
912         qc = ata_qc_from_tag(ap, tag);
913         if (qc)
914                 qc->err_mask |= err_mask;
915         else
916                 ehi->err_mask |= err_mask;
917 
918         ata_port_abort(ap);
919 }
920 
921 /*
922  * Function : sata_dwc_isr
 923  * arguments : irq, void *dev_instance
 924  * Return value : irqreturn_t - status of IRQ
 925  * This interrupt handler is called via the port ops registered function.
926  * .irq_handler = sata_dwc_isr
927  */
928 static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
929 {
930         struct ata_host *host = (struct ata_host *)dev_instance;
931         struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
932         struct ata_port *ap;
933         struct ata_queued_cmd *qc;
934         unsigned long flags;
935         u8 status, tag;
936         int handled, num_processed, port = 0;
937         uint intpr, sactive, sactive2, tag_mask;
938         struct sata_dwc_device_port *hsdevp;
939         host_pvt.sata_dwc_sactive_issued = 0;
940 
941         spin_lock_irqsave(&host->lock, flags);
942 
943         /* Read the interrupt register */
944         intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
945 
946         ap = host->ports[port];
947         hsdevp = HSDEVP_FROM_AP(ap);
948 
949         dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
950                 ap->link.active_tag);
951 
952         /* Check for error interrupt */
953         if (intpr & SATA_DWC_INTPR_ERR) {
954                 sata_dwc_error_intr(ap, hsdev, intpr);
955                 handled = 1;
956                 goto DONE;
957         }
958 
959         /* Check for DMA SETUP FIS (FP DMA) interrupt */
960         if (intpr & SATA_DWC_INTPR_NEWFP) {
961                 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
962 
963                 tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
964                 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
965                 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
966                         dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
967 
968                 host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
969 
970                 qc = ata_qc_from_tag(ap, tag);
971                 /*
972                  * Start FP DMA for NCQ command.  At this point the tag is the
973                  * active tag.  It is the tag that matches the command about to
974                  * be completed.
975                  */
976                 qc->ap->link.active_tag = tag;
977                 sata_dwc_bmdma_start_by_tag(qc, tag);
978 
979                 handled = 1;
980                 goto DONE;
981         }
982         sactive = core_scr_read(SCR_ACTIVE);
983         tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
984 
985         /* If no sactive issued and tag_mask is zero then this is not NCQ */
986         if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
987                 if (ap->link.active_tag == ATA_TAG_POISON)
988                         tag = 0;
989                 else
990                         tag = ap->link.active_tag;
991                 qc = ata_qc_from_tag(ap, tag);
992 
993                 /* DEV interrupt w/ no active qc? */
994                 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
995                         dev_err(ap->dev, "%s interrupt with no active qc "
996                                 "qc=%p\n", __func__, qc);
997                         ap->ops->sff_check_status(ap);
998                         handled = 1;
999                         goto DONE;
1000                 }
1001                 status = ap->ops->sff_check_status(ap);
1002 
1003                 qc->ap->link.active_tag = tag;
1004                 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
1005 
1006                 if (status & ATA_ERR) {
1007                         dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
1008                         sata_dwc_qc_complete(ap, qc, 1);
1009                         handled = 1;
1010                         goto DONE;
1011                 }
1012 
1013                 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
1014                         __func__, get_prot_descript(qc->tf.protocol));
1015 DRVSTILLBUSY:
1016                 if (ata_is_dma(qc->tf.protocol)) {
1017                         /*
1018                          * Each DMA transaction produces 2 interrupts. The DMAC
1019                          * transfer complete interrupt and the SATA controller
1020                          * operation done interrupt. The command should be
1021                          * completed only after both interrupts are seen.
1022                          */
1023                         host_pvt.dma_interrupt_count++;
1024                         if (hsdevp->dma_pending[tag] == \
1025                                         SATA_DWC_DMA_PENDING_NONE) {
1026                                 dev_err(ap->dev, "%s: DMA not pending "
1027                                         "intpr=0x%08x status=0x%08x pending"
1028                                         "=%d\n", __func__, intpr, status,
1029                                         hsdevp->dma_pending[tag]);
1030                         }
1031 
1032                         if ((host_pvt.dma_interrupt_count % 2) == 0)
1033                                 sata_dwc_dma_xfer_complete(ap, 1);
1034                 } else if (ata_is_pio(qc->tf.protocol)) {
1035                         ata_sff_hsm_move(ap, qc, status, 0);
1036                         handled = 1;
1037                         goto DONE;
1038                 } else {
1039                         if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
1040                                 goto DRVSTILLBUSY;
1041                 }
1042 
1043                 handled = 1;
1044                 goto DONE;
1045         }
1046 
1047         /*
1048          * This is a NCQ command. At this point we need to figure out for which
1049          * tags we have gotten a completion interrupt.  One interrupt may serve
1050          * as completion for more than one operation when commands are queued
1051          * (NCQ).  We need to process each completed command.
1052          */
1053 
1054          /* process completed commands */
1055         sactive = core_scr_read(SCR_ACTIVE);
1056         tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
1057 
1058         if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
1059                                                         tag_mask > 1) {
1060                 dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x  sactive_issued=0x%08x"
1061                         " tag_mask=0x%08x\n", __func__, sactive,
1062                         host_pvt.sata_dwc_sactive_issued, tag_mask);
1063         }
1064 
1065         if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
1066                                         (host_pvt.sata_dwc_sactive_issued)) {
1067                 dev_warn(ap->dev, "Bad tag mask?  sactive=0x%08x "
1068                          "(host_pvt.sata_dwc_sactive_issued)=0x%08x  tag_mask"
1069                          "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued,
1070                           tag_mask);
1071         }
1072 
1073         /* read just to clear ... not bad if currently still busy */
1074         status = ap->ops->sff_check_status(ap);
1075         dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
1076 
1077         tag = 0;
1078         num_processed = 0;
1079         while (tag_mask) {
1080                 num_processed++;
1081                 while (!(tag_mask & 0x00000001)) {
1082                         tag++;
1083                         tag_mask <<= 1;
1084                 }
1085 
1086                 tag_mask &= (~0x00000001);
1087                 qc = ata_qc_from_tag(ap, tag);
1088 
1089                 /* To be picked up by completion functions */
1090                 qc->ap->link.active_tag = tag;
1091                 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
1092 
1093                 /* Let libata/scsi layers handle error */
1094                 if (status & ATA_ERR) {
1095                         dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
1096                                 status);
1097                         sata_dwc_qc_complete(ap, qc, 1);
1098                         handled = 1;
1099                         goto DONE;
1100                 }
1101 
1102                 /* Process completed command */
1103                 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
1104                         get_prot_descript(qc->tf.protocol));
1105                 if (ata_is_dma(qc->tf.protocol)) {
1106                         host_pvt.dma_interrupt_count++;
1107                         if (hsdevp->dma_pending[tag] == \
1108                                         SATA_DWC_DMA_PENDING_NONE)
1109                                 dev_warn(ap->dev, "%s: DMA not pending?\n",
1110                                         __func__);
1111                         if ((host_pvt.dma_interrupt_count % 2) == 0)
1112                                 sata_dwc_dma_xfer_complete(ap, 1);
1113                 } else {
1114                         if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
1115                                 goto STILLBUSY;
1116                 }
1117                 continue;
1118 
1119 STILLBUSY:
1120                 ap->stats.idle_irq++;
1121                 dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
1122                         ap->print_id);
1123         } /* while tag_mask */
1124 
1125         /*
1126          * Check to see if any commands completed while we were processing our
1127          * initial set of completed commands (read status clears interrupts,
1128          * so we might miss a completed command interrupt if one came in while
1129          * we were processing --we read status as part of processing a completed
1130          * we were processing -- we read status as part of processing a completed
1131          */
1132         sactive2 = core_scr_read(SCR_ACTIVE);
1133         if (sactive2 != sactive) {
1134                 dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2"
1135                         "=0x%x\n", sactive, sactive2);
1136         }
1137         handled = 1;
1138 
1139 DONE:
1140         spin_unlock_irqrestore(&host->lock, flags);
1141         return IRQ_RETVAL(handled);
1142 }
1143 
1144 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
1145 {
1146         struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
1147 
1148         if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
1149                 out_le32(&(hsdev->sata_dwc_regs->dmacr),
1150                          SATA_DWC_DMACR_RX_CLEAR(
1151                                  in_le32(&(hsdev->sata_dwc_regs->dmacr))));
1152         } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
1153                 out_le32(&(hsdev->sata_dwc_regs->dmacr),
1154                          SATA_DWC_DMACR_TX_CLEAR(
1155                                  in_le32(&(hsdev->sata_dwc_regs->dmacr))));
1156         } else {
1157                 /*
1158                  * This should not happen, it indicates the driver is out of
1159                  * sync.  If it does happen, clear dmacr anyway.
1160                  */
1161                 dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and "
1162                         "TX DMA not pending tag=0x%02x pending=%d"
1163                         " dmacr: 0x%08x\n", __func__, tag,
1164                         hsdevp->dma_pending[tag],
1165                         in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1166                 out_le32(&(hsdev->sata_dwc_regs->dmacr),
1167                         SATA_DWC_DMACR_TXRXCH_CLEAR);
1168         }
1169 }
1170 
1171 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
1172 {
1173         struct ata_queued_cmd *qc;
1174         struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1175         struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1176         u8 tag = 0;
1177 
1178         tag = ap->link.active_tag;
1179         qc = ata_qc_from_tag(ap, tag);
1180         if (!qc) {
1181                 dev_err(ap->dev, "failed to get qc\n");
1182                 return;
1183         }
1184 
1185 #ifdef DEBUG_NCQ
1186         if (tag > 0) {
1187                 dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
1188                          "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
1189                          get_dma_dir_descript(qc->dma_dir),
1190                          get_prot_descript(qc->tf.protocol),
1191                          in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1192         }
1193 #endif
1194 
1195         if (ata_is_dma(qc->tf.protocol)) {
1196                 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
1197                         dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
1198                                 "pending dmacr: 0x%08x\n", __func__,
1199                                 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1200                 }
1201 
1202                 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
1203                 sata_dwc_qc_complete(ap, qc, check_status);
1204                 ap->link.active_tag = ATA_TAG_POISON;
1205         } else {
1206                 sata_dwc_qc_complete(ap, qc, check_status);
1207         }
1208 }
1209 
1210 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
1211                                 u32 check_status)
1212 {
1213         u8 status = 0;
1214         u32 mask = 0x0;
1215         u8 tag = qc->tag;
1216         struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1217         host_pvt.sata_dwc_sactive_queued = 0;
1218         dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
1219 
1220         if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
1221                 dev_err(ap->dev, "TX DMA PENDING\n");
1222         else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
1223                 dev_err(ap->dev, "RX DMA PENDING\n");
1224         dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:"
1225                 " protocol=%d\n", qc->tf.command, status, ap->print_id,
1226                  qc->tf.protocol);
1227 
1228         /* clear active bit */
1229         mask = (~(qcmd_tag_to_mask(tag)));
1230         host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
1231                                                 & mask;
1232         host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
1233                                                 & mask;
1234         ata_qc_complete(qc);
1235         return 0;
1236 }
1237 
1238 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
1239 {
1240         /* Enable selective interrupts by setting the interrupt mask register */
1241         out_le32(&hsdev->sata_dwc_regs->intmr,
1242                  SATA_DWC_INTMR_ERRM |
1243                  SATA_DWC_INTMR_NEWFPM |
1244                  SATA_DWC_INTMR_PMABRTM |
1245                  SATA_DWC_INTMR_DMATM);
1246         /*
1247          * Unmask the error bits that should trigger an error interrupt by
1248          * setting the error mask register.
1249          */
1250         out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
1251 
1252         dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
1253                  __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
1254                 in_le32(&hsdev->sata_dwc_regs->errmr));
1255 }
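     /*
      * With the values defined earlier this amounts to INTMR = 0x0000000f
      * (error, new FP, PM abort and DMA-transfer interrupts enabled) and
      * ERRMR = SATA_DWC_SERROR_ERR_BITS = 0x0fff0f03.
      */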
1256 
1257 static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
1258 {
1259         port->cmd_addr = (void *)base + 0x00;
1260         port->data_addr = (void *)base + 0x00;
1261 
1262         port->error_addr = (void *)base + 0x04;
1263         port->feature_addr = (void *)base + 0x04;
1264 
1265         port->nsect_addr = (void *)base + 0x08;
1266 
1267         port->lbal_addr = (void *)base + 0x0c;
1268         port->lbam_addr = (void *)base + 0x10;
1269         port->lbah_addr = (void *)base + 0x14;
1270 
1271         port->device_addr = (void *)base + 0x18;
1272         port->command_addr = (void *)base + 0x1c;
1273         port->status_addr = (void *)base + 0x1c;
1274 
1275         port->altstatus_addr = (void *)base + 0x20;
1276         port->ctl_addr = (void *)base + 0x20;
1277 }
1278 
1279 /*
1280  * Function : sata_dwc_port_start
1281  * arguments : struct ata_port *ap
1282  * Return value : returns 0 if success, error code otherwise
1283  * This function allocates the scatter gather LLI table for AHB DMA
1284  */
1285 static int sata_dwc_port_start(struct ata_port *ap)
1286 {
1287         int err = 0;
1288         struct sata_dwc_device *hsdev;
1289         struct sata_dwc_device_port *hsdevp = NULL;
1290         struct device *pdev;
1291         int i;
1292 
1293         hsdev = HSDEV_FROM_AP(ap);
1294 
1295         dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
1296 
1297         hsdev->host = ap->host;
1298         pdev = ap->host->dev;
1299         if (!pdev) {
1300                 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
1301                 err = -ENODEV;
1302                 goto CLEANUP;
1303         }
1304 
1305         /* Allocate Port Struct */
1306         hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
1307         if (!hsdevp) {
1308                 dev_err(ap->dev, "%s: kzalloc failed for hsdevp\n", __func__);
1309                 err = -ENOMEM;
1310                 goto CLEANUP;
1311         }
1312         hsdevp->hsdev = hsdev;
1313 
1314         for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
1315                 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
1316 
1317         ap->bmdma_prd = 0;      /* set these so libata doesn't use them */
1318         ap->bmdma_prd_dma = 0;
1319 
1320         /*
1321          * DMA - Assign scatter gather LLI table. We can't use the libata
1322          * version since its PRD is IDE PCI specific.
1323          */
1324         for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1325                 hsdevp->llit[i] = dma_alloc_coherent(pdev,
1326                                                      SATA_DWC_DMAC_LLI_TBL_SZ,
1327                                                      &(hsdevp->llit_dma[i]),
1328                                                      GFP_ATOMIC);
1329                 if (!hsdevp->llit[i]) {
1330                         dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
1331                                  __func__);
1332                         err = -ENOMEM;
1333                         goto CLEANUP_ALLOC;
1334                 }
1335         }
1336 
1337         if (ap->port_no == 0)  {
1338                 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
1339                         __func__);
1340                 out_le32(&hsdev->sata_dwc_regs->dmacr,
1341                          SATA_DWC_DMACR_TXRXCH_CLEAR);
1342 
1343                 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
1344                          __func__);
1345                 out_le32(&hsdev->sata_dwc_regs->dbtsr,
1346                          (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1347                           SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
1348         }
1349 
1350         /* Clear any error bits before libata starts issuing commands */
1351         clear_serror();
1352         ap->private_data = hsdevp;
1353         dev_dbg(ap->dev, "%s: done\n", __func__);
1354         return 0;
1355 
1356 CLEANUP_ALLOC:
        /* free any LLI tables already allocated before the failure */
        while (--i >= 0)
                dma_free_coherent(pdev, SATA_DWC_DMAC_LLI_TBL_SZ,
                                  hsdevp->llit[i], hsdevp->llit_dma[i]);
1357         kfree(hsdevp);
1358 CLEANUP:
1359         dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
1360         return err;
1361 }
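
/*
 * The HSDEV_FROM_AP()/HSDEVP_FROM_AP() helpers used throughout are
 * defined earlier in this file; they are assumed to resolve to the
 * private-data pointers wired up in sata_dwc_probe() and in
 * sata_dwc_port_start() above, roughly (illustration only):
 *
 *	#define HSDEV_FROM_AP(ap) \
 *		((struct sata_dwc_device *)(ap)->host->private_data)
 *	#define HSDEVP_FROM_AP(ap) \
 *		((struct sata_dwc_device_port *)(ap)->private_data)
 *
 * i.e. one sata_dwc_device per host and one sata_dwc_device_port per
 * port.
 */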
1362 
1363 static void sata_dwc_port_stop(struct ata_port *ap)
1364 {
1365         int i;
1366         struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1367         struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1368 
1369         dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
1370 
1371         if (hsdevp && hsdev) {
1372                 /* deallocate LLI table */
1373                 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1374                         dma_free_coherent(ap->host->dev,
1375                                           SATA_DWC_DMAC_LLI_TBL_SZ,
1376                                          hsdevp->llit[i], hsdevp->llit_dma[i]);
1377                 }
1378 
1379                 kfree(hsdevp);
1380         }
1381         ap->private_data = NULL;
1382 }
1383 
1384 /*
1385  * Function : sata_dwc_exec_command_by_tag
1386  * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
1387  * Return value : None
1388  * This function keeps track of individual command tag ids and calls
1389  * ata_sff_exec_command in libata
1390  */
1391 static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
1392                                          struct ata_taskfile *tf,
1393                                          u8 tag, u32 cmd_issued)
1394 {
1395         unsigned long flags;
1396         struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1397 
1398         dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
1399                 ata_get_cmd_descript(tf->command), tag);
1400 
1401         spin_lock_irqsave(&ap->host->lock, flags);
1402         hsdevp->cmd_issued[tag] = cmd_issued;
1403         spin_unlock_irqrestore(&ap->host->lock, flags);
1404         /*
1405          * Clear SError before executing a new command.
1406          * sata_dwc_scr_write and read can not be used here. Clearing the PM
1407          * managed SError register for the disk needs to be done before the
1408          * task file is loaded.
1409          */
1410         clear_serror();
1411         ata_sff_exec_command(ap, tf);
1412 }
1413 
1414 static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
1415 {
1416         sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
1417                                      SATA_DWC_CMD_ISSUED_PEND);
1418 }
1419 
1420 static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
1421 {
1422         u8 tag = qc->tag;
1423 
1424         if (ata_is_ncq(qc->tf.protocol)) {
1425                 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1426                         __func__, qc->ap->link.sactive, tag);
1427         } else {
1428                 tag = 0;
1429         }
1430         sata_dwc_bmdma_setup_by_tag(qc, tag);
1431 }
1432 
1433 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
1434 {
1435         int start_dma;
1436         u32 reg, dma_chan;
1437         struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
1438         struct ata_port *ap = qc->ap;
1439         struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1440         int dir = qc->dma_dir;
1441         dma_chan = hsdevp->dma_chan[tag];
1442 
1443         if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
1444                 start_dma = 1;
1445                 if (dir == DMA_TO_DEVICE)
1446                         hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
1447                 else
1448                         hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
1449         } else {
1450                 dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
1451                         "(tag=%d) DMA NOT started\n", __func__,
1452                         hsdevp->cmd_issued[tag], tag);
1453                 start_dma = 0;
1454         }
1455 
1456         dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
1457                 "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
1458                 get_dma_dir_descript(qc->dma_dir), start_dma);
1459         sata_dwc_tf_dump(&(qc->tf));
1460 
1461         if (start_dma) {
1462                 reg = core_scr_read(SCR_ERROR);
1463                 if (reg & SATA_DWC_SERROR_ERR_BITS) {
1464                         dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
1465                                 __func__, reg);
1466                 }
1467 
1468                 if (dir == DMA_TO_DEVICE)
1469                         out_le32(&hsdev->sata_dwc_regs->dmacr,
1470                                 SATA_DWC_DMACR_TXCHEN);
1471                 else
1472                         out_le32(&hsdev->sata_dwc_regs->dmacr,
1473                                 SATA_DWC_DMACR_RXCHEN);
1474 
1475                 /* Enable AHB DMA transfer on the specified channel */
1476                 dma_dwc_xfer_start(dma_chan);
1477         }
1478 }
1479 
1480 static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
1481 {
1482         u8 tag = qc->tag;
1483 
1484         if (ata_is_ncq(qc->tf.protocol)) {
1485                 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1486                         __func__, qc->ap->link.sactive, tag);
1487         } else {
1488                 tag = 0;
1489         }
1490         dev_dbg(qc->ap->dev, "%s\n", __func__);
1491         sata_dwc_bmdma_start_by_tag(qc, tag);
1492 }
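
/*
 * For DMA commands this driver splits issue into two phases:
 * sata_dwc_bmdma_setup() writes the ATA command to the device through
 * sata_dwc_exec_command_by_tag() (marking the tag ISSUED_PEND), while
 * sata_dwc_bmdma_start() only selects the TX or RX channel in DMACR and
 * kicks the AHB DMA channel that sata_dwc_qc_prep_by_tag() programmed
 * for this tag. Non-NCQ commands are folded onto tag 0 in both paths.
 */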
1493 
1494 /*
1495  * Function : sata_dwc_qc_prep_by_tag
1496  * arguments : ata_queued_cmd *qc, u8 tag
1497  * Return value : None
1498  * qc_prep for a particular queued command based on tag
1499  */
1500 static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1501 {
1502         struct scatterlist *sg = qc->sg;
1503         struct ata_port *ap = qc->ap;
1504         int dma_chan;
1505         struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1506         struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1507 
1508         dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
1509                 __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
1510                  qc->n_elem);
1511 
1512         dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
1513                                       hsdevp->llit_dma[tag],
1514                                       (void __iomem *)&hsdev->sata_dwc_regs->dmadr,
1515                                       qc->dma_dir);
1516         if (dma_chan < 0) {
1517                 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
1518                         __func__, dma_chan);
1519                 return;
1520         }
1521         hsdevp->dma_chan[tag] = dma_chan;
1522 }
1523 
1524 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
1525 {
1526         u32 sactive;
1527         u8 tag = qc->tag;
1528         struct ata_port *ap = qc->ap;
1529 
1530 #ifdef DEBUG_NCQ
1531         if (qc->tag > 0 || ap->link.sactive > 1)
1532                 dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
1533                          "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
1534                          __func__, ap->print_id, qc->tf.command,
1535                          ata_get_cmd_descript(qc->tf.command),
1536                          qc->tag, get_prot_descript(qc->tf.protocol),
1537                          ap->link.active_tag, ap->link.sactive);
1538 #endif
1539 
1540         if (!ata_is_ncq(qc->tf.protocol))
1541                 tag = 0;
1542         sata_dwc_qc_prep_by_tag(qc, tag);
1543 
1544         if (ata_is_ncq(qc->tf.protocol)) {
1545                 sactive = core_scr_read(SCR_ACTIVE);
1546                 sactive |= (0x00000001 << tag);
1547                 core_scr_write(SCR_ACTIVE, sactive);
1548 
1549                 dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
1550                         "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
1551                         sactive);
1552 
1553                 ap->ops->sff_tf_load(ap, &qc->tf);
1554                 sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
1555                                              SATA_DWC_CMD_ISSUED_PEND);
1556         } else {
1557                 ata_sff_qc_issue(qc);
1558         }
1559         return 0;
1560 }
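
/*
 * For NCQ commands sata_dwc_qc_issue() drives the issue sequence itself:
 * it sets the tag's bit in the SActive SCR, loads the taskfile with
 * sff_tf_load() and writes the command via sata_dwc_exec_command_by_tag().
 * Non-NCQ commands go through the generic ata_sff_qc_issue() path, which
 * for DMA protocols ends up in the bmdma_setup/bmdma_start callbacks
 * above.
 */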
1561 
1562 /*
1563  * Function : sata_dwc_qc_prep
1564  * arguments : ata_queued_cmd *qc
1565  * Return value : None
1566  * qc_prep for a particular queued command
1567  */
1568 
1569 static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
1570 {
1571         if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
1572                 return;
1573 
1574 #ifdef DEBUG_NCQ
1575         if (qc->tag > 0)
1576                 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
1577                          __func__, qc->tag, qc->ap->link.active_tag);
1578 
1579         return;
1580 #endif
1581 }
1582 
1583 static void sata_dwc_error_handler(struct ata_port *ap)
1584 {
1585         ata_sff_error_handler(ap);
1586 }
1587 
1588 static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
1589                               unsigned long deadline)
1590 {
1591         struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
1592         int ret;
1593 
1594         ret = sata_sff_hardreset(link, class, deadline);
1595 
1596         sata_dwc_enable_interrupts(hsdev);
1597 
1598         /* Reconfigure the DMA control register */
1599         out_le32(&hsdev->sata_dwc_regs->dmacr,
1600                  SATA_DWC_DMACR_TXRXCH_CLEAR);
1601 
1602         /* Reconfigure the DMA Burst Transaction Size register */
1603         out_le32(&hsdev->sata_dwc_regs->dbtsr,
1604                  SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1605                  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
1606 
1607         return ret;
1608 }
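
/*
 * After the generic SFF hardreset the driver re-enables its interrupt
 * and error masks and reprograms DMACR/DBTSR, mirroring the setup done
 * in sata_dwc_port_start(), so the port presumably comes out of a reset
 * with the same DMA configuration it was started with.
 */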
1609 
1610 /*
1611  * scsi mid-layer and libata interface structures
1612  */
1613 static struct scsi_host_template sata_dwc_sht = {
1614         ATA_NCQ_SHT(DRV_NAME),
1615         /*
1616          * test-only: Currently this driver doesn't handle NCQ
1617          * correctly. We enable NCQ but set the queue depth to a
1618          * max of 1. This will get fixed in a future release.
1619          */
1620         .sg_tablesize           = LIBATA_MAX_PRD,
1621         .can_queue              = ATA_DEF_QUEUE,        /* ATA_MAX_QUEUE */
1622         .dma_boundary           = ATA_DMA_BOUNDARY,
1623 };
1624 
1625 static struct ata_port_operations sata_dwc_ops = {
1626         .inherits               = &ata_sff_port_ops,
1627 
1628         .error_handler          = sata_dwc_error_handler,
1629         .hardreset              = sata_dwc_hardreset,
1630 
1631         .qc_prep                = sata_dwc_qc_prep,
1632         .qc_issue               = sata_dwc_qc_issue,
1633 
1634         .scr_read               = sata_dwc_scr_read,
1635         .scr_write              = sata_dwc_scr_write,
1636 
1637         .port_start             = sata_dwc_port_start,
1638         .port_stop              = sata_dwc_port_stop,
1639 
1640         .bmdma_setup            = sata_dwc_bmdma_setup,
1641         .bmdma_start            = sata_dwc_bmdma_start,
1642 };
1643 
1644 static const struct ata_port_info sata_dwc_port_info[] = {
1645         {
1646                 .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
1647                 .pio_mask       = ATA_PIO4,
1648                 .udma_mask      = ATA_UDMA6,
1649                 .port_ops       = &sata_dwc_ops,
1650         },
1651 };
1652 
1653 static int sata_dwc_probe(struct platform_device *ofdev)
1654 {
1655         struct sata_dwc_device *hsdev;
1656         u32 idr, versionr;
1657         char *ver = (char *)&versionr;
1658         u8 *base = NULL;
1659         int err = 0;
1660         int irq;
1661         struct ata_host *host;
1662         struct ata_port_info pi = sata_dwc_port_info[0];
1663         const struct ata_port_info *ppi[] = { &pi, NULL };
1664         struct device_node *np = ofdev->dev.of_node;
1665         u32 dma_chan;
1666 
1667         /* Allocate DWC SATA device */
1668         hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
1669         if (hsdev == NULL) {
1670                 dev_err(&ofdev->dev, "kzalloc failed for hsdev\n");
1671                 err = -ENOMEM;
1672                 goto error;
1673         }
1674 
1675         if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
1676                 dev_warn(&ofdev->dev, "no dma-channel property set."
1677                          " Using channel 0\n");
1678                 dma_chan = 0;
1679         }
1680         host_pvt.dma_channel = dma_chan;
1681 
1682         /* Ioremap SATA registers */
1683         base = of_iomap(ofdev->dev.of_node, 0);
1684         if (!base) {
1685                 dev_err(&ofdev->dev, "ioremap failed for SATA register"
1686                         " address\n");
1687                 err = -ENODEV;
1688                 goto error_kmalloc;
1689         }
1690         hsdev->reg_base = base;
1691         dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
1692 
1693         /* Synopsys DWC SATA specific Registers */
1694         hsdev->sata_dwc_regs = (void __iomem *)(base + SATA_DWC_REG_OFFSET);
1695 
1696         /* Allocate and fill host */
1697         host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
1698         if (!host) {
1699                 dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
1700                 err = -ENOMEM;
1701                 goto error_iomap;
1702         }
1703 
1704         host->private_data = hsdev;
1705 
1706         /* Setup port */
1707         host->ports[0]->ioaddr.cmd_addr = base;
1708         host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
1709         host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
1710         sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
1711 
1712         /* Read the ID and Version Registers */
1713         idr = in_le32(&hsdev->sata_dwc_regs->idr);
1714         versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
1715         dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
1716                    idr, ver[0], ver[1], ver[2]);
1717 
1718         /* Get SATA DMA interrupt number */
1719         irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
1720         if (irq == NO_IRQ) {
1721                 dev_err(&ofdev->dev, "no SATA DMA irq\n");
1722                 err = -ENODEV;
1723                 goto error_iomap;
1724         }
1725 
1726         /* Get physical SATA DMA register base address */
1727         host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1);
1728         if (!(host_pvt.sata_dma_regs)) {
1729                 dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
1730                         " address\n");
1731                 err = -ENODEV;
1732                 goto error_iomap;
1733         }
1734 
1735         /* Save dev for later use in dev_xxx() routines */
1736         host_pvt.dwc_dev = &ofdev->dev;
1737 
1738         /* Initialize AHB DMAC */
1739         err = dma_dwc_init(hsdev, irq);
1740         if (err)
1741                 goto error_dma_iomap;
1742 
1743         /* Enable SATA Interrupts */
1744         sata_dwc_enable_interrupts(hsdev);
1745 
1746         /* Get SATA interrupt number */
1747         irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1748         if (irq == NO_IRQ) {
1749                 dev_err(&ofdev->dev, "no SATA irq\n");
1750                 err = -ENODEV;
1751                 goto error_out;
1752         }
1753 
1754         /*
1755          * Now register with the libATA core; this will also initiate the
1756          * device discovery process, invoking our port_start() handler and
1757          * error_handler() to execute a dummy softreset EH session.
1758          */
1759         err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1760         if (err)
1761                 dev_err(&ofdev->dev, "failed to activate host\n");
1762 
1763         dev_set_drvdata(&ofdev->dev, host);
1764         return 0;
1765 
1766 error_out:
1767         /* Free SATA DMA resources */
1768         dma_dwc_exit(hsdev);
1769 error_dma_iomap:
1770         iounmap((void __iomem *)host_pvt.sata_dma_regs);
1771 error_iomap:
1772         iounmap(base);
1773 error_kmalloc:
1774         kfree(hsdev);
1775 error:
1776         return err;
1777 }
1778 
1779 static int sata_dwc_remove(struct platform_device *ofdev)
1780 {
1781         struct device *dev = &ofdev->dev;
1782         struct ata_host *host = dev_get_drvdata(dev);
1783         struct sata_dwc_device *hsdev = host->private_data;
1784 
1785         ata_host_detach(host);
1786         dev_set_drvdata(dev, NULL);
1787 
1788         /* Free SATA DMA resources */
1789         dma_dwc_exit(hsdev);
1790 
1791         iounmap((void __iomem *)host_pvt.sata_dma_regs);
1792         iounmap(hsdev->reg_base);
1793         kfree(hsdev);
1794         kfree(host);
1795         dev_dbg(&ofdev->dev, "done\n");
1796         return 0;
1797 }
1798 
1799 static const struct of_device_id sata_dwc_match[] = {
1800         { .compatible = "amcc,sata-460ex", },
1801         {}
1802 };
1803 MODULE_DEVICE_TABLE(of, sata_dwc_match);
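
/*
 * A device tree node matching this driver is expected to provide the
 * SATA register block at reg index 0 and the AHB DMA block at index 1,
 * the SATA interrupt at index 0 and the DMA interrupt at index 1, plus
 * the optional "dma-channel" property read in sata_dwc_probe(). An
 * illustrative sketch only; addresses, sizes and interrupt specifiers
 * below are placeholders, not taken from a real board file:
 *
 *	sata@bffd1000 {
 *		compatible = "amcc,sata-460ex";
 *		reg = <0xbffd1000 0x800>, <0xbffd0800 0x400>;
 *		interrupts = <26 4>, <25 4>;
 *		dma-channel = <0>;
 *	};
 */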
1804 
1805 static struct platform_driver sata_dwc_driver = {
1806         .driver = {
1807                 .name = DRV_NAME,
1808                 .of_match_table = sata_dwc_match,
1809         },
1810         .probe = sata_dwc_probe,
1811         .remove = sata_dwc_remove,
1812 };
1813 
1814 module_platform_driver(sata_dwc_driver);
1815 
1816 MODULE_LICENSE("GPL");
1817 MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
1818 MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
1819 MODULE_VERSION(DRV_VERSION);
1820 
