
Linux/drivers/dma/mpc512x_dma.c

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports
 * memory to memory data transfers (tested using dmatest module) and
 * data transfers between memory and peripheral I/O memory
 * by means of slave scatter/gather with these limitations:
 *  - chunked transfers (described by s/g lists with more than one item)
 *    are refused as long as proper support for scatter/gather is missing;
 *  - transfers on MPC8308 always start from software as this SoC appears
 *    not to have external request lines for peripheral flow control;
 *  - only peripheral devices with 4-byte FIFO access register are supported;
 *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
 *    source and destination addresses must be 4-byte aligned
 *    and transfer size must be aligned on (4 * maxburst) boundary;
 */
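
/*
 * A minimal client-side sketch of driving this engine through the generic
 * dmaengine API within the constraints above. The peripheral FIFO address
 * FIFO_PHYS_ADDR, the DMA-mapped buffer address 'buf_phys' and its length
 * 'len' (a multiple of 4 * maxburst) are illustrative assumptions, not
 * names from this driver; 'chan' is a channel on this controller obtained
 * via dma_request_channel():
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= FIFO_PHYS_ADDR,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 16,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct scatterlist sg;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	sg_init_table(&sg, 1);
 *	sg_dma_address(&sg) = buf_phys;
 *	sg_dma_len(&sg) = len;
 *	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM, 0);
 *	if (tx)
 *		dmaengine_submit(tx);
 */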

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS     64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET      0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX     16
#define MPC512x_DMACHAN_MAX     64
#define MPC_DMA_CHANNELS        64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG      (1 << 31)
#define MPC_DMA_DMACR_ERGA      (1 << 3)
#define MPC_DMA_DMACR_ERCA      (1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD       (1 << 31)
#define MPC_DMA_DMAES_GPE       (1 << 15)
#define MPC_DMA_DMAES_CPE       (1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
                                (((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE       (1 << 7)
#define MPC_DMA_DMAES_SOE       (1 << 6)
#define MPC_DMA_DMAES_DAE       (1 << 5)
#define MPC_DMA_DMAES_DOE       (1 << 4)
#define MPC_DMA_DMAES_NCE       (1 << 3)
#define MPC_DMA_DMAES_SGE       (1 << 2)
#define MPC_DMA_DMAES_SBE       (1 << 1)
#define MPC_DMA_DMAES_DBE       (1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE    (1 << 6)

#define MPC_DMA_TSIZE_1         0x00
#define MPC_DMA_TSIZE_2         0x01
#define MPC_DMA_TSIZE_4         0x02
#define MPC_DMA_TSIZE_16        0x04
#define MPC_DMA_TSIZE_32        0x05

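/*
 * The TSIZE values are the hardware encoding of the access width: a
 * transfer size of n bytes is encoded as log2(n), i.e. each access moves
 * 1 << TSIZE bytes (0x03, which would mean 8-byte accesses, is not used
 * by this driver).
 */
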
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
        /* 0x00 */
        u32 dmacr;              /* DMA control register */
        u32 dmaes;              /* DMA error status */
        /* 0x08 */
        u32 dmaerqh;            /* DMA enable request high(channels 63~32) */
        u32 dmaerql;            /* DMA enable request low(channels 31~0) */
        u32 dmaeeih;            /* DMA enable error interrupt high(ch63~32) */
        u32 dmaeeil;            /* DMA enable error interrupt low(ch31~0) */
        /* 0x18 */
        u8 dmaserq;             /* DMA set enable request */
        u8 dmacerq;             /* DMA clear enable request */
        u8 dmaseei;             /* DMA set enable error interrupt */
        u8 dmaceei;             /* DMA clear enable error interrupt */
        /* 0x1c */
        u8 dmacint;             /* DMA clear interrupt request */
        u8 dmacerr;             /* DMA clear error */
        u8 dmassrt;             /* DMA set start bit */
        u8 dmacdne;             /* DMA clear DONE status bit */
        /* 0x20 */
        u32 dmainth;            /* DMA interrupt request high(ch63~32) */
        u32 dmaintl;            /* DMA interrupt request low(ch31~0) */
        u32 dmaerrh;            /* DMA error high(ch63~32) */
        u32 dmaerrl;            /* DMA error low(ch31~0) */
        /* 0x30 */
        u32 dmahrsh;            /* DMA hardware request status high(ch63~32) */
        u32 dmahrsl;            /* DMA hardware request status low(ch31~0) */
        union {
                u32 dmaihsa;    /* DMA interrupt high select AXE(ch63~32) */
                u32 dmagpor;    /* (General purpose register on MPC8308) */
        };
        u32 dmailsa;            /* DMA interrupt low select AXE(ch31~0) */
        /* 0x40 ~ 0xff */
        u32 reserve0[48];       /* Reserved */
        /* 0x100 */
        u8 dchpri[MPC_DMA_CHANNELS];
                                /* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
        /* 0x00 */
        u32 saddr;              /* Source address */

        u32 smod:5;             /* Source address modulo */
        u32 ssize:3;            /* Source data transfer size */
        u32 dmod:5;             /* Destination address modulo */
        u32 dsize:3;            /* Destination data transfer size */
        u32 soff:16;            /* Signed source address offset */

        /* 0x08 */
        u32 nbytes;             /* Inner "minor" byte count */
        u32 slast;              /* Last source address adjustment */
        u32 daddr;              /* Destination address */

        /* 0x14 */
        u32 citer_elink:1;      /* Enable channel-to-channel linking on
                                 * minor loop complete
                                 */
        u32 citer_linkch:6;     /* Link channel for minor loop complete */
        u32 citer:9;            /* Current "major" iteration count */
        u32 doff:16;            /* Signed destination address offset */

        /* 0x18 */
        u32 dlast_sga;          /* Last destination address adjustment/scatter
                                 * gather address
                                 */

        /* 0x1c */
        u32 biter_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 biter_linkch:6;     /* Link channel for major loop complete */
        u32 biter:9;            /* Beginning "major" iteration count */
        u32 bwc:2;              /* Bandwidth control */
        u32 major_linkch:6;     /* Link channel number */
        u32 done:1;             /* Channel done */
        u32 active:1;           /* Channel active */
        u32 major_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 e_sg:1;             /* Enable scatter/gather processing */
        u32 d_req:1;            /* Disable request */
        u32 int_half:1;         /* Enable an interrupt when major counter is
                                 * half complete
                                 */
        u32 int_maj:1;          /* Enable an interrupt when major iteration
                                 * count completes
                                 */
        u32 start:1;            /* Channel start */
};
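
/*
 * The layout above mirrors the 32-byte hardware Transfer Control
 * Descriptor of the eDMA engine as seen from a big-endian PowerPC build;
 * mpc_dma_execute() below copies it verbatim into the per-channel TCD
 * registers with memcpy_toio(), so the structure must stay packed and
 * its fields must not be reordered.
 */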

struct mpc_dma_desc {
        struct dma_async_tx_descriptor  desc;
        struct mpc_dma_tcd              *tcd;
        dma_addr_t                      tcd_paddr;
        int                             error;
        struct list_head                node;
        int                             will_access_peripheral;
};

struct mpc_dma_chan {
        struct dma_chan                 chan;
        struct list_head                free;
        struct list_head                prepared;
        struct list_head                queued;
        struct list_head                active;
        struct list_head                completed;
        struct mpc_dma_tcd              *tcd;
        dma_addr_t                      tcd_paddr;

        /* Settings for access to peripheral FIFO */
        dma_addr_t                      src_per_paddr;
        u32                             src_tcd_nunits;
        dma_addr_t                      dst_per_paddr;
        u32                             dst_tcd_nunits;

        /* Lock for this structure */
        spinlock_t                      lock;
};
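
/*
 * Descriptor lifecycle: each descriptor sits on exactly one of the lists
 * above. It starts on 'free', moves to 'prepared' in the prep_*()
 * callbacks, to 'queued' in mpc_dma_tx_submit(), to 'active' when handed
 * to the hardware, to 'completed' from the interrupt handler, and back
 * to 'free' once the tasklet has run the completion callbacks. All moves
 * happen under 'lock'.
 */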

struct mpc_dma {
        struct dma_device               dma;
        struct tasklet_struct           tasklet;
        struct mpc_dma_chan             channels[MPC_DMA_CHANNELS];
        struct mpc_dma_regs __iomem     *regs;
        struct mpc_dma_tcd __iomem      *tcd;
        int                             irq;
        int                             irq2;
        uint                            error_status;
        int                             is_mpc8308;

        /* Lock for error_status field in this structure */
        spinlock_t                      error_status_lock;
};

#define DRV_NAME        "mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

        return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *      a) mchan->lock is acquired,
 *      b) mchan->active list is empty,
 *      c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        struct mpc_dma_desc *first = NULL;
        struct mpc_dma_desc *prev = NULL;
        struct mpc_dma_desc *mdesc;
        int cid = mchan->chan.chan_id;

        while (!list_empty(&mchan->queued)) {
                mdesc = list_first_entry(&mchan->queued,
                                                struct mpc_dma_desc, node);
                /*
                 * Grab either several mem-to-mem transfer descriptors
                 * or one peripheral transfer descriptor,
                 * don't mix mem-to-mem and peripheral transfer descriptors
                 * within the same 'active' list.
                 */
                if (mdesc->will_access_peripheral) {
                        if (list_empty(&mchan->active))
                                list_move_tail(&mdesc->node, &mchan->active);
                        break;
                } else {
                        list_move_tail(&mdesc->node, &mchan->active);
                }
        }

        /* Chain descriptors into one transaction */
        list_for_each_entry(mdesc, &mchan->active, node) {
                if (!first)
                        first = mdesc;

                if (!prev) {
                        prev = mdesc;
                        continue;
                }

                prev->tcd->dlast_sga = mdesc->tcd_paddr;
                prev->tcd->e_sg = 1;
                mdesc->tcd->start = 1;

                prev = mdesc;
        }

        prev->tcd->int_maj = 1;

        /* Send first descriptor in chain into hardware */
        memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

        if (first != prev)
                mdma->tcd[cid].e_sg = 1;

        if (mdma->is_mpc8308) {
                /* MPC8308, no request lines, software initiated start */
                out_8(&mdma->regs->dmassrt, cid);
        } else if (first->will_access_peripheral) {
                /* Peripherals involved, start by external request signal */
                out_8(&mdma->regs->dmaserq, cid);
        } else {
                /* Memory to memory transfer, software initiated start */
                out_8(&mdma->regs->dmassrt, cid);
        }
}
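
/*
 * For instance, with three mem-to-mem descriptors A, B and C on the
 * 'active' list, mpc_dma_execute() points A's dlast_sga at B's TCD and
 * B's at C's, sets e_sg on A and B and start on B and C, requests a
 * major-loop interrupt only on C, and then writes A into the channel's
 * hardware TCD; the engine fetches B and C by itself via scatter/gather
 * reloads.
 */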

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        u32 status = is | es;
        int ch;

        while ((ch = fls(status) - 1) >= 0) {
                status &= ~(1 << ch);
                mchan = &mdma->channels[ch + off];

                spin_lock(&mchan->lock);

                out_8(&mdma->regs->dmacint, ch + off);
                out_8(&mdma->regs->dmacerr, ch + off);

                /* Check error status */
                if (es & (1 << ch))
                        list_for_each_entry(mdesc, &mchan->active, node)
                                mdesc->error = -EIO;

                /* Execute queued descriptors */
                list_splice_tail_init(&mchan->active, &mchan->completed);
                if (!list_empty(&mchan->queued))
                        mpc_dma_execute(mchan);

                spin_unlock(&mchan->lock);
        }
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
        struct mpc_dma *mdma = data;
        uint es;

        /* Save error status register */
        es = in_be32(&mdma->regs->dmaes);
        spin_lock(&mdma->error_status_lock);
        if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
                mdma->error_status = es;
        spin_unlock(&mdma->error_status_lock);

        /* Handle interrupt on each channel */
        if (mdma->dma.chancnt > 32) {
                mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
                                        in_be32(&mdma->regs->dmaerrh), 32);
        }
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
                                        in_be32(&mdma->regs->dmaerrl), 0);

        /* Schedule tasklet */
        tasklet_schedule(&mdma->tasklet);

        return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
        dma_cookie_t last_cookie = 0;
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < mdma->dma.chancnt; i++) {
                mchan = &mdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                if (!list_empty(&mchan->completed))
                        list_splice_tail_init(&mchan->completed, &list);
                spin_unlock_irqrestore(&mchan->lock, flags);

                if (list_empty(&list))
                        continue;

                /* Execute callbacks and run dependencies */
                list_for_each_entry(mdesc, &list, node) {
                        desc = &mdesc->desc;

                        if (desc->callback)
                                desc->callback(desc->callback_param);

                        last_cookie = desc->cookie;
                        dma_run_dependencies(desc);
                }

                /* Free descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                list_splice_tail_init(&list, &mchan->free);
                mchan->chan.completed_cookie = last_cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);
        }
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
        struct mpc_dma *mdma = (void *)data;
        unsigned long flags;
        uint es;

        spin_lock_irqsave(&mdma->error_status_lock, flags);
        es = mdma->error_status;
        mdma->error_status = 0;
        spin_unlock_irqrestore(&mdma->error_status_lock, flags);

        /* Print nice error report */
        if (es) {
                dev_err(mdma->dma.dev,
                        "Hardware reported the following error(s) on channel %u:\n",
                        MPC_DMA_DMAES_ERRCHN(es));

                if (es & MPC_DMA_DMAES_GPE)
                        dev_err(mdma->dma.dev, "- Group Priority Error\n");
                if (es & MPC_DMA_DMAES_CPE)
                        dev_err(mdma->dma.dev, "- Channel Priority Error\n");
                if (es & MPC_DMA_DMAES_SAE)
                        dev_err(mdma->dma.dev, "- Source Address Error\n");
                if (es & MPC_DMA_DMAES_SOE)
                        dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_DAE)
                        dev_err(mdma->dma.dev, "- Destination Address Error\n");
                if (es & MPC_DMA_DMAES_DOE)
                        dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_NCE)
                        dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
                if (es & MPC_DMA_DMAES_SGE)
                        dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
                if (es & MPC_DMA_DMAES_SBE)
                        dev_err(mdma->dma.dev, "- Source Bus Error\n");
                if (es & MPC_DMA_DMAES_DBE)
                        dev_err(mdma->dma.dev, "- Destination Bus Error\n");
        }

        mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
        struct mpc_dma_desc *mdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        mdesc = container_of(txd, struct mpc_dma_desc, desc);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* If channel is idle, execute all queued descriptors */
        if (list_empty(&mchan->active))
                mpc_dma_execute(mchan);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);
        spin_unlock_irqrestore(&mchan->lock, flags);

        return cookie;
}
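
/*
 * A minimal mem-to-mem round trip through this path, assuming 'chan' was
 * obtained via dma_request_channel() and 'src'/'dst' are DMA-mapped
 * addresses; all names are illustrative and error handling is omitted:
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);	(this ends up in mpc_dma_tx_submit)
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *
 * Since mpc_dma_tx_submit() starts an idle channel right away,
 * dma_async_issue_pending() is a no-op in this driver (see below).
 */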

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        /* Alloc DMA memory for Transfer Control Descriptors */
        tcd = dma_alloc_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        &tcd_paddr, GFP_KERNEL);
        if (!tcd)
                return -ENOMEM;

        /* Alloc descriptors for this channel */
        for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
                mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
                if (!mdesc) {
                        dev_notice(mdma->dma.dev,
                                "Memory allocation error. Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&mdesc->desc, chan);
                mdesc->desc.flags = DMA_CTRL_ACK;
                mdesc->desc.tx_submit = mpc_dma_tx_submit;

                mdesc->tcd = &tcd[i];
                mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

                list_add_tail(&mdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        tcd, tcd_paddr);
                return -ENOMEM;
        }

        spin_lock_irqsave(&mchan->lock, flags);
        mchan->tcd = tcd;
        mchan->tcd_paddr = tcd_paddr;
        list_splice_tail_init(&descs, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Enable Error Interrupt */
        out_8(&mdma->regs->dmaseei, chan->chan_id);

        return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc, *tmp;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&mchan->prepared));
        BUG_ON(!list_empty(&mchan->queued));
        BUG_ON(!list_empty(&mchan->active));
        BUG_ON(!list_empty(&mchan->completed));

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);
        tcd = mchan->tcd;
        tcd_paddr = mchan->tcd_paddr;

        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Free DMA memory used by descriptors */
        dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        tcd, tcd_paddr);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node)
                kfree(mdesc);

        /* Disable Error Interrupt */
        out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
        /*
         * We are posting descriptors to the hardware as soon as
         * they are ready, so this function does nothing.
         */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
               struct dma_tx_state *txstate)
{
        return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                                        size_t len, unsigned long flags)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, iflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
                                                                        node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, iflags);

        if (!mdesc) {
                /* Try to free completed descriptors */
                mpc_dma_process_completed(mdma);
                return NULL;
        }

        mdesc->error = 0;
        mdesc->will_access_peripheral = 0;
        tcd = mdesc->tcd;

        /* Prepare Transfer Control Descriptor for this transaction */
        memset(tcd, 0, sizeof(struct mpc_dma_tcd));

        if (IS_ALIGNED(src | dst | len, 32)) {
                tcd->ssize = MPC_DMA_TSIZE_32;
                tcd->dsize = MPC_DMA_TSIZE_32;
                tcd->soff = 32;
                tcd->doff = 32;
        } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
                /* MPC8308 doesn't support 16 byte transfers */
                tcd->ssize = MPC_DMA_TSIZE_16;
                tcd->dsize = MPC_DMA_TSIZE_16;
                tcd->soff = 16;
                tcd->doff = 16;
        } else if (IS_ALIGNED(src | dst | len, 4)) {
                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;
                tcd->soff = 4;
                tcd->doff = 4;
        } else if (IS_ALIGNED(src | dst | len, 2)) {
                tcd->ssize = MPC_DMA_TSIZE_2;
                tcd->dsize = MPC_DMA_TSIZE_2;
                tcd->soff = 2;
                tcd->doff = 2;
        } else {
                tcd->ssize = MPC_DMA_TSIZE_1;
                tcd->dsize = MPC_DMA_TSIZE_1;
                tcd->soff = 1;
                tcd->doff = 1;
        }

        tcd->saddr = src;
        tcd->daddr = dst;
        tcd->nbytes = len;
        tcd->biter = 1;
        tcd->citer = 1;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return &mdesc->desc;
}
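
/*
 * Example of the transfer-size selection above: src = 0x1000,
 * dst = 0x2020 and len = 96 leave (src | dst | len) aligned on 32, so the
 * copy runs as three 32-byte accesses in a single minor loop (nbytes = 96,
 * citer = biter = 1); an odd address or length anywhere demotes the whole
 * transfer to single-byte accesses.
 */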

/* Prepare descriptor for a slave transfer between memory and a peripheral */
static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        dma_addr_t per_paddr;
        u32 tcd_nunits;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;
        struct scatterlist *sg;
        size_t len;
        int iter, i;

        /* Currently there is no proper support for scatter/gather */
        if (sg_len != 1)
                return NULL;

        if (!is_slave_direction(direction))
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                spin_lock_irqsave(&mchan->lock, iflags);

                /* list_first_entry() is unsafe on an empty list */
                mdesc = list_first_entry_or_null(&mchan->free,
                                                struct mpc_dma_desc, node);
                if (!mdesc) {
                        spin_unlock_irqrestore(&mchan->lock, iflags);
                        /* Try to free completed descriptors */
                        mpc_dma_process_completed(mdma);
                        return NULL;
                }

                list_del(&mdesc->node);

                if (direction == DMA_DEV_TO_MEM) {
                        per_paddr = mchan->src_per_paddr;
                        tcd_nunits = mchan->src_tcd_nunits;
                } else {
                        per_paddr = mchan->dst_per_paddr;
                        tcd_nunits = mchan->dst_tcd_nunits;
                }

                spin_unlock_irqrestore(&mchan->lock, iflags);

                if (per_paddr == 0 || tcd_nunits == 0)
                        goto err_prep;

                mdesc->error = 0;
                mdesc->will_access_peripheral = 1;

                /* Prepare Transfer Control Descriptor for this transaction */
                tcd = mdesc->tcd;

                memset(tcd, 0, sizeof(struct mpc_dma_tcd));

                if (!IS_ALIGNED(sg_dma_address(sg), 4))
                        goto err_prep;

                if (direction == DMA_DEV_TO_MEM) {
                        tcd->saddr = per_paddr;
                        tcd->daddr = sg_dma_address(sg);
                        tcd->soff = 0;
                        tcd->doff = 4;
                } else {
                        tcd->saddr = sg_dma_address(sg);
                        tcd->daddr = per_paddr;
                        tcd->soff = 4;
                        tcd->doff = 0;
                }

                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;

                len = sg_dma_len(sg);
                tcd->nbytes = tcd_nunits * 4;
                if (!IS_ALIGNED(len, tcd->nbytes))
                        goto err_prep;

                iter = len / tcd->nbytes;
                if (iter >= 1 << 15) {
                        /* len is too big */
                        goto err_prep;
                }
                /* citer_linkch contains the high bits of iter */
                tcd->biter = iter & 0x1ff;
                tcd->biter_linkch = iter >> 9;
                tcd->citer = tcd->biter;
                tcd->citer_linkch = tcd->biter_linkch;

                tcd->e_sg = 0;
                tcd->d_req = 1;

                /* Place descriptor in prepared list */
                spin_lock_irqsave(&mchan->lock, iflags);
                list_add_tail(&mdesc->node, &mchan->prepared);
                spin_unlock_irqrestore(&mchan->lock, iflags);
        }

        return &mdesc->desc;

err_prep:
        /* Put the descriptor back */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return NULL;
}
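
/*
 * Worked example for the math above: with maxburst = 16 configured,
 * tcd_nunits = 16 and nbytes = 64, so a 4 KiB scatterlist entry gives
 * iter = 4096 / 64 = 64 major iterations; biter/citer take the low nine
 * bits (64) and biter_linkch/citer_linkch the high bits (0). Requests
 * where len is not a multiple of 4 * maxburst, or where iter would not
 * fit in 15 bits, are refused.
 */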

static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                                                        unsigned long arg)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma *mdma;
        struct dma_slave_config *cfg;
        unsigned long flags;

        mchan = dma_chan_to_mpc_dma_chan(chan);
        switch (cmd) {
        case DMA_TERMINATE_ALL:
                /* Disable channel requests */
                mdma = dma_chan_to_mpc_dma(chan);

                spin_lock_irqsave(&mchan->lock, flags);

                out_8(&mdma->regs->dmacerq, chan->chan_id);
                list_splice_tail_init(&mchan->prepared, &mchan->free);
                list_splice_tail_init(&mchan->queued, &mchan->free);
                list_splice_tail_init(&mchan->active, &mchan->free);

                spin_unlock_irqrestore(&mchan->lock, flags);

                return 0;

        case DMA_SLAVE_CONFIG:
                /*
                 * Software constraints:
                 *  - only transfers between a peripheral device and
                 *     memory are supported;
                 *  - only peripheral devices with 4-byte FIFO access register
                 *     are supported;
                 *  - minimal transfer chunk is 4 bytes and consequently
                 *     source and destination addresses must be 4-byte aligned
                 *     and transfer size must be aligned on (4 * maxburst)
                 *     boundary;
                 *  - during the transfer RAM address is being incremented by
                 *     the size of minimal transfer chunk;
                 *  - peripheral port's address is constant during the transfer.
                 */

                cfg = (void *)arg;

                if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
                    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
                    !IS_ALIGNED(cfg->src_addr, 4) ||
                    !IS_ALIGNED(cfg->dst_addr, 4)) {
                        return -EINVAL;
                }

                spin_lock_irqsave(&mchan->lock, flags);

                mchan->src_per_paddr = cfg->src_addr;
                mchan->src_tcd_nunits = cfg->src_maxburst;
                mchan->dst_per_paddr = cfg->dst_addr;
                mchan->dst_tcd_nunits = cfg->dst_maxburst;

                /* Apply defaults */
                if (mchan->src_tcd_nunits == 0)
                        mchan->src_tcd_nunits = 1;
                if (mchan->dst_tcd_nunits == 0)
                        mchan->dst_tcd_nunits = 1;

                spin_unlock_irqrestore(&mchan->lock, flags);

                return 0;

        default:
                /* Unknown command */
                break;
        }

        return -ENXIO;
}
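
/*
 * Clients normally reach this callback through the generic dmaengine
 * wrappers: dmaengine_slave_config(chan, &cfg) issues DMA_SLAVE_CONFIG
 * and dmaengine_terminate_all(chan) issues DMA_TERMINATE_ALL.
 */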

static int mpc_dma_probe(struct platform_device *op)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct mpc_dma *mdma;
        struct mpc_dma_chan *mchan;
        struct resource res;
        ulong regs_start, regs_size;
        int retval, i;

        mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
        if (!mdma) {
                dev_err(dev, "Memory exhausted!\n");
                retval = -ENOMEM;
                goto err;
        }

        mdma->irq = irq_of_parse_and_map(dn, 0);
        if (mdma->irq == NO_IRQ) {
                dev_err(dev, "Error mapping IRQ!\n");
                retval = -EINVAL;
                goto err;
        }

        if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
                mdma->is_mpc8308 = 1;
                mdma->irq2 = irq_of_parse_and_map(dn, 1);
                if (mdma->irq2 == NO_IRQ) {
                        dev_err(dev, "Error mapping IRQ!\n");
                        retval = -EINVAL;
                        goto err_dispose1;
                }
        }

        retval = of_address_to_resource(dn, 0, &res);
        if (retval) {
                dev_err(dev, "Error parsing memory region!\n");
                goto err_dispose2;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);

        if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
                dev_err(dev, "Error requesting memory region!\n");
                retval = -EBUSY;
                goto err_dispose2;
        }

        mdma->regs = devm_ioremap(dev, regs_start, regs_size);
        if (!mdma->regs) {
                dev_err(dev, "Error mapping memory region!\n");
                retval = -ENOMEM;
                goto err_dispose2;
        }

        mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
                                                        + MPC_DMA_TCD_OFFSET);

        retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
        if (retval) {
                dev_err(dev, "Error requesting IRQ!\n");
                retval = -EINVAL;
                goto err_dispose2;
        }

        if (mdma->is_mpc8308) {
                retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
                                                        DRV_NAME, mdma);
                if (retval) {
                        dev_err(dev, "Error requesting IRQ2!\n");
                        retval = -EINVAL;
                        goto err_free1;
                }
        }

        spin_lock_init(&mdma->error_status_lock);

        dma = &mdma->dma;
        dma->dev = dev;
        if (mdma->is_mpc8308)
                dma->chancnt = MPC8308_DMACHAN_MAX;
        else
                dma->chancnt = MPC512x_DMACHAN_MAX;
        dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = mpc_dma_free_chan_resources;
        dma->device_issue_pending = mpc_dma_issue_pending;
        dma->device_tx_status = mpc_dma_tx_status;
        dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
        dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
        dma->device_control = mpc_dma_device_control;

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);

        for (i = 0; i < dma->chancnt; i++) {
                mchan = &mdma->channels[i];

                mchan->chan.device = dma;
                dma_cookie_init(&mchan->chan);

                INIT_LIST_HEAD(&mchan->free);
                INIT_LIST_HEAD(&mchan->prepared);
                INIT_LIST_HEAD(&mchan->queued);
                INIT_LIST_HEAD(&mchan->active);
                INIT_LIST_HEAD(&mchan->completed);

                spin_lock_init(&mchan->lock);
                list_add_tail(&mchan->chan.device_node, &dma->channels);
        }

        tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

        /*
         * Configure DMA Engine:
         * - Dynamic clock,
         * - Round-robin group arbitration,
         * - Round-robin channel arbitration.
         */
        if (mdma->is_mpc8308) {
                /* MPC8308 has 16 channels and lacks some registers */
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

                /* Enable snooping */
                out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmaintl, 0xFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFF);
        } else {
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
                                        MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

                /* Disable hardware DMA requests */
                out_be32(&mdma->regs->dmaerqh, 0);
                out_be32(&mdma->regs->dmaerql, 0);

                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeih, 0);
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

                /* Route interrupts to IPIC */
                out_be32(&mdma->regs->dmaihsa, 0);
                out_be32(&mdma->regs->dmailsa, 0);
        }

        /* Register DMA engine */
        dev_set_drvdata(dev, mdma);
        retval = dma_async_device_register(dma);
        if (retval)
                goto err_free2;

        return retval;

err_free2:
        if (mdma->is_mpc8308)
                free_irq(mdma->irq2, mdma);
err_free1:
        free_irq(mdma->irq, mdma);
err_dispose2:
        if (mdma->is_mpc8308)
                irq_dispose_mapping(mdma->irq2);
err_dispose1:
        irq_dispose_mapping(mdma->irq);
err:
        return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct mpc_dma *mdma = dev_get_drvdata(dev);

        dma_async_device_unregister(&mdma->dma);
        if (mdma->is_mpc8308) {
                free_irq(mdma->irq2, mdma);
                irq_dispose_mapping(mdma->irq2);
        }
        free_irq(mdma->irq, mdma);
        irq_dispose_mapping(mdma->irq);

        return 0;
}

static struct of_device_id mpc_dma_match[] = {
        { .compatible = "fsl,mpc5121-dma", },
        { .compatible = "fsl,mpc8308-dma", },
        {},
};

static struct platform_driver mpc_dma_driver = {
        .probe          = mpc_dma_probe,
        .remove         = mpc_dma_remove,
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table = mpc_dma_match,
        },
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
