Version:  2.0.40 2.2.26 2.4.37 2.6.39 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15

Linux/drivers/dma/mpc512x_dma.c

  1 /*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
  3  * Copyright (C) Semihalf 2009
  4  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
  5  *
  6  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  7  * (defines, structures and comments) was taken from MPC5121 DMA driver
  8  * written by Hongjun Chen <hong-jun.chen@freescale.com>.
  9  *
 10  * Approved as OSADL project by a majority of OSADL members and funded
 11  * by OSADL membership fees in 2009;  for details see www.osadl.org.
 12  *
 13  * This program is free software; you can redistribute it and/or modify it
 14  * under the terms of the GNU General Public License as published by the Free
 15  * Software Foundation; either version 2 of the License, or (at your option)
 16  * any later version.
 17  *
 18  * This program is distributed in the hope that it will be useful, but WITHOUT
 19  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 20  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 21  * more details.
 22  *
 23  * You should have received a copy of the GNU General Public License along with
 24  * this program; if not, write to the Free Software Foundation, Inc., 59
 25  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 26  *
 27  * The full GNU General Public License is included in this distribution in the
 28  * file called COPYING.
 29  */
 30 
 31 /*
 32  * This is initial version of MPC5121 DMA driver. Only memory to memory
 33  * transfers are supported (tested using dmatest module).
 34  */
 35 
 36 #include <linux/module.h>
 37 #include <linux/dmaengine.h>
 38 #include <linux/dma-mapping.h>
 39 #include <linux/interrupt.h>
 40 #include <linux/io.h>
 41 #include <linux/slab.h>
 42 #include <linux/of_address.h>
 43 #include <linux/of_device.h>
 44 #include <linux/of_irq.h>
 45 #include <linux/of_platform.h>
 46 
 47 #include <linux/random.h>
 48 
 49 #include "dmaengine.h"
 50 
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS     64

/* Macro definitions */
#define MPC_DMA_CHANNELS        64
/* Offset of the TCD array from the start of the register block */
#define MPC_DMA_TCD_OFFSET      0x1000

/* Arbitration mode of group and channel (bits of the DMACR register) */
#define MPC_DMA_DMACR_EDCG      (1 << 31)
#define MPC_DMA_DMACR_ERGA      (1 << 3)
#define MPC_DMA_DMACR_ERCA      (1 << 2)

/* Error codes (bits of the DMAES register) */
#define MPC_DMA_DMAES_VLD       (1 << 31)
#define MPC_DMA_DMAES_GPE       (1 << 15)
#define MPC_DMA_DMAES_CPE       (1 << 14)
/* Extract the number of the channel that raised the error */
#define MPC_DMA_DMAES_ERRCHN(err) \
                                (((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE       (1 << 7)
#define MPC_DMA_DMAES_SOE       (1 << 6)
#define MPC_DMA_DMAES_DAE       (1 << 5)
#define MPC_DMA_DMAES_DOE       (1 << 4)
#define MPC_DMA_DMAES_NCE       (1 << 3)
#define MPC_DMA_DMAES_SGE       (1 << 2)
#define MPC_DMA_DMAES_SBE       (1 << 1)
#define MPC_DMA_DMAES_DBE       (1 << 0)

/* MPC8308 general purpose register: snoop enable bit */
#define MPC_DMA_DMAGPOR_SNOOP_ENABLE    (1 << 6)

/* Transfer size encodings for the TCD ssize/dsize fields */
#define MPC_DMA_TSIZE_1         0x00
#define MPC_DMA_TSIZE_2         0x01
#define MPC_DMA_TSIZE_4         0x02
#define MPC_DMA_TSIZE_16        0x04
#define MPC_DMA_TSIZE_32        0x05
 86 /* MPC5121 DMA engine registers */
/*
 * MPC5121 DMA engine registers.
 *
 * Packed so the structure mirrors the hardware register map exactly;
 * offsets are noted in the comments.  Accessed via in_be32()/out_be32()
 * and out_8() (big-endian MMIO) throughout this driver.
 */
struct __attribute__ ((__packed__)) mpc_dma_regs {
        /* 0x00 */
        u32 dmacr;              /* DMA control register */
        u32 dmaes;              /* DMA error status */
        /* 0x08 */
        u32 dmaerqh;            /* DMA enable request high(channels 63~32) */
        u32 dmaerql;            /* DMA enable request low(channels 31~0) */
        u32 dmaeeih;            /* DMA enable error interrupt high(ch63~32) */
        u32 dmaeeil;            /* DMA enable error interrupt low(ch31~0) */
        /* 0x18 */
        u8 dmaserq;             /* DMA set enable request */
        u8 dmacerq;             /* DMA clear enable request */
        u8 dmaseei;             /* DMA set enable error interrupt */
        u8 dmaceei;             /* DMA clear enable error interrupt */
        /* 0x1c */
        u8 dmacint;             /* DMA clear interrupt request */
        u8 dmacerr;             /* DMA clear error */
        u8 dmassrt;             /* DMA set start bit */
        u8 dmacdne;             /* DMA clear DONE status bit */
        /* 0x20 */
        u32 dmainth;            /* DMA interrupt request high(ch63~32) */
        u32 dmaintl;            /* DMA interrupt request low(ch31~0) */
        u32 dmaerrh;            /* DMA error high(ch63~32) */
        u32 dmaerrl;            /* DMA error low(ch31~0) */
        /* 0x30 */
        u32 dmahrsh;            /* DMA hw request status high(ch63~32) */
        u32 dmahrsl;            /* DMA hardware request status low(ch31~0) */
        union {
                u32 dmaihsa;    /* DMA interrupt high select AXE(ch63~32) */
                u32 dmagpor;    /* (General purpose register on MPC8308) */
        };
        u32 dmailsa;            /* DMA interrupt low select AXE(ch31~0) */
        /* 0x40 ~ 0xff */
        u32 reserve0[48];       /* Reserved */
        /* 0x100 */
        u8 dchpri[MPC_DMA_CHANNELS];
        /* DMA channels(0~63) priority */
};
125 
/*
 * MPC5121 DMA Transfer Control Descriptor (TCD).
 *
 * Mirrors the 32-byte hardware TCD exactly -- it is copied verbatim into
 * the controller with memcpy_toio() in mpc_dma_execute() -- hence the
 * packed attribute and the precise bitfield layout below.
 */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
        /* 0x00 */
        u32 saddr;              /* Source address */

        u32 smod:5;             /* Source address modulo */
        u32 ssize:3;            /* Source data transfer size */
        u32 dmod:5;             /* Destination address modulo */
        u32 dsize:3;            /* Destination data transfer size */
        u32 soff:16;            /* Signed source address offset */

        /* 0x08 */
        u32 nbytes;             /* Inner "minor" byte count */
        u32 slast;              /* Last source address adjustment */
        u32 daddr;              /* Destination address */

        /* 0x14 */
        u32 citer_elink:1;      /* Enable channel-to-channel linking on
                                 * minor loop complete
                                 */
        u32 citer_linkch:6;     /* Link channel for minor loop complete */
        u32 citer:9;            /* Current "major" iteration count */
        u32 doff:16;            /* Signed destination address offset */

        /* 0x18 */
        u32 dlast_sga;          /* Last Destination address adjustment/scatter
                                 * gather address
                                 */

        /* 0x1c */
        u32 biter_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 biter_linkch:6;     /* Link channel for major loop complete */
        u32 biter:9;            /* Beginning "major" iteration count */
        u32 bwc:2;              /* Bandwidth control */
        u32 major_linkch:6;     /* Link channel number */
        u32 done:1;             /* Channel done */
        u32 active:1;           /* Channel active */
        u32 major_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 e_sg:1;             /* Enable scatter/gather processing */
        u32 d_req:1;            /* Disable request */
        u32 int_half:1;         /* Enable an interrupt when major counter is
                                 * half complete
                                 */
        u32 int_maj:1;          /* Enable an interrupt when major iteration
                                 * count completes
                                 */
        u32 start:1;            /* Channel start */
};
177 
/* Software DMA transfer descriptor; wraps one hardware TCD */
struct mpc_dma_desc {
        struct dma_async_tx_descriptor  desc;           /* dmaengine cookie/callback bookkeeping */
        struct mpc_dma_tcd              *tcd;           /* CPU pointer to this descriptor's TCD */
        dma_addr_t                      tcd_paddr;      /* DMA address of the TCD (used for S/G linking) */
        int                             error;          /* set to -EIO by the IRQ path on channel error */
        struct list_head                node;           /* linkage through the per-channel lists */
};
185 
/*
 * Per-channel state.  A descriptor moves through the lists in this order:
 * free -> prepared (prep_memcpy) -> queued (tx_submit) -> active (execute)
 * -> completed (IRQ) -> free again (mpc_dma_process_completed).
 */
struct mpc_dma_chan {
        struct dma_chan                 chan;
        struct list_head                free;
        struct list_head                prepared;
        struct list_head                queued;
        struct list_head                active;
        struct list_head                completed;
        struct mpc_dma_tcd              *tcd;           /* coherent TCD array for this channel */
        dma_addr_t                      tcd_paddr;      /* DMA address of that array */

        /* Lock for this structure */
        spinlock_t                      lock;
};
199 
/* Driver-wide state: the dmaengine device plus controller resources */
struct mpc_dma {
        struct dma_device               dma;
        struct tasklet_struct           tasklet;        /* bottom half: error report + completions */
        struct mpc_dma_chan             channels[MPC_DMA_CHANNELS];
        struct mpc_dma_regs __iomem     *regs;
        struct mpc_dma_tcd __iomem      *tcd;           /* hardware TCD array at regs + MPC_DMA_TCD_OFFSET */
        int                             irq;
        int                             irq2;           /* second IRQ line, MPC8308 only */
        uint                            error_status;   /* latched DMAES value for the tasklet */
        int                             is_mpc8308;     /* MPC8308 variant: 16 channels, fewer registers */

        /* Lock for error_status field in this structure */
        spinlock_t                      error_status_lock;
};
214 
215 #define DRV_NAME        "mpc512x_dma"
216 
/* Convert struct dma_chan to the enclosing struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct mpc_dma_chan, chan);
}
222 
/*
 * Convert struct dma_chan to the owning struct mpc_dma.
 *
 * The channel is element chan_id of mdma->channels[], so container_of()
 * against channels[c->chan_id] lands on the start of struct mpc_dma.
 */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
        return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
229 
/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *      a) mchan->lock is acquired,
 *      b) mchan->active list is empty,
 *      c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        struct mpc_dma_desc *first = NULL;
        struct mpc_dma_desc *prev = NULL;
        struct mpc_dma_desc *mdesc;
        int cid = mchan->chan.chan_id;

        /* Move all queued descriptors to active list */
        list_splice_tail_init(&mchan->queued, &mchan->active);

        /* Chain descriptors into one transaction */
        list_for_each_entry(mdesc, &mchan->active, node) {
                if (!first)
                        first = mdesc;

                if (!prev) {
                        prev = mdesc;
                        continue;
                }

                /* Link previous TCD to this one via scatter/gather address */
                prev->tcd->dlast_sga = mdesc->tcd_paddr;
                prev->tcd->e_sg = 1;
                mdesc->tcd->start = 1;

                prev = mdesc;
        }

        /* Interrupt only when the last descriptor in the chain completes */
        prev->tcd->int_maj = 1;

        /* Send first descriptor in chain into hardware */
        memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

        /* Enable scatter/gather in the hardware TCD if a chain was built */
        if (first != prev)
                mdma->tcd[cid].e_sg = 1;
        /* Kick the channel with a software start request */
        out_8(&mdma->regs->dmassrt, cid);
}
275 
/*
 * Handle interrupt on one half of DMA controller (32 channels).
 *
 * @is:  interrupt status bits (one per channel)
 * @es:  error status bits (one per channel)
 * @off: channel number offset of this half (0 or 32)
 *
 * Called from hard-IRQ context; takes each affected channel's lock
 * without disabling interrupts (they are already off).
 */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        u32 status = is | es;
        int ch;

        /* Service every channel with either an interrupt or an error bit */
        while ((ch = fls(status) - 1) >= 0) {
                status &= ~(1 << ch);
                mchan = &mdma->channels[ch + off];

                spin_lock(&mchan->lock);

                /* Acknowledge interrupt and error for this channel */
                out_8(&mdma->regs->dmacint, ch + off);
                out_8(&mdma->regs->dmacerr, ch + off);

                /* Check error status */
                if (es & (1 << ch))
                        list_for_each_entry(mdesc, &mchan->active, node)
                                mdesc->error = -EIO;

                /* Retire active descriptors, then execute queued ones */
                list_splice_tail_init(&mchan->active, &mchan->completed);
                if (!list_empty(&mchan->queued))
                        mpc_dma_execute(mchan);

                spin_unlock(&mchan->lock);
        }
}
306 
/*
 * Interrupt handler.
 *
 * Latches the first error status seen into mdma->error_status (read and
 * cleared later by the tasklet), acknowledges per-channel interrupts and
 * errors, and defers completion processing to the tasklet.
 */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
        struct mpc_dma *mdma = data;
        uint es;

        /* Save error status register */
        es = in_be32(&mdma->regs->dmaes);
        spin_lock(&mdma->error_status_lock);
        /* Keep the first valid error until the tasklet consumes it */
        if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
                mdma->error_status = es;
        spin_unlock(&mdma->error_status_lock);

        /* Handle interrupt on each channel */
        if (mdma->dma.chancnt > 32) {
                mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
                                        in_be32(&mdma->regs->dmaerrh), 32);
        }
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
                                        in_be32(&mdma->regs->dmaerrl), 0);

        /* Schedule tasklet */
        tasklet_schedule(&mdma->tasklet);

        return IRQ_HANDLED;
}
333 
/*
 * Process completed descriptors on every channel: run client callbacks
 * and dependencies outside the channel lock, then return the descriptors
 * to the free list and advance completed_cookie.
 */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
        dma_cookie_t last_cookie = 0;
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < mdma->dma.chancnt; i++) {
                mchan = &mdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                if (!list_empty(&mchan->completed))
                        list_splice_tail_init(&mchan->completed, &list);
                spin_unlock_irqrestore(&mchan->lock, flags);

                if (list_empty(&list))
                        continue;

                /* Execute callbacks and run dependencies (lock dropped) */
                list_for_each_entry(mdesc, &list, node) {
                        desc = &mdesc->desc;

                        if (desc->callback)
                                desc->callback(desc->callback_param);

                        last_cookie = desc->cookie;
                        dma_run_dependencies(desc);
                }

                /* Free descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                list_splice_tail_init(&list, &mchan->free);
                mchan->chan.completed_cookie = last_cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);
        }
}
375 
/*
 * DMA Tasklet: consume the error status latched by the IRQ handler,
 * print a decoded error report if any, then process completions.
 */
static void mpc_dma_tasklet(unsigned long data)
{
        struct mpc_dma *mdma = (void *)data;
        unsigned long flags;
        uint es;

        /* Atomically take and clear the latched error status */
        spin_lock_irqsave(&mdma->error_status_lock, flags);
        es = mdma->error_status;
        mdma->error_status = 0;
        spin_unlock_irqrestore(&mdma->error_status_lock, flags);

        /* Print nice error report */
        if (es) {
                dev_err(mdma->dma.dev,
                        "Hardware reported following error(s) on channel %u:\n",
                                                      MPC_DMA_DMAES_ERRCHN(es));

                if (es & MPC_DMA_DMAES_GPE)
                        dev_err(mdma->dma.dev, "- Group Priority Error\n");
                if (es & MPC_DMA_DMAES_CPE)
                        dev_err(mdma->dma.dev, "- Channel Priority Error\n");
                if (es & MPC_DMA_DMAES_SAE)
                        dev_err(mdma->dma.dev, "- Source Address Error\n");
                if (es & MPC_DMA_DMAES_SOE)
                        dev_err(mdma->dma.dev, "- Source Offset"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_DAE)
                        dev_err(mdma->dma.dev, "- Destination Address"
                                                                " Error\n");
                if (es & MPC_DMA_DMAES_DOE)
                        dev_err(mdma->dma.dev, "- Destination Offset"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_NCE)
                        dev_err(mdma->dma.dev, "- NBytes/Citter"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_SGE)
                        dev_err(mdma->dma.dev, "- Scatter/Gather"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_SBE)
                        dev_err(mdma->dma.dev, "- Source Bus Error\n");
                if (es & MPC_DMA_DMAES_DBE)
                        dev_err(mdma->dma.dev, "- Destination Bus Error\n");
        }

        mpc_dma_process_completed(mdma);
}
423 
/*
 * Submit descriptor to hardware.
 *
 * Moves the prepared descriptor onto the queued list and starts it
 * immediately if the channel is idle; there is no separate "issue"
 * step (see mpc_dma_issue_pending).  Returns the assigned cookie.
 */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
        struct mpc_dma_desc *mdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        mdesc = container_of(txd, struct mpc_dma_desc, desc);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* If channel is idle, execute all queued descriptors */
        if (list_empty(&mchan->active))
                mpc_dma_execute(mchan);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);
        spin_unlock_irqrestore(&mchan->lock, flags);

        return cookie;
}
449 
/*
 * Alloc channel resources.
 *
 * Allocates one coherent array of MPC_DMA_DESCRIPTORS TCDs plus a
 * software descriptor for each, places them on the channel's free list
 * and enables the channel's error interrupt.  Succeeds if at least one
 * descriptor could be allocated; returns -ENOMEM otherwise.
 */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        /* Alloc DMA memory for Transfer Control Descriptors */
        tcd = dma_alloc_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                        &tcd_paddr, GFP_KERNEL);
        if (!tcd)
                return -ENOMEM;

        /* Alloc descriptors for this channel */
        for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
                mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
                if (!mdesc) {
                        dev_notice(mdma->dma.dev, "Memory allocation error. "
                                        "Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&mdesc->desc, chan);
                mdesc->desc.flags = DMA_CTRL_ACK;
                mdesc->desc.tx_submit = mpc_dma_tx_submit;

                /* Pair the software descriptor with its TCD slot */
                mdesc->tcd = &tcd[i];
                mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

                list_add_tail(&mdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                                tcd, tcd_paddr);
                return -ENOMEM;
        }

        spin_lock_irqsave(&mchan->lock, flags);
        mchan->tcd = tcd;
        mchan->tcd_paddr = tcd_paddr;
        list_splice_tail_init(&descs, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Enable Error Interrupt */
        out_8(&mdma->regs->dmaseei, chan->chan_id);

        return 0;
}
507 
/*
 * Free channel resources.
 *
 * Counterpart of mpc_dma_alloc_chan_resources(): releases the coherent
 * TCD array and all software descriptors, and disables the channel's
 * error interrupt.  The channel must be idle (all descriptors on the
 * free list) when this is called.
 */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc, *tmp;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&mchan->prepared));
        BUG_ON(!list_empty(&mchan->queued));
        BUG_ON(!list_empty(&mchan->active));
        BUG_ON(!list_empty(&mchan->completed));

        /* Move data out of the lock before freeing */
        list_splice_tail_init(&mchan->free, &descs);
        tcd = mchan->tcd;
        tcd_paddr = mchan->tcd_paddr;

        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Free DMA memory used by descriptors */
        dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                                tcd, tcd_paddr);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node)
                kfree(mdesc);

        /* Disable Error Interrupt */
        out_8(&mdma->regs->dmaceei, chan->chan_id);
}
546 
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
        /*
         * Intentionally empty: descriptors are pushed to the controller
         * the moment they are submitted (mpc_dma_tx_submit), so nothing
         * is ever left pending for this callback to flush.
         */
}
555 
556 /* Check request completion status */
557 static enum dma_status
558 mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
559                struct dma_tx_state *txstate)
560 {
561         return dma_cookie_status(chan, cookie, txstate);
562 }
563 
/*
 * Prepare descriptor for memory to memory copy.
 *
 * Takes a descriptor from the channel's free list and fills its TCD for
 * a single-iteration transfer, picking the widest transfer size that the
 * alignment of src, dst and len permits.  Returns NULL when no free
 * descriptor is available (after trying to reclaim completed ones).
 */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                                        size_t len, unsigned long flags)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, iflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
                                                                        node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, iflags);

        if (!mdesc) {
                /* try to free completed descriptors */
                mpc_dma_process_completed(mdma);
                return NULL;
        }

        mdesc->error = 0;
        tcd = mdesc->tcd;

        /* Prepare Transfer Control Descriptor for this transaction */
        memset(tcd, 0, sizeof(struct mpc_dma_tcd));

        /* Choose the largest transfer unit the alignment allows */
        if (IS_ALIGNED(src | dst | len, 32)) {
                tcd->ssize = MPC_DMA_TSIZE_32;
                tcd->dsize = MPC_DMA_TSIZE_32;
                tcd->soff = 32;
                tcd->doff = 32;
        } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
                /* MPC8308 doesn't support 16 byte transfers */
                tcd->ssize = MPC_DMA_TSIZE_16;
                tcd->dsize = MPC_DMA_TSIZE_16;
                tcd->soff = 16;
                tcd->doff = 16;
        } else if (IS_ALIGNED(src | dst | len, 4)) {
                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;
                tcd->soff = 4;
                tcd->doff = 4;
        } else if (IS_ALIGNED(src | dst | len, 2)) {
                tcd->ssize = MPC_DMA_TSIZE_2;
                tcd->dsize = MPC_DMA_TSIZE_2;
                tcd->soff = 2;
                tcd->doff = 2;
        } else {
                tcd->ssize = MPC_DMA_TSIZE_1;
                tcd->dsize = MPC_DMA_TSIZE_1;
                tcd->soff = 1;
                tcd->doff = 1;
        }

        /* Single minor loop of len bytes, one major iteration */
        tcd->saddr = src;
        tcd->daddr = dst;
        tcd->nbytes = len;
        tcd->biter = 1;
        tcd->citer = 1;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return &mdesc->desc;
}
637 
638 static int mpc_dma_probe(struct platform_device *op)
639 {
640         struct device_node *dn = op->dev.of_node;
641         struct device *dev = &op->dev;
642         struct dma_device *dma;
643         struct mpc_dma *mdma;
644         struct mpc_dma_chan *mchan;
645         struct resource res;
646         ulong regs_start, regs_size;
647         int retval, i;
648 
649         mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
650         if (!mdma) {
651                 dev_err(dev, "Memory exhausted!\n");
652                 return -ENOMEM;
653         }
654 
655         mdma->irq = irq_of_parse_and_map(dn, 0);
656         if (mdma->irq == NO_IRQ) {
657                 dev_err(dev, "Error mapping IRQ!\n");
658                 return -EINVAL;
659         }
660 
661         if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
662                 mdma->is_mpc8308 = 1;
663                 mdma->irq2 = irq_of_parse_and_map(dn, 1);
664                 if (mdma->irq2 == NO_IRQ) {
665                         dev_err(dev, "Error mapping IRQ!\n");
666                         return -EINVAL;
667                 }
668         }
669 
670         retval = of_address_to_resource(dn, 0, &res);
671         if (retval) {
672                 dev_err(dev, "Error parsing memory region!\n");
673                 return retval;
674         }
675 
676         regs_start = res.start;
677         regs_size = resource_size(&res);
678 
679         if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
680                 dev_err(dev, "Error requesting memory region!\n");
681                 return -EBUSY;
682         }
683 
684         mdma->regs = devm_ioremap(dev, regs_start, regs_size);
685         if (!mdma->regs) {
686                 dev_err(dev, "Error mapping memory region!\n");
687                 return -ENOMEM;
688         }
689 
690         mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
691                                                         + MPC_DMA_TCD_OFFSET);
692 
693         retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
694                                                                         mdma);
695         if (retval) {
696                 dev_err(dev, "Error requesting IRQ!\n");
697                 return -EINVAL;
698         }
699 
700         if (mdma->is_mpc8308) {
701                 retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
702                                 DRV_NAME, mdma);
703                 if (retval) {
704                         dev_err(dev, "Error requesting IRQ2!\n");
705                         return -EINVAL;
706                 }
707         }
708 
709         spin_lock_init(&mdma->error_status_lock);
710 
711         dma = &mdma->dma;
712         dma->dev = dev;
713         if (!mdma->is_mpc8308)
714                 dma->chancnt = MPC_DMA_CHANNELS;
715         else
716                 dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
717         dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
718         dma->device_free_chan_resources = mpc_dma_free_chan_resources;
719         dma->device_issue_pending = mpc_dma_issue_pending;
720         dma->device_tx_status = mpc_dma_tx_status;
721         dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
722 
723         INIT_LIST_HEAD(&dma->channels);
724         dma_cap_set(DMA_MEMCPY, dma->cap_mask);
725 
726         for (i = 0; i < dma->chancnt; i++) {
727                 mchan = &mdma->channels[i];
728 
729                 mchan->chan.device = dma;
730                 dma_cookie_init(&mchan->chan);
731 
732                 INIT_LIST_HEAD(&mchan->free);
733                 INIT_LIST_HEAD(&mchan->prepared);
734                 INIT_LIST_HEAD(&mchan->queued);
735                 INIT_LIST_HEAD(&mchan->active);
736                 INIT_LIST_HEAD(&mchan->completed);
737 
738                 spin_lock_init(&mchan->lock);
739                 list_add_tail(&mchan->chan.device_node, &dma->channels);
740         }
741 
742         tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
743 
744         /*
745          * Configure DMA Engine:
746          * - Dynamic clock,
747          * - Round-robin group arbitration,
748          * - Round-robin channel arbitration.
749          */
750         if (!mdma->is_mpc8308) {
751                 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
752                                         MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
753 
754                 /* Disable hardware DMA requests */
755                 out_be32(&mdma->regs->dmaerqh, 0);
756                 out_be32(&mdma->regs->dmaerql, 0);
757 
758                 /* Disable error interrupts */
759                 out_be32(&mdma->regs->dmaeeih, 0);
760                 out_be32(&mdma->regs->dmaeeil, 0);
761 
762                 /* Clear interrupts status */
763                 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
764                 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
765                 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
766                 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
767 
768                 /* Route interrupts to IPIC */
769                 out_be32(&mdma->regs->dmaihsa, 0);
770                 out_be32(&mdma->regs->dmailsa, 0);
771         } else {
772                 /* MPC8308 has 16 channels and lacks some registers */
773                 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
774 
775                 /* enable snooping */
776                 out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
777                 /* Disable error interrupts */
778                 out_be32(&mdma->regs->dmaeeil, 0);
779 
780                 /* Clear interrupts status */
781                 out_be32(&mdma->regs->dmaintl, 0xFFFF);
782                 out_be32(&mdma->regs->dmaerrl, 0xFFFF);
783         }
784 
785         /* Register DMA engine */
786         dev_set_drvdata(dev, mdma);
787         retval = dma_async_device_register(dma);
788         if (retval) {
789                 devm_free_irq(dev, mdma->irq, mdma);
790                 irq_dispose_mapping(mdma->irq);
791         }
792 
793         return retval;
794 }
795 
796 static int mpc_dma_remove(struct platform_device *op)
797 {
798         struct device *dev = &op->dev;
799         struct mpc_dma *mdma = dev_get_drvdata(dev);
800 
801         dma_async_device_unregister(&mdma->dma);
802         devm_free_irq(dev, mdma->irq, mdma);
803         irq_dispose_mapping(mdma->irq);
804 
805         return 0;
806 }
807 
808 static struct of_device_id mpc_dma_match[] = {
809         { .compatible = "fsl,mpc5121-dma", },
810         {},
811 };
812 
/* Platform driver glue; registered via module_platform_driver() below */
static struct platform_driver mpc_dma_driver = {
        .probe          = mpc_dma_probe,
        .remove         = mpc_dma_remove,
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table = mpc_dma_match,
        },
};
822 
823 module_platform_driver(mpc_dma_driver);
824 
825 MODULE_LICENSE("GPL");
826 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
827 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us