Linux/drivers/dma/at_hdmac.c

/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac             : Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma      : Atmel DMA controller entity related
 * atc_ / atchan        : Atmel DMA Channel entity related
 */

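/*
 * Default channel configuration: trigger on a half-empty/half-full FIFO,
 * and use the memory AHB interface for both source and destination unless
 * a slave configuration overrides it.
 */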
#define ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
                                |ATC_DIF(AT_DMA_MEM_IF))
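/* Slave bus widths advertised to the dmaengine core. */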
#define ATC_DMA_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/

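/*
 * Note: callers of these list helpers hold atchan->lock, since the lists
 * are also manipulated from the tasklet and interrupt paths.
 */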
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->active_list,
                                struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->queue,
                                struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be cleared
 *       and control will be given to client at usage time (during
 *       preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
                                            gfp_t gfp_flags)
{
        struct at_desc  *desc = NULL;
        struct at_dma   *atdma = to_at_dma(chan->device);
        dma_addr_t phys;

        desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(struct at_desc));
                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                /* txd.flags will be overwritten in prep functions */
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = atc_tx_submit;
                desc->txd.phys = phys;
        }

        return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        struct at_desc *ret = NULL;
        unsigned long flags;
        unsigned int i = 0;
        LIST_HEAD(tmp_list);

        spin_lock_irqsave(&atchan->lock, flags);
        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&atchan->chan_common),
                                "desc %p not ACKed\n", desc);
        }
        spin_unlock_irqrestore(&atchan->lock, flags);
        dev_vdbg(chan2dev(&atchan->chan_common),
                "scanned %u descriptors on freelist\n", i);

        /* no more descriptors available in initial pool: create one more */
        if (!ret) {
                ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
                if (ret) {
                        spin_lock_irqsave(&atchan->lock, flags);
                        atchan->descs_allocated++;
                        spin_unlock_irqrestore(&atchan->lock, flags);
                } else {
                        dev_err(chan2dev(&atchan->chan_common),
                                        "not enough descriptors available\n");
                }
        }

        return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
        if (desc) {
                struct at_desc *child;
                unsigned long flags;

                spin_lock_irqsave(&atchan->lock, flags);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                        "moving child desc %p to freelist\n",
                                        child);
                list_splice_init(&desc->tx_list, &atchan->free_list);
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
                spin_unlock_irqrestore(&atchan->lock, flags);
        }
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
                           struct at_desc *desc)
{
        if (!(*first)) {
                *first = desc;
        } else {
                /* inform the HW lli about chaining */
                (*prev)->lli.dscr = desc->txd.phys;
                /* insert the link descriptor to the LD ring */
                list_add_tail(&desc->desc_node,
                                &(*first)->tx_list);
        }
        *prev = desc;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
        struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);

        /* ASSERT:  channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_err(chan2dev(&atchan->chan_common),
                        "BUG: Attempted to start non-idle channel\n");
                dev_err(chan2dev(&atchan->chan_common),
                        "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
                        channel_readl(atchan, SADDR),
                        channel_readl(atchan, DADDR),
                        channel_readl(atchan, CTRLA),
                        channel_readl(atchan, CTRLB),
                        channel_readl(atchan, DSCR));

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        vdbg_dump_regs(atchan);

        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
        channel_writel(atchan, CTRLB, 0);
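        /*
         * Point DSCR at the first hardware LLI: once the channel is
         * enabled via CHER, the controller fetches the transfer
         * parameters (SADDR/DADDR/CTRLA/CTRLB) from that descriptor.
         */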
        channel_writel(atchan, DSCR, first->txd.phys);
        dma_writel(atdma, CHER, atchan->mask);

        vdbg_dump_regs(atchan);
}

/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 */
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
                                                dma_cookie_t cookie)
{
        struct at_desc *desc, *_desc;

        list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
                if (desc->txd.cookie == cookie)
                        return desc;
        }

        list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
                if (desc->txd.cookie == cookie)
                        return desc;
        }

        return NULL;
}

/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 * @desc: the descriptor containing the transfer width
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
                                        struct at_desc *desc)
{
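        /*
         * The BTSIZE field of CTRLA counts remaining transfers of
         * tx_width size (log2 bytes); the shift converts that count
         * back into bytes.
         */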
        return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
}

/**
 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
 * to the current value of CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @atchan: the channel to read CTRLA for
 * @desc: the descriptor containing the transfer width
 */
static inline int atc_calc_bytes_left_from_reg(int current_len,
                        struct at_dma_chan *atchan, struct at_desc *desc)
{
        u32 ctrla = channel_readl(atchan, CTRLA);

        return atc_calc_bytes_left(current_len, ctrla, desc);
}

/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_desc *desc_first = atc_first_active(atchan);
        struct at_desc *desc;
        int ret;
        u32 ctrla, dscr;

        /*
         * If the cookie doesn't match to the currently running transfer then
         * we can return the total length of the associated DMA transfer,
         * because it is still queued.
         */
        desc = atc_get_desc_by_cookie(atchan, cookie);
        if (desc == NULL)
                return -EINVAL;
        else if (desc != desc_first)
                return desc->total_len;

        /* cookie matches to the currently running transfer */
        ret = desc_first->total_len;

        if (desc_first->lli.dscr) {
                /* hardware linked list transfer */

                /*
                 * Calculate the residue by removing the length of the child
                 * descriptors already transferred from the total length.
                 * To get the current child descriptor we can use the value of
                 * the channel's DSCR register and compare it against the value
                 * of the hardware linked list structure of each child
                 * descriptor.
                 */

                ctrla = channel_readl(atchan, CTRLA);
                rmb(); /* ensure CTRLA is read before DSCR */
                dscr = channel_readl(atchan, DSCR);

                /* for the first descriptor we can be more accurate */
                if (desc_first->lli.dscr == dscr)
                        return atc_calc_bytes_left(ret, ctrla, desc_first);

                ret -= desc_first->len;
                list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
                        if (desc->lli.dscr == dscr)
                                break;

                        ret -= desc->len;
                }

                /*
                 * For the last descriptor in the chain we can calculate
                 * the remaining bytes using the channel's register.
                 * Note that the transfer width of the first and last
                 * descriptor may differ.
                 */
                if (!desc->lli.dscr)
                        ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
        } else {
                /* single transfer */
                ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
        }

        return ret;
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
        struct dma_async_tx_descriptor  *txd = &desc->txd;

        dev_vdbg(chan2dev(&atchan->chan_common),
                "descriptor %u complete\n", txd->cookie);

        /* mark the descriptor as complete for non cyclic cases only */
        if (!atc_chan_is_cyclic(atchan))
                dma_cookie_complete(txd);

        /* move children to free_list */
        list_splice_init(&desc->tx_list, &atchan->free_list);
        /* move myself to free_list */
        list_move(&desc->desc_node, &atchan->free_list);

        dma_descriptor_unmap(txd);
        /*
         * for cyclic transfers, no need to replay callback function
         * while stopping
         */
        if (!atc_chan_is_cyclic(atchan)) {
                dma_async_tx_callback   callback = txd->callback;
                void                    *param = txd->callback_param;

                /*
                 * The API requires that no submissions are done from a
                 * callback, so we don't need to drop the lock here
                 */
                if (callback)
                        callback(param);
        }

        dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submit any queued descriptors
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        if (!list_empty(&atchan->queue))
                atc_dostart(atchan, atc_first_queued(atchan));
        /* empty active_list now that it is complete */
        list_splice_init(&atchan->active_list, &list);
        /* empty queue list by moving descriptors (if any) to active_list */
        list_splice_init(&atchan->queue, &atchan->active_list);

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

        if (atc_chan_is_enabled(atchan))
                return;

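        /*
         * With zero or one descriptor left on the active list, the whole
         * chain is done: complete everything, which also kicks the queue.
         * Otherwise retire the finished head and restart the engine on
         * the next descriptor.
         */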
        if (list_empty(&atchan->active_list) ||
            list_is_singular(&atchan->active_list)) {
                atc_complete_all(atchan);
        } else {
                atc_chain_complete(atchan, atc_first_active(atchan));
                /* advance work */
                atc_dostart(atchan, atc_first_active(atchan));
        }
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
        struct at_desc *bad_desc;
        struct at_desc *child;

        /*
         * The descriptor currently at the head of the active list is
         * broken. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = atc_first_active(atchan);
        list_del_init(&bad_desc->desc_node);

        /*
         * As we are stopped, take advantage to push queued descriptors
         * in active_list
         */
        list_splice_init(&atchan->queue, atchan->active_list.prev);

        /* Try to restart the controller */
        if (!list_empty(&atchan->active_list))
                atc_dostart(atchan, atc_first_active(atchan));

        /*
         * KERN_CRIT may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_crit(chan2dev(&atchan->chan_common),
                        "Bad descriptor submitted for DMA!\n");
        dev_crit(chan2dev(&atchan->chan_common),
                        "  cookie: %d\n", bad_desc->txd.cookie);
        atc_dump_lli(atchan, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                atc_dump_lli(atchan, &child->lli);

        /* Pretend the descriptor completed successfully */
        atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
        struct at_desc                  *first = atc_first_active(atchan);
        struct dma_async_tx_descriptor  *txd = &first->txd;
        dma_async_tx_callback           callback = txd->callback;
        void                            *param = txd->callback_param;

        dev_vdbg(chan2dev(&atchan->chan_common),
                        "new cyclic period llp 0x%08x\n",
                        channel_readl(atchan, DSCR));

        if (callback)
                callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
        struct at_dma_chan *atchan = (struct at_dma_chan *)data;
        unsigned long flags;

        spin_lock_irqsave(&atchan->lock, flags);
        if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
                atc_handle_error(atchan);
        else if (atc_chan_is_cyclic(atchan))
                atc_handle_cyclic(atchan);
        else
                atc_advance_work(atchan);

        spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
        struct at_dma           *atdma = (struct at_dma *)dev_id;
        struct at_dma_chan      *atchan;
        int                     i;
        u32                     status, pending, imr;
        int                     ret = IRQ_NONE;

        do {
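                /*
                 * EBCISR is clear-on-read (the probe path relies on this
                 * to flush pending flags): grab the raw status, mask it
                 * with the enabled interrupts and loop until nothing
                 * relevant remains pending.
                 */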
                imr = dma_readl(atdma, EBCIMR);
                status = dma_readl(atdma, EBCISR);
                pending = status & imr;

                if (!pending)
                        break;

                dev_vdbg(atdma->dma_common.dev,
                        "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
                         status, imr, pending);

                for (i = 0; i < atdma->dma_common.chancnt; i++) {
                        atchan = &atdma->chan[i];
                        if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
                                if (pending & AT_DMA_ERR(i)) {
                                        /* Disable channel on AHB error */
                                        dma_writel(atdma, CHDR,
                                                AT_DMA_RES(i) | atchan->mask);
                                        /* Give information to tasklet */
                                        set_bit(ATC_IS_ERROR, &atchan->status);
                                }
                                tasklet_schedule(&atchan->tasklet);
                                ret = IRQ_HANDLED;
                        }
                }

        } while (pending);

        return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

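/*
 * Typical slave usage from a client driver, sketched for illustration
 * only (the channel, addresses and callback names are hypothetical):
 *
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_DEV_TO_MEM,
 *              .src_addr       = fifo_phys_addr,
 *              .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *              .src_maxburst   = 1,
 *      };
 *      dmaengine_slave_config(chan, &cfg);
 *      desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *                                     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *      desc->callback = done_callback;
 *      cookie = dmaengine_submit(desc);
 *      dma_async_issue_pending(chan);
 */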
/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_desc          *desc = txd_to_at_desc(tx);
        struct at_dma_chan      *atchan = to_at_dma_chan(tx->chan);
        dma_cookie_t            cookie;
        unsigned long           flags;

        spin_lock_irqsave(&atchan->lock, flags);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&atchan->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                                desc->txd.cookie);
                atc_dostart(atchan, desc);
                list_add_tail(&desc->desc_node, &atchan->active_list);
        } else {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                                desc->txd.cookie);
                list_add_tail(&desc->desc_node, &atchan->queue);
        }

        spin_unlock_irqrestore(&atchan->lock, flags);

        return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_desc          *desc = NULL;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        size_t                  xfer_count;
        size_t                  offset;
        unsigned int            src_width;
        unsigned int            dst_width;
        u32                     ctrla;
        u32                     ctrlb;

        dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
                        dest, src, len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }

        ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
                | ATC_SRC_ADDR_MODE_INCR
                | ATC_DST_ADDR_MODE_INCR
                | ATC_FC_MEM2MEM;

        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!((src | dest | len) & 3)) {
                ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
                src_width = dst_width = 2;
        } else if (!((src | dest | len) & 1)) {
                ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
                src_width = dst_width = 1;
        } else {
                ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
                src_width = dst_width = 0;
        }

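        /*
         * src_width/dst_width hold log2 of the access size in bytes
         * (0 = byte, 1 = halfword, 2 = word); BTSIZE in CTRLA is then
         * expressed in transfers of that size, not in bytes.
         */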
        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                ATC_BTSIZE_MAX);

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                desc->lli.saddr = src + offset;
                desc->lli.daddr = dest + offset;
                desc->lli.ctrla = ctrla | xfer_count;
                desc->lli.ctrlb = ctrlb;

                desc->txd.cookie = 0;
                desc->len = xfer_count << src_width;

                atc_desc_chain(&first, &prev, desc);
        }

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->total_len = len;

        /* set transfer width for the calculation of the residue */
        first->tx_width = src_width;
        prev->tx_width = src_width;

        /* set end-of-link to the last link descriptor of the list */
        set_desc_eol(desc);

        first->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        atc_desc_put(atchan, first);
        return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        u32                     ctrla;
        u32                     ctrlb;
        dma_addr_t              reg;
        unsigned int            reg_width;
        unsigned int            mem_width;
        unsigned int            i;
        struct scatterlist      *sg;
        size_t                  total_len = 0;

        dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
                        sg_len,
                        direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        flags);

        if (unlikely(!atslave || !sg_len)) {
                dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
                return NULL;
        }

        ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
                | ATC_DCSIZE(sconfig->dst_maxburst);
        ctrlb = ATC_IEN;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                reg_width = convert_buswidth(sconfig->dst_addr_width);
                ctrla |=  ATC_DST_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_FIXED
                        | ATC_SRC_ADDR_MODE_INCR
                        | ATC_FC_MEM2PER
                        | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
                reg = sconfig->dst_addr;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc  *desc;
                        u32             len;
                        u32             mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
                        if (unlikely(!len)) {
                                dev_dbg(chan2dev(chan),
                                        "prep_slave_sg: sg(%d) data length is zero\n", i);
                                goto err;
                        }
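                        /*
                         * Use word (32-bit) memory accesses when both the
                         * address and the length are word-aligned, else
                         * fall back to byte accesses.
                         */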
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = mem;
                        desc->lli.daddr = reg;
                        desc->lli.ctrla = ctrla
                                        | ATC_SRC_WIDTH(mem_width)
                                        | len >> mem_width;
                        desc->lli.ctrlb = ctrlb;
                        desc->len = len;

                        atc_desc_chain(&first, &prev, desc);
                        total_len += len;
                }
                break;
        case DMA_DEV_TO_MEM:
                reg_width = convert_buswidth(sconfig->src_addr_width);
                ctrla |=  ATC_SRC_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_INCR
                        | ATC_SRC_ADDR_MODE_FIXED
                        | ATC_FC_PER2MEM
                        | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

                reg = sconfig->src_addr;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc  *desc;
                        u32             len;
                        u32             mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
                        if (unlikely(!len)) {
                                dev_dbg(chan2dev(chan),
                                        "prep_slave_sg: sg(%d) data length is zero\n", i);
                                goto err;
                        }
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = reg;
                        desc->lli.daddr = mem;
                        desc->lli.ctrla = ctrla
                                        | ATC_DST_WIDTH(mem_width)
                                        | len >> reg_width;
                        desc->lli.ctrlb = ctrlb;
                        desc->len = len;

                        atc_desc_chain(&first, &prev, desc);
                        total_len += len;
                }
                break;
        default:
                return NULL;
        }

        /* set end-of-link to the last link descriptor of the list */
        set_desc_eol(prev);

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->total_len = total_len;

        /* set transfer width for the calculation of the residue */
        first->tx_width = reg_width;
        prev->tx_width = reg_width;

        /* first link descriptor of the list is responsible for the flags */
        first->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
        atc_desc_put(atchan, first);
        return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
                size_t period_len)
{
        if (period_len > (ATC_BTSIZE_MAX << reg_width))
                goto err_out;
        if (unlikely(period_len & ((1 << reg_width) - 1)))
                goto err_out;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto err_out;

        return 0;

err_out:
        return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
                unsigned int period_index, dma_addr_t buf_addr,
                unsigned int reg_width, size_t period_len,
                enum dma_transfer_direction direction)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        u32                     ctrla;

        /* prepare common CTRLA value */
        ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
                | ATC_DCSIZE(sconfig->dst_maxburst)
                | ATC_DST_WIDTH(reg_width)
                | ATC_SRC_WIDTH(reg_width)
                | period_len >> reg_width;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                desc->lli.saddr = buf_addr + (period_len * period_index);
                desc->lli.daddr = sconfig->dst_addr;
                desc->lli.ctrla = ctrla;
                desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
                                | ATC_SRC_ADDR_MODE_INCR
                                | ATC_FC_MEM2PER
                                | ATC_SIF(atchan->mem_if)
                                | ATC_DIF(atchan->per_if);
                desc->len = period_len;
                break;

        case DMA_DEV_TO_MEM:
                desc->lli.saddr = sconfig->src_addr;
                desc->lli.daddr = buf_addr + (period_len * period_index);
                desc->lli.ctrla = ctrla;
                desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
                                | ATC_SRC_ADDR_MODE_FIXED
                                | ATC_FC_PER2MEM
                                | ATC_SIF(atchan->per_if)
                                | ATC_DIF(atchan->mem_if);
                desc->len = period_len;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        struct at_desc          *first = NULL;
        struct at_desc          *prev = NULL;
        unsigned long           was_cyclic;
        unsigned int            reg_width;
        unsigned int            periods = buf_len / period_len;
        unsigned int            i;

        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
                        direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        buf_addr,
                        periods, buf_len, period_len);

        if (unlikely(!atslave || !buf_len || !period_len)) {
                dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
                return NULL;
        }

        was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
        if (was_cyclic) {
                dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
                return NULL;
        }

        if (unlikely(!is_slave_direction(direction)))
                goto err_out;

        if (sconfig->direction == DMA_MEM_TO_DEV)
                reg_width = convert_buswidth(sconfig->dst_addr_width);
        else
                reg_width = convert_buswidth(sconfig->src_addr_width);

        /* Check for too big/unaligned periods and unaligned DMA buffer */
        if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
                goto err_out;

        /* build cyclic linked list */
        for (i = 0; i < periods; i++) {
                struct at_desc  *desc;

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
                                             reg_width, period_len, direction))
                        goto err_desc_get;

                atc_desc_chain(&first, &prev, desc);
        }

        /* let's make a cyclic list */
        prev->lli.dscr = first->txd.phys;

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->total_len = buf_len;
        first->tx_width = reg_width;

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "not enough descriptors available\n");
        atc_desc_put(atchan, first);
err_out:
        clear_bit(ATC_IS_CYCLIC, &atchan->status);
        return NULL;
}

static int atc_config(struct dma_chan *chan,
                      struct dma_slave_config *sconfig)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        /* Check that the channel is configured for slave transfers */
        if (!chan->private)
                return -EINVAL;

        memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

        convert_burst(&atchan->dma_sconfig.src_maxburst);
        convert_burst(&atchan->dma_sconfig.dst_maxburst);

        return 0;
}

static int atc_pause(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        int                     chan_id = atchan->chan_common.chan_id;
        unsigned long           flags;

        LIST_HEAD(list);

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        spin_lock_irqsave(&atchan->lock, flags);

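        /* Setting the SUSP bit via CHER pauses the channel mid-transfer. */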
        dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
        set_bit(ATC_IS_PAUSED, &atchan->status);

        spin_unlock_irqrestore(&atchan->lock, flags);

        return 0;
}

static int atc_resume(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        int                     chan_id = atchan->chan_common.chan_id;
        unsigned long           flags;

        LIST_HEAD(list);

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        if (!atc_chan_is_paused(atchan))
                return 0;

        spin_lock_irqsave(&atchan->lock, flags);

        dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
        clear_bit(ATC_IS_PAUSED, &atchan->status);

        spin_unlock_irqrestore(&atchan->lock, flags);

        return 0;
}

static int atc_terminate_all(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        int                     chan_id = atchan->chan_common.chan_id;
        struct at_desc          *desc, *_desc;
        unsigned long           flags;

        LIST_HEAD(list);

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        /*
         * This is only called when something went wrong elsewhere, so
         * we don't really care about the data. Just disable the
         * channel. We still have to poll the channel enable bit due
         * to AHB/HSB limitations.
         */
        spin_lock_irqsave(&atchan->lock, flags);

        /* disabling channel: must also remove suspend state */
        dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

        /* confirm that this channel is disabled */
        while (dma_readl(atdma, CHSR) & atchan->mask)
                cpu_relax();

        /* active_list entries will end up before queued entries */
        list_splice_init(&atchan->queue, &list);
        list_splice_init(&atchan->active_list, &list);

        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);

        clear_bit(ATC_IS_PAUSED, &atchan->status);
        /* if channel dedicated to cyclic operations, free it */
        clear_bit(ATC_IS_CYCLIC, &atchan->status);

        spin_unlock_irqrestore(&atchan->lock, flags);

        return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        unsigned long           flags;
        enum dma_status         ret;
        int bytes = 0;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;
        /*
         * There's no point calculating the residue if there's
         * no txstate to store the value.
         */
        if (!txstate)
                return DMA_ERROR;

        spin_lock_irqsave(&atchan->lock, flags);

        /* Get number of bytes left in the active transactions */
        bytes = atc_get_bytes_left(chan, cookie);

        spin_unlock_irqrestore(&atchan->lock, flags);

        if (unlikely(bytes < 0)) {
                dev_vdbg(chan2dev(chan), "get residual bytes error\n");
                return DMA_ERROR;
        } else {
                dma_set_residue(txstate, bytes);
        }

        dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
                 ret, cookie, bytes);

        return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        unsigned long           flags;

        dev_vdbg(chan2dev(chan), "issue_pending\n");

        /* Not needed for cyclic transfers */
        if (atc_chan_is_cyclic(atchan))
                return;

        spin_lock_irqsave(&atchan->lock, flags);
        atc_advance_work(atchan);
        spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc;
        struct at_dma_slave     *atslave;
        unsigned long           flags;
        int                     i;
        u32                     cfg;
        LIST_HEAD(tmp_list);

        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

        /* ASSERT:  channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }

        cfg = ATC_DEFAULT_CFG;

        atslave = chan->private;
        if (atslave) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
                 */
                BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

                /* if cfg configuration specified take it instead of default */
                if (atslave->cfg)
                        cfg = atslave->cfg;
        }

        /*
         * If we've already been set up, just reconfigure the channel;
         * there is no need to reallocate the descriptors.
         */
        if (!list_empty(&atchan->free_list))
                return atchan->descs_allocated;

        /* Allocate initial pool of descriptors */
        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = atc_alloc_descriptor(chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(atdma->dma_common.dev,
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->desc_node, &tmp_list);
        }

        spin_lock_irqsave(&atchan->lock, flags);
        atchan->descs_allocated = i;
        list_splice(&tmp_list, &atchan->free_list);
        dma_cookie_init(chan);
        spin_unlock_irqrestore(&atchan->lock, flags);

        /* channel parameters */
        channel_writel(atchan, CFG, cfg);

        dev_dbg(chan2dev(chan),
                "alloc_chan_resources: allocated %d descriptors\n",
                atchan->descs_allocated);

        return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc, *_desc;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
                atchan->descs_allocated);

        /* ASSERT:  channel is idle */
        BUG_ON(!list_empty(&atchan->active_list));
        BUG_ON(!list_empty(&atchan->queue));
        BUG_ON(atc_chan_is_enabled(atchan));

        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
                list_del(&desc->desc_node);
                /* free link descriptor */
                dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
        }
        list_splice_init(&atchan->free_list, &list);
        atchan->descs_allocated = 0;
        atchan->status = 0;

        dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
        struct at_dma_slave *atslave = slave;

        if (atslave->dma_dev == chan->device->dev) {
                chan->private = atslave;
                return true;
        } else {
                return false;
        }
}

static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
                                     struct of_dma *of_dma)
{
        struct dma_chan *chan;
        struct at_dma_chan *atchan;
        struct at_dma_slave *atslave;
        dma_cap_mask_t mask;
        unsigned int per_id;
        struct platform_device *dmac_pdev;

        if (dma_spec->args_count != 2)
                return NULL;

        dmac_pdev = of_find_device_by_node(dma_spec->np);

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
        if (!atslave)
                return NULL;

        atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
        /*
         * We can fill both SRC_PER and DST_PER, one of these fields will be
         * ignored depending on DMA transfer direction.
         */
        per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
        atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
                     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
        /*
         * We have to translate the value we get from the device tree since
         * the half FIFO configuration value had to be 0 to keep backward
         * compatibility.
         */
        switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
        case AT91_DMA_CFG_FIFOCFG_ALAP:
                atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
                break;
        case AT91_DMA_CFG_FIFOCFG_ASAP:
                atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
                break;
        case AT91_DMA_CFG_FIFOCFG_HALF:
        default:
                atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
        }
        atslave->dma_dev = &dmac_pdev->dev;

        chan = dma_request_channel(mask, at_dma_filter, atslave);
        if (!chan)
                return NULL;

        atchan = to_at_dma_chan(chan);
        atchan->per_if = dma_spec->args[0] & 0xff;
        atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

        return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
                                     struct of_dma *of_dma)
{
        return NULL;
}
#endif

/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
        .nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
        .nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
        {
                .compatible = "atmel,at91sam9rl-dma",
                .data = &at91sam9rl_config,
        }, {
                .compatible = "atmel,at91sam9g45-dma",
                .data = &at91sam9g45_config,
        }, {
                /* sentinel */
        }
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
        {
                .name = "at91sam9rl_dma",
                .driver_data = (unsigned long) &at91sam9rl_config,
        }, {
                .name = "at91sam9g45_dma",
                .driver_data = (unsigned long) &at91sam9g45_config,
        }, {
                /* sentinel */
        }
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
                                                struct platform_device *pdev)
{
        if (pdev->dev.of_node) {
                const struct of_device_id *match;
                match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
                if (match == NULL)
                        return NULL;
                return match->data;
        }
        return (struct at_dma_platform_data *)
                        platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
1439  */
1440 static void at_dma_off(struct at_dma *atdma)
1441 {
1442         dma_writel(atdma, EN, 0);
1443 
1444         /* disable all interrupts */
1445         dma_writel(atdma, EBCIDR, -1L);
1446 
1447         /* confirm that all channels are disabled */
1448         while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1449                 cpu_relax();
1450 }
1451 
1452 static int __init at_dma_probe(struct platform_device *pdev)
1453 {
1454         struct resource         *io;
1455         struct at_dma           *atdma;
1456         size_t                  size;
1457         int                     irq;
1458         int                     err;
1459         int                     i;
1460         const struct at_dma_platform_data *plat_dat;
1461 
1462         /* set up platform data for each SoC */
1463         dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1464         dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1465         dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1466 
1467         /* get DMA parameters from controller type */
1468         plat_dat = at_dma_get_driver_data(pdev);
1469         if (!plat_dat)
1470                 return -ENODEV;
1471 
1472         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1473         if (!io)
1474                 return -EINVAL;
1475 
1476         irq = platform_get_irq(pdev, 0);
1477         if (irq < 0)
1478                 return irq;
1479 
1480         size = sizeof(struct at_dma);
1481         size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1482         atdma = kzalloc(size, GFP_KERNEL);
1483         if (!atdma)
1484                 return -ENOMEM;
1485 
1486         /* discover transaction capabilities */
1487         atdma->dma_common.cap_mask = plat_dat->cap_mask;
1488         atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1489 
1490         size = resource_size(io);
1491         if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1492                 err = -EBUSY;
1493                 goto err_kfree;
1494         }
1495 
1496         atdma->regs = ioremap(io->start, size);
1497         if (!atdma->regs) {
1498                 err = -ENOMEM;
1499                 goto err_release_r;
1500         }
1501 
1502         atdma->clk = clk_get(&pdev->dev, "dma_clk");
1503         if (IS_ERR(atdma->clk)) {
1504                 err = PTR_ERR(atdma->clk);
1505                 goto err_clk;
1506         }
1507         err = clk_prepare_enable(atdma->clk);
1508         if (err)
1509                 goto err_clk_prepare;
1510 
1511         /* force dma off, just in case */
1512         at_dma_off(atdma);
1513 
1514         err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1515         if (err)
1516                 goto err_irq;
1517 
1518         platform_set_drvdata(pdev, atdma);
1519 
1520         /* create a pool of consistent memory blocks for hardware descriptors */
1521         atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1522                         &pdev->dev, sizeof(struct at_desc),
1523                         4 /* word alignment */, 0);
1524         if (!atdma->dma_desc_pool) {
1525                 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1526                 err = -ENOMEM;
1527                 goto err_pool_create;
1528         }
1529 
1530         /* clear any pending interrupt */
1531         while (dma_readl(atdma, EBCISR))
1532                 cpu_relax();
1533 
1534         /* initialize channel-related values */
1535         INIT_LIST_HEAD(&atdma->dma_common.channels);
1536         for (i = 0; i < plat_dat->nr_channels; i++) {
1537                 struct at_dma_chan      *atchan = &atdma->chan[i];
1538 
1539                 atchan->mem_if = AT_DMA_MEM_IF;
1540                 atchan->per_if = AT_DMA_PER_IF;
1541                 atchan->chan_common.device = &atdma->dma_common;
1542                 dma_cookie_init(&atchan->chan_common);
1543                 list_add_tail(&atchan->chan_common.device_node,
1544                                 &atdma->dma_common.channels);
1545 
1546                 atchan->ch_regs = atdma->regs + ch_regs(i);
1547                 spin_lock_init(&atchan->lock);
1548                 atchan->mask = 1 << i;
1549 
1550                 INIT_LIST_HEAD(&atchan->active_list);
1551                 INIT_LIST_HEAD(&atchan->queue);
1552                 INIT_LIST_HEAD(&atchan->free_list);
1553 
1554                 tasklet_init(&atchan->tasklet, atc_tasklet,
1555                                 (unsigned long)atchan);
1556                 atc_enable_chan_irq(atdma, i);
1557         }
1558 
1559         /* set base routines */
1560         atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1561         atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1562         atdma->dma_common.device_tx_status = atc_tx_status;
1563         atdma->dma_common.device_issue_pending = atc_issue_pending;
1564         atdma->dma_common.dev = &pdev->dev;
1565 
1566         /* set prep routines based on capability */
1567         if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1568                 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1569 
1570         if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1571                 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1572                 /* controller can do slave DMA: can trigger cyclic transfers */
1573                 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1574                 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1575                 atdma->dma_common.device_config = atc_config;
1576                 atdma->dma_common.device_pause = atc_pause;
1577                 atdma->dma_common.device_resume = atc_resume;
1578                 atdma->dma_common.device_terminate_all = atc_terminate_all;
1579                 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1580                 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1581                 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1582                 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1583         }
1584 
1585         dma_writel(atdma, EN, AT_DMA_ENABLE);
1586 
1587         dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1588           dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1589           dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1590           plat_dat->nr_channels);
1591 
1592         dma_async_device_register(&atdma->dma_common);
1593 
1594         /*
1595          * Do not return an error if the dmac node is not present, in order
1596          * not to break the existing way of requesting a channel with
1597          * dma_request_channel().
1598          */
1599         if (pdev->dev.of_node) {
1600                 err = of_dma_controller_register(pdev->dev.of_node,
1601                                                  at_dma_xlate, atdma);
1602                 if (err) {
1603                         dev_err(&pdev->dev, "could not register of_dma_controller\n");
1604                         goto err_of_dma_controller_register;
1605                 }
1606         }
1607 
1608         return 0;
1609 
1610 err_of_dma_controller_register:
1611         dma_async_device_unregister(&atdma->dma_common);
1612         dma_pool_destroy(atdma->dma_desc_pool);
1613 err_pool_create:
1614         free_irq(platform_get_irq(pdev, 0), atdma);
1615 err_irq:
1616         clk_disable_unprepare(atdma->clk);
1617 err_clk_prepare:
1618         clk_put(atdma->clk);
1619 err_clk:
1620         iounmap(atdma->regs);
1621         atdma->regs = NULL;
1622 err_release_r:
1623         release_mem_region(io->start, size);
1624 err_kfree:
1625         kfree(atdma);
1626         return err;
1627 }
1628 
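     /**
      * at_dma_remove - undo everything done in at_dma_probe
      * @pdev: platform device being removed
      */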
1629 static int at_dma_remove(struct platform_device *pdev)
1630 {
1631         struct at_dma           *atdma = platform_get_drvdata(pdev);
1632         struct dma_chan         *chan, *_chan;
1633         struct resource         *io;
1634 
1635         at_dma_off(atdma);
1636         dma_async_device_unregister(&atdma->dma_common);
1637 
1638         dma_pool_destroy(atdma->dma_desc_pool);
1639         free_irq(platform_get_irq(pdev, 0), atdma);
1640 
1641         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1642                         device_node) {
1643                 struct at_dma_chan      *atchan = to_at_dma_chan(chan);
1644 
1645                 /* Disable interrupts */
1646                 atc_disable_chan_irq(atdma, chan->chan_id);
1647 
1648                 tasklet_kill(&atchan->tasklet);
1649                 list_del(&chan->device_node);
1650         }
1651 
1652         clk_disable_unprepare(atdma->clk);
1653         clk_put(atdma->clk);
1654 
1655         iounmap(atdma->regs);
1656         atdma->regs = NULL;
1657 
1658         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1659         release_mem_region(io->start, resource_size(io));
1660 
1661         kfree(atdma);
1662 
1663         return 0;
1664 }
1665 
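     /**
      * at_dma_shutdown - quiesce the controller on system shutdown
      * @pdev: platform device being shut down
      */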
1666 static void at_dma_shutdown(struct platform_device *pdev)
1667 {
1668         struct at_dma   *atdma = platform_get_drvdata(pdev);
1669 
1670         at_dma_off(atdma);
1671         clk_disable_unprepare(atdma->clk);
1672 }
1673 
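     /**
      * at_dma_prepare - check whether the controller can be suspended
      * @dev: device being suspended
      *
      * Returns -EAGAIN, aborting the system suspend, while any non-cyclic
      * transfer is still in flight.
      */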
1674 static int at_dma_prepare(struct device *dev)
1675 {
1676         struct platform_device *pdev = to_platform_device(dev);
1677         struct at_dma *atdma = platform_get_drvdata(pdev);
1678         struct dma_chan *chan, *_chan;
1679 
1680         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1681                         device_node) {
1682                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1683                 /* defer suspend until transactions complete (except in the cyclic case) */
1684                 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1685                         return -EAGAIN;
1686         }
1687         return 0;
1688 }
1689 
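     /**
      * atc_suspend_cyclic - save the state of a cyclic channel for suspend
      * @atchan: channel running a cyclic transfer
      *
      * Pauses the channel if its user has not already done so, then
      * records the next descriptor address so the transfer can resume.
      */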
1690 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1691 {
1692         struct dma_chan *chan = &atchan->chan_common;
1693 
1694         /* The channel should already have been paused by its user;
1695          * pause it here anyway in case that was not done */
1696         if (!atc_chan_is_paused(atchan)) {
1697                 dev_warn(chan2dev(chan),
1698                 "cyclic channel not paused, should be done by channel user\n");
1699                 atc_pause(chan);
1700         }
1701 
1702         /* now preserve additional data for cyclic operations:
1703          * the next descriptor address in the cyclic list */
1704         atchan->save_dscr = channel_readl(atchan, DSCR);
1705 
1706         vdbg_dump_regs(atchan);
1707 }
1708 
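     /**
      * at_dma_suspend_noirq - save controller state and power it down
      * @dev: device being suspended
      */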
1709 static int at_dma_suspend_noirq(struct device *dev)
1710 {
1711         struct platform_device *pdev = to_platform_device(dev);
1712         struct at_dma *atdma = platform_get_drvdata(pdev);
1713         struct dma_chan *chan, *_chan;
1714 
1715         /* preserve data */
1716         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1717                         device_node) {
1718                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1719 
1720                 if (atc_chan_is_cyclic(atchan))
1721                         atc_suspend_cyclic(atchan);
1722                 atchan->save_cfg = channel_readl(atchan, CFG);
1723         }
1724         atdma->save_imr = dma_readl(atdma, EBCIMR);
1725 
1726         /* disable DMA controller */
1727         at_dma_off(atdma);
1728         clk_disable_unprepare(atdma->clk);
1729         return 0;
1730 }
1731 
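     /**
      * atc_resume_cyclic - restart a cyclic transfer saved at suspend time
      * @atchan: channel to resume
      */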
1732 static void atc_resume_cyclic(struct at_dma_chan *atchan)
1733 {
1734         struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
1735 
1736         /* restore the channel state for the cyclic descriptor list:
1737          * resume at the descriptor that was next at suspend time */
1738         channel_writel(atchan, SADDR, 0);
1739         channel_writel(atchan, DADDR, 0);
1740         channel_writel(atchan, CTRLA, 0);
1741         channel_writel(atchan, CTRLB, 0);
1742         channel_writel(atchan, DSCR, atchan->save_dscr);
1743         dma_writel(atdma, CHER, atchan->mask);
1744 
1745         /* the channel's pause status should be cleared by the channel
1746          * user; we cannot take the initiative to do it here */
1747 
1748         vdbg_dump_regs(atchan);
1749 }
1750 
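     /**
      * at_dma_resume_noirq - re-enable the controller and restore its state
      * @dev: device being resumed
      */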
1751 static int at_dma_resume_noirq(struct device *dev)
1752 {
1753         struct platform_device *pdev = to_platform_device(dev);
1754         struct at_dma *atdma = platform_get_drvdata(pdev);
1755         struct dma_chan *chan, *_chan;
1756 
1757         /* bring back DMA controller */
1758         clk_prepare_enable(atdma->clk);
1759         dma_writel(atdma, EN, AT_DMA_ENABLE);
1760 
1761         /* clear any pending interrupt */
1762         while (dma_readl(atdma, EBCISR))
1763                 cpu_relax();
1764 
1765         /* restore saved data */
1766         dma_writel(atdma, EBCIER, atdma->save_imr);
1767         list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1768                         device_node) {
1769                 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1770 
1771                 channel_writel(atchan, CFG, atchan->save_cfg);
1772                 if (atc_chan_is_cyclic(atchan))
1773                         atc_resume_cyclic(atchan);
1774         }
1775         return 0;
1776 }
1777 
1778 static const struct dev_pm_ops at_dma_dev_pm_ops = {
1779         .prepare = at_dma_prepare,
1780         .suspend_noirq = at_dma_suspend_noirq,
1781         .resume_noirq = at_dma_resume_noirq,
1782 };
1783 
1784 static struct platform_driver at_dma_driver = {
1785         .remove         = at_dma_remove,
1786         .shutdown       = at_dma_shutdown,
1787         .id_table       = atdma_devtypes,
1788         .driver = {
1789                 .name   = "at_hdmac",
1790                 .pm     = &at_dma_dev_pm_ops,
1791                 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
1792         },
1793 };
1794 
1795 static int __init at_dma_init(void)
1796 {
1797         return platform_driver_probe(&at_dma_driver, at_dma_probe);
1798 }
1799 subsys_initcall(at_dma_init);
1800 
1801 static void __exit at_dma_exit(void)
1802 {
1803         platform_driver_unregister(&at_dma_driver);
1804 }
1805 module_exit(at_dma_exit);
1806 
1807 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1808 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1809 MODULE_LICENSE("GPL");
1810 MODULE_ALIAS("platform:at_hdmac");
1811 
