Linux/drivers/dma/k3dma.c

/*
 * Copyright (c) 2013 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME             "k3-dma"
#define DMA_ALIGN               3
#define DMA_MAX_SIZE            0x1ffc

#define INT_STAT                0x00
#define INT_TC1                 0x04
#define INT_ERR1                0x0c
#define INT_ERR2                0x10
#define INT_TC1_MASK            0x18
#define INT_ERR1_MASK           0x20
#define INT_ERR2_MASK           0x24
#define INT_TC1_RAW             0x600
#define INT_ERR1_RAW            0x608
#define INT_ERR2_RAW            0x610
#define CH_PRI                  0x688
#define CH_STAT                 0x690
#define CX_CUR_CNT              0x704
#define CX_LLI                  0x800
#define CX_CNT                  0x810
#define CX_SRC                  0x814
#define CX_DST                  0x818
#define CX_CFG                  0x81c
#define AXI_CFG                 0x820
#define AXI_CFG_DEFAULT         0x201201

#define CX_LLI_CHAIN_EN         0x2
#define CX_CFG_EN               0x1
#define CX_CFG_MEM2PER          (0x1 << 2)
#define CX_CFG_PER2MEM          (0x2 << 2)
#define CX_CFG_SRCINCR          (0x1 << 31)
#define CX_CFG_DSTINCR          (0x1 << 30)

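/*
 * Hardware link-list item (LLI). The layout mirrors the per-channel
 * register window declared above (CX_LLI, CX_CNT, CX_SRC, CX_DST, CX_CFG),
 * so an entry can be written out verbatim by k3_dma_set_desc() or fetched
 * by the controller itself when CX_LLI_CHAIN_EN is set. The 32-byte
 * alignment is presumably a requirement of the hardware's link-list fetch;
 * that is inferred from the code, not stated in this file.
 */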
struct k3_desc_hw {
        u32 lli;
        u32 reserved[3];
        u32 count;
        u32 saddr;
        u32 daddr;
        u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
        struct virt_dma_desc    vd;
        dma_addr_t              desc_hw_lli;
        size_t                  desc_num;
        size_t                  size;
        struct k3_desc_hw       desc_hw[0];
};

struct k3_dma_phy;

struct k3_dma_chan {
        u32                     ccfg;
        struct virt_dma_chan    vc;
        struct k3_dma_phy       *phy;
        struct list_head        node;
        enum dma_transfer_direction dir;
        dma_addr_t              dev_addr;
        enum dma_status         status;
};

struct k3_dma_phy {
        u32                     idx;
        void __iomem            *base;
        struct k3_dma_chan      *vchan;
        struct k3_dma_desc_sw   *ds_run;
        struct k3_dma_desc_sw   *ds_done;
};

struct k3_dma_dev {
        struct dma_device       slave;
        void __iomem            *base;
        struct tasklet_struct   task;
        spinlock_t              lock;
        struct list_head        chan_pending;
        struct k3_dma_phy       *phy;
        struct k3_dma_chan      *chans;
        struct clk              *clk;
        u32                     dma_channels;
        u32                     dma_requests;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
        return container_of(chan, struct k3_dma_chan, vc.chan);
}

static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
        u32 val = 0;

        if (on) {
                val = readl_relaxed(phy->base + CX_CFG);
                val |= CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        } else {
                val = readl_relaxed(phy->base + CX_CFG);
                val &= ~CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        }
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
        u32 val = 0;

        k3_dma_pause_dma(phy, false);

        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + INT_TC1_RAW);
        writel_relaxed(val, d->base + INT_ERR1_RAW);
        writel_relaxed(val, d->base + INT_ERR2_RAW);
}

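/*
 * Program one hardware descriptor into the channel's register window.
 * CX_CFG is written last since it carries CX_CFG_EN, which appears to be
 * what actually starts the transfer.
 */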
static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
        writel_relaxed(hw->lli, phy->base + CX_LLI);
        writel_relaxed(hw->count, phy->base + CX_CNT);
        writel_relaxed(hw->saddr, phy->base + CX_SRC);
        writel_relaxed(hw->daddr, phy->base + CX_DST);
        writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
        writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
        u32 cnt = 0;

        cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
        cnt &= 0xffff;
        return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
        return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
        return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
        if (on) {
                /* set same priority */
                writel_relaxed(0x0, d->base + CH_PRI);

                /* unmask irq */
                writel_relaxed(0xffff, d->base + INT_TC1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
        } else {
                /* mask irq */
                writel_relaxed(0x0, d->base + INT_TC1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR2_MASK);
        }
}

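/*
 * Interrupt handler: walk the summary word in INT_STAT bit by bit. A set
 * bit in INT_TC1 means that physical channel completed its link list, so
 * the running descriptor's cookie is completed under the vchan lock and
 * the tasklet is scheduled to queue further work. Error interrupts are
 * only logged and acknowledged; no recovery is attempted here.
 */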
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c;
        u32 stat = readl_relaxed(d->base + INT_STAT);
        u32 tc1  = readl_relaxed(d->base + INT_TC1);
        u32 err1 = readl_relaxed(d->base + INT_ERR1);
        u32 err2 = readl_relaxed(d->base + INT_ERR2);
        u32 i, irq_chan = 0;

        while (stat) {
                i = __ffs(stat);
                stat &= (stat - 1);
                if (likely(tc1 & BIT(i))) {
                        p = &d->phy[i];
                        c = p->vchan;
                        if (c) {
                                unsigned long flags;

                                spin_lock_irqsave(&c->vc.lock, flags);
                                vchan_cookie_complete(&p->ds_run->vd);
                                p->ds_done = p->ds_run;
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        irq_chan |= BIT(i);
                }
                if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
                        dev_warn(d->slave.dev, "DMA ERR\n");
        }

        writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
        writel_relaxed(err1, d->base + INT_ERR1_RAW);
        writel_relaxed(err2, d->base + INT_ERR2_RAW);

        if (irq_chan) {
                tasklet_schedule(&d->task);
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
        }
}

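/*
 * Take the next issued descriptor from the virtual channel and hand it to
 * the attached physical channel, provided the hardware reports that
 * channel idle in CH_STAT. Returns -EAGAIN when nothing could be started.
 */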
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
        struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
                return -EAGAIN;

        if (vd) {
                struct k3_dma_desc_sw *ds =
                        container_of(vd, struct k3_dma_desc_sw, vd);
                /*
                 * fetch and remove request from vc->desc_issued
                 * so vc->desc_issued only contains desc pending
                 */
                list_del(&ds->vd.node);
                c->phy->ds_run = ds;
                c->phy->ds_done = NULL;
                /* start dma */
                k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
                return 0;
        }
        c->phy->ds_done = NULL;
        c->phy->ds_run = NULL;
        return -EAGAIN;
}

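/*
 * Scheduling tasklet. Physical channels (pchans) are a scarce resource
 * shared by the virtual channels (vchans). Pass one restarts pchans whose
 * descriptor completed, freeing any pchan whose vchan has nothing left
 * issued; pass two pairs free pchans with vchans waiting on
 * d->chan_pending; pass three kicks off the newly paired channels.
 * d->lock protects the pairing state, each vc.lock its descriptor lists.
 */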
static void k3_dma_tasklet(unsigned long arg)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c, *cn;
        unsigned pch, pch_alloc = 0;

        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                spin_lock_irq(&c->vc.lock);
                p = c->phy;
                if (p && p->ds_done) {
                        if (k3_dma_start_txd(c)) {
                                /* No current txd associated with this channel */
                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                                /* Mark this channel free */
                                c->phy = NULL;
                                p->vchan = NULL;
                        }
                }
                spin_unlock_irq(&c->vc.lock);
        }

        /* check new channel request in d->chan_pending */
        spin_lock_irq(&d->lock);
        for (pch = 0; pch < d->dma_channels; pch++) {
                p = &d->phy[pch];

                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
                        c = list_first_entry(&d->chan_pending,
                                struct k3_dma_chan, node);
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << pch;
                        /* Mark this channel allocated */
                        p->vchan = c;
                        c->phy = p;
                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
                }
        }
        spin_unlock_irq(&d->lock);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irq(&c->vc.lock);
                                k3_dma_start_txd(c);
                                spin_unlock_irq(&c->vc.lock);
                        }
                }
        }
}

static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
{
        return 0;
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
}

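/*
 * Residue calculation: a cookie still sitting on the issued list owes its
 * full size. For the descriptor currently on hardware, the remaining count
 * of the in-flight LLI is read from CX_CUR_CNT, the position in the chain
 * is derived from CX_LLI (which seems to hold the address of the next LLI
 * to fetch), and the counts of all not-yet-fetched entries are added up to
 * the terminating zero lli.
 */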
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
        } else if ((!p) || (!p->ds_run)) {
                bytes = 0;
        } else {
                struct k3_dma_desc_sw *ds = p->ds_run;
                u32 clli = 0, index = 0;

                bytes = k3_dma_get_curr_cnt(d, p);
                clli = k3_dma_get_curr_lli(p);
                index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].count;
                        /* end of lli */
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        dma_set_residue(state, bytes);
        return ret;
}

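/*
 * Move everything prepared on this vchan onto its issued list. A vchan
 * with no pchan yet is queued on d->chan_pending, and the tasklet is left
 * to pair it with a free physical channel.
 */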
static void k3_dma_issue_pending(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy) {
                        if (list_empty(&c->node)) {
                                /* if new channel, add chan_pending */
                                list_add_tail(&c->node, &d->chan_pending);
                                /* check in tasklet */
                                tasklet_schedule(&d->task);
                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                        }
                }
                spin_unlock(&d->lock);
        } else {
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

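/*
 * Fill one LLI entry. Entries live contiguously behind the software
 * descriptor, so the link address of entry N is simply the bus address of
 * entry N + 1, tagged with CX_LLI_CHAIN_EN. The prep routines zero the
 * last entry's lli afterwards to terminate the chain.
 */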
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
                        dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
        if ((num + 1) < ds->desc_num)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct k3_desc_hw);
        ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
        ds->desc_hw[num].count = len;
        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].config = ccfg;
}

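/*
 * memcpy preparation: the copy is split into DMA_MAX_SIZE chunks, one LLI
 * per chunk, all allocated inline behind the software descriptor. When
 * device_config() was never called on the channel, ccfg falls back to a
 * mem-to-mem default (incrementing both addresses, burst 16, 64-bit
 * width).
 */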
static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t copy = 0;
        int num = 0;

        if (!len)
                return NULL;

        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
                return NULL;
        }
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->size = len;
        ds->desc_num = num;
        num = 0;

        if (!c->ccfg) {
                /* default is memtomem, without calling device_config */
                c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
                c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
                c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
        }

        do {
                copy = min_t(size_t, len, DMA_MAX_SIZE);
                k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

                if (c->dir == DMA_MEM_TO_DEV) {
                        src += copy;
                } else if (c->dir == DMA_DEV_TO_MEM) {
                        dst += copy;
                } else {
                        src += copy;
                        dst += copy;
                }
                len -= copy;
        } while (len);

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

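/*
 * Slave scatter-gather preparation: first pre-count how many LLIs the list
 * expands to (an sg entry longer than DMA_MAX_SIZE is split), then fill
 * them with the device FIFO address on one side, as set up beforehand by
 * k3_dma_config().
 */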
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        struct scatterlist *sg;
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;

        if (sgl == NULL)
                return NULL;

        for_each_sg(sgl, sg, sglen, i) {
                avail = sg_dma_len(sg);
                if (avail > DMA_MAX_SIZE)
                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
        }

        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
                return NULL;
        }
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->desc_num = num;
        num = 0;

        for_each_sg(sgl, sg, sglen, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                total += avail;

                do {
                        len = min_t(size_t, avail, DMA_MAX_SIZE);

                        if (dir == DMA_MEM_TO_DEV) {
                                src = addr;
                                dst = c->dev_addr;
                        } else if (dir == DMA_DEV_TO_MEM) {
                                src = c->dev_addr;
                                dst = addr;
                        }

                        k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

                        addr += len;
                        avail -= len;
                } while (avail);
        }

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

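/*
 * Build the channel's CX_CFG word from the slave config. Judging by the
 * shifts used here and in the defines above, the layout is roughly: bit 0
 * enable, bits 2-3 flow direction, request line from bit 4, src/dst width
 * (log2 of bytes) at bits 12/16, src/dst burst (length - 1) at bits 20/24,
 * dst/src address increment at bits 30/31. The exact field widths are
 * inferred, not documented in this file.
 */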
static int k3_dma_config(struct dma_chan *chan,
                         struct dma_slave_config *cfg)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        u32 maxburst = 0, val = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (cfg == NULL)
                return -EINVAL;
        c->dir = cfg->direction;
        if (c->dir == DMA_DEV_TO_MEM) {
                c->ccfg = CX_CFG_DSTINCR;
                c->dev_addr = cfg->src_addr;
                maxburst = cfg->src_maxburst;
                width = cfg->src_addr_width;
        } else if (c->dir == DMA_MEM_TO_DEV) {
                c->ccfg = CX_CFG_SRCINCR;
                c->dev_addr = cfg->dst_addr;
                maxburst = cfg->dst_maxburst;
                width = cfg->dst_addr_width;
        }
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                val = __ffs(width);
                break;
        default:
                val = 3;
                break;
        }
        c->ccfg |= (val << 12) | (val << 16);

        /* burst is encoded as (length - 1) in a 4-bit field; clamp at 16 */
        if ((maxburst == 0) || (maxburst > 16))
                val = 15;
        else
                val = maxburst - 1;
        c->ccfg |= (val << 20) | (val << 24);
        c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

        /* specific request line */
        c->ccfg |= c->vc.chan.chan_id << 4;

        return 0;
}

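/*
 * Terminate: take the vchan off the pending list so the tasklet can no
 * longer schedule it, stop and acknowledge the pchan if one is attached,
 * then free every collected descriptor outside the lock.
 */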
static int k3_dma_terminate_all(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;
        LIST_HEAD(head);

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                k3_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                p->ds_run = p->ds_done = NULL;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;

        dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
        if (c->status == DMA_IN_PROGRESS) {
                c->status = DMA_PAUSED;
                if (p) {
                        k3_dma_pause_dma(p, false);
                } else {
                        spin_lock(&d->lock);
                        list_del_init(&c->node);
                        spin_unlock(&d->lock);
                }
        }

        return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->status == DMA_PAUSED) {
                c->status = DMA_IN_PROGRESS;
                if (p) {
                        k3_dma_pause_dma(p, true);
                } else if (!list_empty(&c->vc.desc_issued)) {
                        spin_lock(&d->lock);
                        list_add_tail(&c->node, &d->chan_pending);
                        spin_unlock(&d->lock);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
        struct k3_dma_desc_sw *ds =
                container_of(vd, struct k3_dma_desc_sw, vd);

        kfree(ds);
}

static const struct of_device_id k3_pdma_dt_ids[] = {
        { .compatible = "hisilicon,k3-dma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

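/*
 * dmaengine translation for "dmas" phandles: the single cell is taken as
 * the request line number and mapped straight onto the matching virtual
 * channel. A plausible consumer binding (illustrative sketch, not taken
 * from this file) would be:
 *
 *     dmas = <&dma0 21>, <&dma0 22>;
 *     dma-names = "rx", "tx";
 */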
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
                                                struct of_dma *ofdma)
{
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        /* d->chans holds d->dma_requests entries, so reject request == max */
        if (request >= d->dma_requests)
                return NULL;

        return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

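/*
 * Probe: map the MMIO window, read the channel/request counts from the
 * device tree, install the interrupt handler, carve the register space
 * into per-pchan windows (0x40 apart), create one vchan per request line,
 * and register with the dmaengine core and the OF DMA translation layer.
 */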
static int k3_dma_probe(struct platform_device *op)
{
        struct k3_dma_dev *d;
        const struct of_device_id *of_id;
        struct resource *iores;
        int i, ret, irq = 0;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_ioremap_resource(&op->dev, iores);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
        if (of_id) {
                of_property_read_u32(op->dev.of_node,
                                "dma-channels", &d->dma_channels);
                of_property_read_u32(op->dev.of_node,
                                "dma-requests", &d->dma_requests);
        }

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        irq = platform_get_irq(op, 0);
        if (irq < 0)
                return irq;
        ret = devm_request_irq(&op->dev, irq,
                        k3_dma_int_handler, 0, DRIVER_NAME, d);
        if (ret)
                return ret;

        /* init phy channel */
        d->phy = devm_kzalloc(&op->dev,
                d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
        if (d->phy == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct k3_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
        d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
        d->slave.device_tx_status = k3_dma_tx_status;
        d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
        d->slave.device_issue_pending = k3_dma_issue_pending;
        d->slave.device_config = k3_dma_config;
        d->slave.device_pause = k3_dma_transfer_pause;
        d->slave.device_resume = k3_dma_transfer_resume;
        d->slave.device_terminate_all = k3_dma_terminate_all;
        d->slave.copy_align = DMA_ALIGN;

        /* init virtual channel */
        d->chans = devm_kzalloc(&op->dev,
                d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
        if (d->chans == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct k3_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = k3_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        /* Enable clock before accessing registers */
        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }

        k3_dma_enable_dma(d, true);

        /* initialize scheduler state before channels become requestable */
        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
        platform_set_drvdata(op, d);

        ret = dma_async_device_register(&d->slave);
        if (ret)
                return ret;

        ret = of_dma_controller_register(op->dev.of_node,
                                        k3_of_dma_simple_xlate, d);
        if (ret)
                goto of_dma_register_fail;

        dev_info(&op->dev, "initialized\n");

        return 0;

of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
        return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
        struct k3_dma_chan *c, *cn;
        struct k3_dma_dev *d = platform_get_drvdata(op);

        dma_async_device_unregister(&d->slave);
        of_dma_controller_free(op->dev.of_node);

        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
        tasklet_kill(&d->task);
        clk_disable_unprepare(d->clk);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;

        stat = k3_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                        "chan 0x%x is running, fail to suspend\n", stat);
                return -EBUSY;
        }
        k3_dma_enable_dma(d, false);
        clk_disable_unprepare(d->clk);
        return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        int ret = 0;

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        k3_dma_enable_dma(d, true);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &k3_dma_pmops,
                .of_match_table = k3_pdma_dt_ids,
        },
        .probe          = k3_dma_probe,
        .remove         = k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");