Linux/drivers/dma/pch_dma.c

  1 /*
  2  * Topcliff PCH DMA controller driver
  3  * Copyright (c) 2010 Intel Corporation
  4  * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  5  *
  6  * This program is free software; you can redistribute it and/or modify
  7  * it under the terms of the GNU General Public License version 2 as
  8  * published by the Free Software Foundation.
  9  *
 10  * This program is distributed in the hope that it will be useful,
 11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13  * GNU General Public License for more details.
 14  *
 15  * You should have received a copy of the GNU General Public License
 16  * along with this program; if not, write to the Free Software
 17  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 18  */
 19 
 20 #include <linux/dmaengine.h>
 21 #include <linux/dma-mapping.h>
 22 #include <linux/init.h>
 23 #include <linux/pci.h>
 24 #include <linux/slab.h>
 25 #include <linux/interrupt.h>
 26 #include <linux/module.h>
 27 #include <linux/pch_dma.h>
 28 
 29 #include "dmaengine.h"
 30 
 31 #define DRV_NAME "pch-dma"
 32 
 33 #define DMA_CTL0_DISABLE                0x0
 34 #define DMA_CTL0_SG                     0x1
 35 #define DMA_CTL0_ONESHOT                0x2
 36 #define DMA_CTL0_MODE_MASK_BITS         0x3
 37 #define DMA_CTL0_DIR_SHIFT_BITS         2
 38 #define DMA_CTL0_BITS_PER_CH            4
 39 
 40 #define DMA_CTL2_START_SHIFT_BITS       8
 41 #define DMA_CTL2_IRQ_ENABLE_MASK        ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
 42 
 43 #define DMA_STATUS_IDLE                 0x0
 44 #define DMA_STATUS_DESC_READ            0x1
 45 #define DMA_STATUS_WAIT                 0x2
 46 #define DMA_STATUS_ACCESS               0x3
 47 #define DMA_STATUS_BITS_PER_CH          2
 48 #define DMA_STATUS_MASK_BITS            0x3
 49 #define DMA_STATUS_SHIFT_BITS           16
 50 #define DMA_STATUS_IRQ(x)               (0x1 << (x))
 51 #define DMA_STATUS0_ERR(x)              (0x1 << ((x) + 8))
 52 #define DMA_STATUS2_ERR(x)              (0x1 << (x))
 53 
 54 #define DMA_DESC_WIDTH_SHIFT_BITS       12
 55 #define DMA_DESC_WIDTH_1_BYTE           (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
 56 #define DMA_DESC_WIDTH_2_BYTES          (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
 57 #define DMA_DESC_WIDTH_4_BYTES          (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
 58 #define DMA_DESC_MAX_COUNT_1_BYTE       0x3FF
 59 #define DMA_DESC_MAX_COUNT_2_BYTES      0x3FF
 60 #define DMA_DESC_MAX_COUNT_4_BYTES      0x7FF
 61 #define DMA_DESC_END_WITHOUT_IRQ        0x0
 62 #define DMA_DESC_END_WITH_IRQ           0x1
 63 #define DMA_DESC_FOLLOW_WITHOUT_IRQ     0x2
 64 #define DMA_DESC_FOLLOW_WITH_IRQ        0x3
 65 
 66 #define MAX_CHAN_NR                     12
 67 
 68 #define DMA_MASK_CTL0_MODE      0x33333333
 69 #define DMA_MASK_CTL2_MODE      0x00003333
 70 
 71 static unsigned int init_nr_desc_per_channel = 64;
 72 module_param(init_nr_desc_per_channel, uint, 0644);
 73 MODULE_PARM_DESC(init_nr_desc_per_channel,
 74                  "initial descriptors per channel (default: 64)");
 75 
 76 struct pch_dma_desc_regs {
 77         u32     dev_addr;
 78         u32     mem_addr;
 79         u32     size;
 80         u32     next;
 81 };
 82 
 83 struct pch_dma_regs {
 84         u32     dma_ctl0;
 85         u32     dma_ctl1;
 86         u32     dma_ctl2;
 87         u32     dma_ctl3;
 88         u32     dma_sts0;
 89         u32     dma_sts1;
 90         u32     dma_sts2;
 91         u32     reserved3;
 92         struct pch_dma_desc_regs desc[MAX_CHAN_NR];
 93 };
 94 
 95 struct pch_dma_desc {
 96         struct pch_dma_desc_regs regs;
 97         struct dma_async_tx_descriptor txd;
 98         struct list_head        desc_node;
 99         struct list_head        tx_list;
100 };
101 
102 struct pch_dma_chan {
103         struct dma_chan         chan;
104         void __iomem *membase;
105         enum dma_transfer_direction dir;
106         struct tasklet_struct   tasklet;
107         unsigned long           err_status;
108 
109         spinlock_t              lock;
110 
111         struct list_head        active_list;
112         struct list_head        queue;
113         struct list_head        free_list;
114         unsigned int            descs_allocated;
115 };
116 
117 #define PDC_DEV_ADDR    0x00
118 #define PDC_MEM_ADDR    0x04
119 #define PDC_SIZE        0x08
120 #define PDC_NEXT        0x0C
121 
122 #define channel_readl(pdc, name) \
123         readl((pdc)->membase + PDC_##name)
124 #define channel_writel(pdc, name, val) \
125         writel((val), (pdc)->membase + PDC_##name)
126 
127 struct pch_dma {
128         struct dma_device       dma;
129         void __iomem *membase;
130         struct pci_pool         *pool;
131         struct pch_dma_regs     regs;
132         struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
133         struct pch_dma_chan     channels[MAX_CHAN_NR];
134 };
135 
136 #define PCH_DMA_CTL0    0x00
137 #define PCH_DMA_CTL1    0x04
138 #define PCH_DMA_CTL2    0x08
139 #define PCH_DMA_CTL3    0x0C
140 #define PCH_DMA_STS0    0x10
141 #define PCH_DMA_STS1    0x14
142 #define PCH_DMA_STS2    0x18
143 
144 #define dma_readl(pd, name) \
145         readl((pd)->membase + PCH_DMA_##name)
146 #define dma_writel(pd, name, val) \
147         writel((val), (pd)->membase + PCH_DMA_##name)
148 
149 static inline
150 struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
151 {
152         return container_of(txd, struct pch_dma_desc, txd);
153 }
154 
155 static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
156 {
157         return container_of(chan, struct pch_dma_chan, chan);
158 }
159 
160 static inline struct pch_dma *to_pd(struct dma_device *ddev)
161 {
162         return container_of(ddev, struct pch_dma, dma);
163 }
164 
165 static inline struct device *chan2dev(struct dma_chan *chan)
166 {
167         return &chan->dev->device;
168 }
169 
170 static inline struct device *chan2parent(struct dma_chan *chan)
171 {
172         return chan->dev->device.parent;
173 }
174 
175 static inline
176 struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
177 {
178         return list_first_entry(&pd_chan->active_list,
179                                 struct pch_dma_desc, desc_node);
180 }
181 
182 static inline
183 struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
184 {
185         return list_first_entry(&pd_chan->queue,
186                                 struct pch_dma_desc, desc_node);
187 }
188 
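/*
 * Per-channel interrupt enables live in CTL2: channels 0-7 map to bits 0-7,
 * while channels 8-11 map to bits 16-19 (chan_id + 8), skipping the eight
 * bits that start at DMA_CTL2_START_SHIFT_BITS.
 */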
189 static void pdc_enable_irq(struct dma_chan *chan, int enable)
190 {
191         struct pch_dma *pd = to_pd(chan->device);
192         u32 val;
193         int pos;
194 
195         if (chan->chan_id < 8)
196                 pos = chan->chan_id;
197         else
198                 pos = chan->chan_id + 8;
199 
200         val = dma_readl(pd, CTL2);
201 
202         if (enable)
203                 val |= 0x1 << pos;
204         else
205                 val &= ~(0x1 << pos);
206 
207         dma_writel(pd, CTL2, val);
208 
209         dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
210                 chan->chan_id, val);
211 }
212 
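/*
 * CTL0 (channels 0-7) and CTL3 (channels 8-11) carry one 4-bit field per
 * channel (DMA_CTL0_BITS_PER_CH): bits [1:0] hold the mode (disable,
 * scatter/gather or one-shot) and bit [2] (DMA_CTL0_DIR_SHIFT_BITS) the
 * direction, set for memory-to-device.  DMA_MASK_CTL0_MODE and
 * DMA_MASK_CTL2_MODE make the read-modify-write below put 11b into every
 * other channel's mode field, evidently a "keep current setting" encoding,
 * so reprogramming one channel does not disturb the rest.
 */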
213 static void pdc_set_dir(struct dma_chan *chan)
214 {
215         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
216         struct pch_dma *pd = to_pd(chan->device);
217         u32 val;
218         u32 mask_mode;
219         u32 mask_ctl;
220 
221         if (chan->chan_id < 8) {
222                 val = dma_readl(pd, CTL0);
223 
224                 mask_mode = DMA_CTL0_MODE_MASK_BITS <<
225                                         (DMA_CTL0_BITS_PER_CH * chan->chan_id);
226                 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
227                                        (DMA_CTL0_BITS_PER_CH * chan->chan_id));
228                 val &= mask_mode;
229                 if (pd_chan->dir == DMA_MEM_TO_DEV)
230                         val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
231                                        DMA_CTL0_DIR_SHIFT_BITS);
232                 else
233                         val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
234                                          DMA_CTL0_DIR_SHIFT_BITS));
235 
236                 val |= mask_ctl;
237                 dma_writel(pd, CTL0, val);
238         } else {
239                 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
240                 val = dma_readl(pd, CTL3);
241 
242                 mask_mode = DMA_CTL0_MODE_MASK_BITS <<
243                                                 (DMA_CTL0_BITS_PER_CH * ch);
244                 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
245                                                  (DMA_CTL0_BITS_PER_CH * ch));
246                 val &= mask_mode;
247                 if (pd_chan->dir == DMA_MEM_TO_DEV)
248                         val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
249                                        DMA_CTL0_DIR_SHIFT_BITS);
250                 else
251                         val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
252                                          DMA_CTL0_DIR_SHIFT_BITS));
253                 val |= mask_ctl;
254                 dma_writel(pd, CTL3, val);
255         }
256 
257         dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
258                 chan->chan_id, val);
259 }
260 
261 static void pdc_set_mode(struct dma_chan *chan, u32 mode)
262 {
263         struct pch_dma *pd = to_pd(chan->device);
264         u32 val;
265         u32 mask_ctl;
266         u32 mask_dir;
267 
268         if (chan->chan_id < 8) {
269                 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
270                            (DMA_CTL0_BITS_PER_CH * chan->chan_id));
271                 mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
272                                  DMA_CTL0_DIR_SHIFT_BITS);
273                 val = dma_readl(pd, CTL0);
274                 val &= mask_dir;
275                 val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
276                 val |= mask_ctl;
277                 dma_writel(pd, CTL0, val);
278         } else {
279                 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
280                 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
281                                                  (DMA_CTL0_BITS_PER_CH * ch));
282                 mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
283                                  DMA_CTL0_DIR_SHIFT_BITS);
284                 val = dma_readl(pd, CTL3);
285                 val &= mask_dir;
286                 val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
287                 val |= mask_ctl;
288                 dma_writel(pd, CTL3, val);
289         }
290 
291         dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
292                 chan->chan_id, val);
293 }
294 
295 static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
296 {
297         struct pch_dma *pd = to_pd(pd_chan->chan.device);
298         u32 val;
299 
300         val = dma_readl(pd, STS0);
301         return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
302                         DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
303 }
304 
305 static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
306 {
307         struct pch_dma *pd = to_pd(pd_chan->chan.device);
308         u32 val;
309 
310         val = dma_readl(pd, STS2);
311         return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
312                         DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
313 }
314 
315 static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
316 {
317         u32 sts;
318 
319         if (pd_chan->chan.chan_id < 8)
320                 sts = pdc_get_status0(pd_chan);
321         else
322                 sts = pdc_get_status2(pd_chan);
323 
324 
325         if (sts == DMA_STATUS_IDLE)
326                 return true;
327         else
328                 return false;
329 }
330 
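/*
 * A descriptor whose tx_list is empty describes a single transfer and is
 * programmed straight into the channel registers in one-shot mode; a chained
 * descriptor only has its bus address written to NEXT and the channel is put
 * into scatter/gather mode so the hardware walks the list itself.
 */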
331 static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
332 {
333         if (!pdc_is_idle(pd_chan)) {
334                 dev_err(chan2dev(&pd_chan->chan),
335                         "BUG: Attempt to start non-idle channel\n");
336                 return;
337         }
338 
339         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
340                 pd_chan->chan.chan_id, desc->regs.dev_addr);
341         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
342                 pd_chan->chan.chan_id, desc->regs.mem_addr);
343         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
344                 pd_chan->chan.chan_id, desc->regs.size);
345         dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
346                 pd_chan->chan.chan_id, desc->regs.next);
347 
348         if (list_empty(&desc->tx_list)) {
349                 channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
350                 channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
351                 channel_writel(pd_chan, SIZE, desc->regs.size);
352                 channel_writel(pd_chan, NEXT, desc->regs.next);
353                 pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
354         } else {
355                 channel_writel(pd_chan, NEXT, desc->txd.phys);
356                 pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
357         }
358 }
359 
360 static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
361                                struct pch_dma_desc *desc)
362 {
363         struct dma_async_tx_descriptor *txd = &desc->txd;
364         dma_async_tx_callback callback = txd->callback;
365         void *param = txd->callback_param;
366 
367         list_splice_init(&desc->tx_list, &pd_chan->free_list);
368         list_move(&desc->desc_node, &pd_chan->free_list);
369 
370         if (callback)
371                 callback(param);
372 }
373 
374 static void pdc_complete_all(struct pch_dma_chan *pd_chan)
375 {
376         struct pch_dma_desc *desc, *_d;
377         LIST_HEAD(list);
378 
379         BUG_ON(!pdc_is_idle(pd_chan));
380 
381         if (!list_empty(&pd_chan->queue))
382                 pdc_dostart(pd_chan, pdc_first_queued(pd_chan));
383 
384         list_splice_init(&pd_chan->active_list, &list);
385         list_splice_init(&pd_chan->queue, &pd_chan->active_list);
386 
387         list_for_each_entry_safe(desc, _d, &list, desc_node)
388                 pdc_chain_complete(pd_chan, desc);
389 }
390 
391 static void pdc_handle_error(struct pch_dma_chan *pd_chan)
392 {
393         struct pch_dma_desc *bad_desc;
394 
395         bad_desc = pdc_first_active(pd_chan);
396         list_del(&bad_desc->desc_node);
397 
398         list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
399 
400         if (!list_empty(&pd_chan->active_list))
401                 pdc_dostart(pd_chan, pdc_first_active(pd_chan));
402 
403         dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
404         dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
405                  bad_desc->txd.cookie);
406 
407         pdc_chain_complete(pd_chan, bad_desc);
408 }
409 
410 static void pdc_advance_work(struct pch_dma_chan *pd_chan)
411 {
412         if (list_empty(&pd_chan->active_list) ||
413                 list_is_singular(&pd_chan->active_list)) {
414                 pdc_complete_all(pd_chan);
415         } else {
416                 pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
417                 pdc_dostart(pd_chan, pdc_first_active(pd_chan));
418         }
419 }
420 
421 static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
422 {
423         struct pch_dma_desc *desc = to_pd_desc(txd);
424         struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
425         dma_cookie_t cookie;
426 
427         spin_lock(&pd_chan->lock);
428         cookie = dma_cookie_assign(txd);
429 
430         if (list_empty(&pd_chan->active_list)) {
431                 list_add_tail(&desc->desc_node, &pd_chan->active_list);
432                 pdc_dostart(pd_chan, desc);
433         } else {
434                 list_add_tail(&desc->desc_node, &pd_chan->queue);
435         }
436 
437         spin_unlock(&pd_chan->lock);
438         return 0;
439 }
440 
441 static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
442 {
443         struct pch_dma_desc *desc = NULL;
444         struct pch_dma *pd = to_pd(chan->device);
445         dma_addr_t addr;
446 
447         desc = pci_pool_alloc(pd->pool, flags, &addr);
448         if (desc) {
449                 memset(desc, 0, sizeof(struct pch_dma_desc));
450                 INIT_LIST_HEAD(&desc->tx_list);
451                 dma_async_tx_descriptor_init(&desc->txd, chan);
452                 desc->txd.tx_submit = pd_tx_submit;
453                 desc->txd.flags = DMA_CTRL_ACK;
454                 desc->txd.phys = addr;
455         }
456 
457         return desc;
458 }
459 
460 static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
461 {
462         struct pch_dma_desc *desc, *_d;
463         struct pch_dma_desc *ret = NULL;
464         int i = 0;
465 
466         spin_lock(&pd_chan->lock);
467         list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
468                 i++;
469                 if (async_tx_test_ack(&desc->txd)) {
470                         list_del(&desc->desc_node);
471                         ret = desc;
472                         break;
473                 }
474                 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
475         }
476         spin_unlock(&pd_chan->lock);
477         dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
478 
479         if (!ret) {
480                 ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
481                 if (ret) {
482                         spin_lock(&pd_chan->lock);
483                         pd_chan->descs_allocated++;
484                         spin_unlock(&pd_chan->lock);
485                 } else {
486                         dev_err(chan2dev(&pd_chan->chan),
487                                 "failed to alloc desc\n");
488                 }
489         }
490 
491         return ret;
492 }
493 
494 static void pdc_desc_put(struct pch_dma_chan *pd_chan,
495                          struct pch_dma_desc *desc)
496 {
497         if (desc) {
498                 spin_lock(&pd_chan->lock);
499                 list_splice_init(&desc->tx_list, &pd_chan->free_list);
500                 list_add(&desc->desc_node, &pd_chan->free_list);
501                 spin_unlock(&pd_chan->lock);
502         }
503 }
504 
505 static int pd_alloc_chan_resources(struct dma_chan *chan)
506 {
507         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
508         struct pch_dma_desc *desc;
509         LIST_HEAD(tmp_list);
510         int i;
511 
512         if (!pdc_is_idle(pd_chan)) {
513                 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
514                 return -EIO;
515         }
516 
517         if (!list_empty(&pd_chan->free_list))
518                 return pd_chan->descs_allocated;
519 
520         for (i = 0; i < init_nr_desc_per_channel; i++) {
521                 desc = pdc_alloc_desc(chan, GFP_KERNEL);
522 
523                 if (!desc) {
524                         dev_warn(chan2dev(chan),
525                                 "Only allocated %d initial descriptors\n", i);
526                         break;
527                 }
528 
529                 list_add_tail(&desc->desc_node, &tmp_list);
530         }
531 
532         spin_lock_irq(&pd_chan->lock);
533         list_splice(&tmp_list, &pd_chan->free_list);
534         pd_chan->descs_allocated = i;
535         dma_cookie_init(chan);
536         spin_unlock_irq(&pd_chan->lock);
537 
538         pdc_enable_irq(chan, 1);
539 
540         return pd_chan->descs_allocated;
541 }
542 
543 static void pd_free_chan_resources(struct dma_chan *chan)
544 {
545         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
546         struct pch_dma *pd = to_pd(chan->device);
547         struct pch_dma_desc *desc, *_d;
548         LIST_HEAD(tmp_list);
549 
550         BUG_ON(!pdc_is_idle(pd_chan));
551         BUG_ON(!list_empty(&pd_chan->active_list));
552         BUG_ON(!list_empty(&pd_chan->queue));
553 
554         spin_lock_irq(&pd_chan->lock);
555         list_splice_init(&pd_chan->free_list, &tmp_list);
556         pd_chan->descs_allocated = 0;
557         spin_unlock_irq(&pd_chan->lock);
558 
559         list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
560                 pci_pool_free(pd->pool, desc, desc->txd.phys);
561 
562         pdc_enable_irq(chan, 0);
563 }
564 
565 static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
566                                     struct dma_tx_state *txstate)
567 {
568         return dma_cookie_status(chan, cookie, txstate);
569 }
570 
571 static void pd_issue_pending(struct dma_chan *chan)
572 {
573         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
574 
575         if (pdc_is_idle(pd_chan)) {
576                 spin_lock(&pd_chan->lock);
577                 pdc_advance_work(pd_chan);
578                 spin_unlock(&pd_chan->lock);
579         }
580 }
581 
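/*
 * regs.next doubles as the link to the next descriptor and as control bits:
 * the low two bits carry the FOLLOW/END and IRQ flags, while the upper bits
 * take the bus address of the next pch_dma_desc (the descriptor pool is
 * created with 4-byte alignment, so the two never overlap).
 */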
582 static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
583                         struct scatterlist *sgl, unsigned int sg_len,
584                         enum dma_transfer_direction direction, unsigned long flags,
585                         void *context)
586 {
587         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
588         struct pch_dma_slave *pd_slave = chan->private;
589         struct pch_dma_desc *first = NULL;
590         struct pch_dma_desc *prev = NULL;
591         struct pch_dma_desc *desc = NULL;
592         struct scatterlist *sg;
593         dma_addr_t reg;
594         int i;
595 
596         if (unlikely(!sg_len)) {
597                 dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
598                 return NULL;
599         }
600 
601         if (direction == DMA_DEV_TO_MEM)
602                 reg = pd_slave->rx_reg;
603         else if (direction == DMA_MEM_TO_DEV)
604                 reg = pd_slave->tx_reg;
605         else
606                 return NULL;
607 
608         pd_chan->dir = direction;
609         pdc_set_dir(chan);
610 
611         for_each_sg(sgl, sg, sg_len, i) {
612                 desc = pdc_desc_get(pd_chan);
613 
614                 if (!desc)
615                         goto err_desc_get;
616 
617                 desc->regs.dev_addr = reg;
618                 desc->regs.mem_addr = sg_dma_address(sg);
619                 desc->regs.size = sg_dma_len(sg);
620                 desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
621 
622                 switch (pd_slave->width) {
623                 case PCH_DMA_WIDTH_1_BYTE:
624                         if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
625                                 goto err_desc_get;
626                         desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
627                         break;
628                 case PCH_DMA_WIDTH_2_BYTES:
629                         if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
630                                 goto err_desc_get;
631                         desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
632                         break;
633                 case PCH_DMA_WIDTH_4_BYTES:
634                         if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
635                                 goto err_desc_get;
636                         desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
637                         break;
638                 default:
639                         goto err_desc_get;
640                 }
641 
642                 if (!first) {
643                         first = desc;
644                 } else {
645                         prev->regs.next |= desc->txd.phys;
646                         list_add_tail(&desc->desc_node, &first->tx_list);
647                 }
648 
649                 prev = desc;
650         }
651 
652         if (flags & DMA_PREP_INTERRUPT)
653                 desc->regs.next = DMA_DESC_END_WITH_IRQ;
654         else
655                 desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
656 
657         first->txd.cookie = -EBUSY;
658         desc->txd.flags = flags;
659 
660         return &first->txd;
661 
662 err_desc_get:
663         dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
664         pdc_desc_put(pd_chan, first);
665         return NULL;
666 }
667 
668 static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
669                              unsigned long arg)
670 {
671         struct pch_dma_chan *pd_chan = to_pd_chan(chan);
672         struct pch_dma_desc *desc, *_d;
673         LIST_HEAD(list);
674 
675         if (cmd != DMA_TERMINATE_ALL)
676                 return -ENXIO;
677 
678         spin_lock_irq(&pd_chan->lock);
679 
680         pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
681 
682         list_splice_init(&pd_chan->active_list, &list);
683         list_splice_init(&pd_chan->queue, &list);
684 
685         list_for_each_entry_safe(desc, _d, &list, desc_node)
686                 pdc_chain_complete(pd_chan, desc);
687 
688         spin_unlock_irq(&pd_chan->lock);
689 
690         return 0;
691 }
692 
693 static void pdc_tasklet(unsigned long data)
694 {
695         struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
696         unsigned long flags;
697 
698         if (!pdc_is_idle(pd_chan)) {
699                 dev_err(chan2dev(&pd_chan->chan),
700                         "BUG: handle non-idle channel in tasklet\n");
701                 return;
702         }
703 
704         spin_lock_irqsave(&pd_chan->lock, flags);
705         if (test_and_clear_bit(0, &pd_chan->err_status))
706                 pdc_handle_error(pd_chan);
707         else
708                 pdc_advance_work(pd_chan);
709         spin_unlock_irqrestore(&pd_chan->lock, flags);
710 }
711 
712 static irqreturn_t pd_irq(int irq, void *devid)
713 {
714         struct pch_dma *pd = (struct pch_dma *)devid;
715         struct pch_dma_chan *pd_chan;
716         u32 sts0;
717         u32 sts2;
718         int i;
719         int ret0 = IRQ_NONE;
720         int ret2 = IRQ_NONE;
721 
722         sts0 = dma_readl(pd, STS0);
723         sts2 = dma_readl(pd, STS2);
724 
725         dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
726 
727         for (i = 0; i < pd->dma.chancnt; i++) {
728                 pd_chan = &pd->channels[i];
729 
730                 if (i < 8) {
731                         if (sts0 & DMA_STATUS_IRQ(i)) {
732                                 if (sts0 & DMA_STATUS0_ERR(i))
733                                         set_bit(0, &pd_chan->err_status);
734 
735                                 tasklet_schedule(&pd_chan->tasklet);
736                                 ret0 = IRQ_HANDLED;
737                         }
738                 } else {
739                         if (sts2 & DMA_STATUS_IRQ(i - 8)) {
740                                 if (sts2 & DMA_STATUS2_ERR(i))
741                                         set_bit(0, &pd_chan->err_status);
742 
743                                 tasklet_schedule(&pd_chan->tasklet);
744                                 ret2 = IRQ_HANDLED;
745                         }
746                 }
747         }
748 
749         /* clear interrupt bits in status register */
750         if (ret0)
751                 dma_writel(pd, STS0, sts0);
752         if (ret2)
753                 dma_writel(pd, STS2, sts2);
754 
755         return ret0 | ret2;
756 }
757 
758 #ifdef  CONFIG_PM
759 static void pch_dma_save_regs(struct pch_dma *pd)
760 {
761         struct pch_dma_chan *pd_chan;
762         struct dma_chan *chan, *_c;
763         int i = 0;
764 
765         pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
766         pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
767         pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
768         pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
769 
770         list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
771                 pd_chan = to_pd_chan(chan);
772 
773                 pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
774                 pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
775                 pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
776                 pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);
777 
778                 i++;
779         }
780 }
781 
782 static void pch_dma_restore_regs(struct pch_dma *pd)
783 {
784         struct pch_dma_chan *pd_chan;
785         struct dma_chan *chan, *_c;
786         int i = 0;
787 
788         dma_writel(pd, CTL0, pd->regs.dma_ctl0);
789         dma_writel(pd, CTL1, pd->regs.dma_ctl1);
790         dma_writel(pd, CTL2, pd->regs.dma_ctl2);
791         dma_writel(pd, CTL3, pd->regs.dma_ctl3);
792 
793         list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
794                 pd_chan = to_pd_chan(chan);
795 
796                 channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
797                 channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
798                 channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
799                 channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);
800 
801                 i++;
802         }
803 }
804 
805 static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
806 {
807         struct pch_dma *pd = pci_get_drvdata(pdev);
808 
809         if (pd)
810                 pch_dma_save_regs(pd);
811 
812         pci_save_state(pdev);
813         pci_disable_device(pdev);
814         pci_set_power_state(pdev, pci_choose_state(pdev, state));
815 
816         return 0;
817 }
818 
819 static int pch_dma_resume(struct pci_dev *pdev)
820 {
821         struct pch_dma *pd = pci_get_drvdata(pdev);
822         int err;
823 
824         pci_set_power_state(pdev, PCI_D0);
825         pci_restore_state(pdev);
826 
827         err = pci_enable_device(pdev);
828         if (err) {
829                 dev_dbg(&pdev->dev, "failed to enable device\n");
830                 return err;
831         }
832 
833         if (pd)
834                 pch_dma_restore_regs(pd);
835 
836         return 0;
837 }
838 #endif
839 
840 static int pch_dma_probe(struct pci_dev *pdev,
841                                    const struct pci_device_id *id)
842 {
843         struct pch_dma *pd;
844         struct pch_dma_regs *regs;
845         unsigned int nr_channels;
846         int err;
847         int i;
848 
849         nr_channels = id->driver_data;
850         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
851         if (!pd)
852                 return -ENOMEM;
853 
854         pci_set_drvdata(pdev, pd);
855 
856         err = pci_enable_device(pdev);
857         if (err) {
858                 dev_err(&pdev->dev, "Cannot enable PCI device\n");
859                 goto err_free_mem;
860         }
861 
862         if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
863                 dev_err(&pdev->dev, "Cannot find proper base address\n");
864                 err = -ENODEV;
865                 goto err_disable_pdev;
866         }
867 
868         err = pci_request_regions(pdev, DRV_NAME);
869         if (err) {
870                 dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
871                 goto err_disable_pdev;
872         }
873 
874         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
875         if (err) {
876                 dev_err(&pdev->dev, "Cannot set proper DMA config\n");
877                 goto err_free_res;
878         }
879 
880         regs = pd->membase = pci_iomap(pdev, 1, 0);
881         if (!pd->membase) {
882                 dev_err(&pdev->dev, "Cannot map MMIO registers\n");
883                 err = -ENOMEM;
884                 goto err_free_res;
885         }
886 
887         pci_set_master(pdev);
888 
889         err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
890         if (err) {
891                 dev_err(&pdev->dev, "Failed to request IRQ\n");
892                 goto err_iounmap;
893         }
894 
895         pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
896                                    sizeof(struct pch_dma_desc), 4, 0);
897         if (!pd->pool) {
898                 dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
899                 err = -ENOMEM;
900                 goto err_free_irq;
901         }
902 
903         pd->dma.dev = &pdev->dev;
904 
905         INIT_LIST_HEAD(&pd->dma.channels);
906 
907         for (i = 0; i < nr_channels; i++) {
908                 struct pch_dma_chan *pd_chan = &pd->channels[i];
909 
910                 pd_chan->chan.device = &pd->dma;
911                 dma_cookie_init(&pd_chan->chan);
912 
913                 pd_chan->membase = &regs->desc[i];
914 
915                 spin_lock_init(&pd_chan->lock);
916 
917                 INIT_LIST_HEAD(&pd_chan->active_list);
918                 INIT_LIST_HEAD(&pd_chan->queue);
919                 INIT_LIST_HEAD(&pd_chan->free_list);
920 
921                 tasklet_init(&pd_chan->tasklet, pdc_tasklet,
922                              (unsigned long)pd_chan);
923                 list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
924         }
925 
926         dma_cap_zero(pd->dma.cap_mask);
927         dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
928         dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);
929 
930         pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
931         pd->dma.device_free_chan_resources = pd_free_chan_resources;
932         pd->dma.device_tx_status = pd_tx_status;
933         pd->dma.device_issue_pending = pd_issue_pending;
934         pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
935         pd->dma.device_control = pd_device_control;
936 
937         err = dma_async_device_register(&pd->dma);
938         if (err) {
939                 dev_err(&pdev->dev, "Failed to register DMA device\n");
940                 goto err_free_pool;
941         }
942 
943         return 0;
944 
945 err_free_pool:
946         pci_pool_destroy(pd->pool);
947 err_free_irq:
948         free_irq(pdev->irq, pd);
949 err_iounmap:
950         pci_iounmap(pdev, pd->membase);
951 err_free_res:
952         pci_release_regions(pdev);
953 err_disable_pdev:
954         pci_disable_device(pdev);
955 err_free_mem:
956         return err;
957 }
958 
959 static void pch_dma_remove(struct pci_dev *pdev)
960 {
961         struct pch_dma *pd = pci_get_drvdata(pdev);
962         struct pch_dma_chan *pd_chan;
963         struct dma_chan *chan, *_c;
964 
965         if (pd) {
966                 dma_async_device_unregister(&pd->dma);
967 
968                 free_irq(pdev->irq, pd);
969 
970                 list_for_each_entry_safe(chan, _c, &pd->dma.channels,
971                                          device_node) {
972                         pd_chan = to_pd_chan(chan);
973 
974                         tasklet_kill(&pd_chan->tasklet);
975                 }
976 
977                 pci_pool_destroy(pd->pool);
978                 pci_iounmap(pdev, pd->membase);
979                 pci_release_regions(pdev);
980                 pci_disable_device(pdev);
981                 kfree(pd);
982         }
983 }
984 
985 /* PCI Device ID of DMA device */
986 #define PCI_VENDOR_ID_ROHM             0x10DB
987 #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH        0x8810
988 #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH        0x8815
989 #define PCI_DEVICE_ID_ML7213_DMA1_8CH   0x8026
990 #define PCI_DEVICE_ID_ML7213_DMA2_8CH   0x802B
991 #define PCI_DEVICE_ID_ML7213_DMA3_4CH   0x8034
992 #define PCI_DEVICE_ID_ML7213_DMA4_12CH  0x8032
993 #define PCI_DEVICE_ID_ML7223_DMA1_4CH   0x800B
994 #define PCI_DEVICE_ID_ML7223_DMA2_4CH   0x800E
995 #define PCI_DEVICE_ID_ML7223_DMA3_4CH   0x8017
996 #define PCI_DEVICE_ID_ML7223_DMA4_4CH   0x803B
997 #define PCI_DEVICE_ID_ML7831_DMA1_8CH   0x8810
998 #define PCI_DEVICE_ID_ML7831_DMA2_4CH   0x8815
999 
1000 const struct pci_device_id pch_dma_id_table[] = {
1001         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
1002         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
1003         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
1004         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
1005         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
1006         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
1007         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
1008         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
1009         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
1010         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
1011         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
1012         { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
1013         { 0, },
1014 };
1015 
1016 static struct pci_driver pch_dma_driver = {
1017         .name           = DRV_NAME,
1018         .id_table       = pch_dma_id_table,
1019         .probe          = pch_dma_probe,
1020         .remove         = pch_dma_remove,
1021 #ifdef CONFIG_PM
1022         .suspend        = pch_dma_suspend,
1023         .resume         = pch_dma_resume,
1024 #endif
1025 };
1026 
1027 module_pci_driver(pch_dma_driver);
1028 
1029 MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
1030                    "DMA controller driver");
1031 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
1032 MODULE_LICENSE("GPL v2");
1033 MODULE_DEVICE_TABLE(pci, pch_dma_id_table);
1034 
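The controller is only reachable through the dmaengine slave API: pd_prep_slave_sg() pulls the peripheral register address and transfer width out of a struct pch_dma_slave (from <linux/pch_dma.h>) that the client hangs off chan->private. The sketch below shows how a client driver might wire that up, modeled loosely on the on-chip UART/SPI users of this engine; the filter logic, helper names and the width/flag choices are illustrative assumptions, not part of this file.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/pch_dma.h>

/* Filter callback for dma_request_channel(): pick the requested channel on
 * the pch_dma device and attach the slave parameters, which
 * pd_prep_slave_sg() above reads back through chan->private. */
static bool pch_dma_filter(struct dma_chan *chan, void *slave)
{
	struct pch_dma_slave *param = slave;

	if (chan->chan_id == param->chan_id &&
	    chan->device->dev == param->dma_dev) {
		chan->private = param;
		return true;
	}
	return false;
}

/* Request a pch_dma channel for @param.  @param is caller-owned and must
 * outlive the channel, since every prep call dereferences chan->private. */
static struct dma_chan *pch_dma_get_chan(struct pch_dma_slave *param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, pch_dma_filter, param);
}

/* Queue one memory-to-device transfer of @len bytes (already DMA-mapped at
 * @buf) on @chan and kick the engine. */
static int pch_dma_send(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;	/* <= DMA_DESC_MAX_COUNT_1_BYTE for 1-byte width */

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

Because chan->private is read again on every prep call, the pch_dma_slave must stay valid for as long as the channel is held; the in-tree users keep it in their own driver-private state rather than on the stack.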
