Linux/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c

/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
                .x_shft = (xshift), \
                .y_shft = (yshift), \
                .cpp    = (bytes_per_pixel), \
                .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
                .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
        }

static const struct {
        uint32_t x_shft;        /* unused X-bits (as part of bpp) */
        uint32_t y_shft;        /* unused Y-bits (as part of bpp) */
        uint32_t cpp;           /* bytes/chars per pixel */
        uint32_t slot_w;        /* width of each slot (in pixels) */
        uint32_t slot_h;        /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
        [TILFMT_8BIT]  = GEOM(0, 0, 1),
        [TILFMT_16BIT] = GEOM(0, 1, 2),
        [TILFMT_32BIT] = GEOM(1, 1, 4),
        [TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
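
/*
 * Worked example (illustrative, not in the original source; assumes
 * SLOT_WIDTH_BITS = 6 and SLOT_HEIGHT_BITS = 6 from omap_dmm_priv.h, so
 * that each slot covers exactly one 4 KiB page):
 *
 *   TILFMT_8BIT:  GEOM(0, 0, 1) -> slot_w = 64, slot_h = 64, 64*64*1 = 4096
 *   TILFMT_16BIT: GEOM(0, 1, 2) -> slot_w = 64, slot_h = 32, 64*32*2 = 4096
 *   TILFMT_32BIT: GEOM(1, 1, 4) -> slot_w = 32, slot_h = 32, 32*32*4 = 4096
 *
 * i.e. the x/y shifts trade pixel count against bytes-per-pixel so that
 * every format's slot is backed by one physical page.
 */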


/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
        [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
                        DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
        [PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
                        DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
        return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
        writel(val, dmm->base + reg);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
        void *ptr;
        struct refill_engine *engine = txn->engine_handle;

        /* dmm programming requires 16 byte aligned addresses */
        txn->current_pa = round_up(txn->current_pa, 16);
        txn->current_va = (void *)round_up((long)txn->current_va, 16);

        ptr = txn->current_va;
        *pa = txn->current_pa;

        txn->current_pa += sz;
        txn->current_va += sz;

        BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

        return ptr;
}
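
/*
 * Illustrative note (not in the original source): alloc_dma() is a bump
 * allocator over the engine's pre-allocated refill buffer. Two
 * back-to-back calls pack descriptors contiguously, e.g.:
 *
 *   struct pat *pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
 *   uint32_t *data  = alloc_dma(txn, 4 * nslots, &data_pa);
 *
 * Each call first rounds current_pa/current_va up to 16 bytes, so
 * data_pa lands on the first 16-byte boundary after *pat. Nothing is
 * freed individually; the whole buffer is recycled when the
 * transaction's engine is released.
 */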

/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
        struct dmm *dmm = engine->dmm;
        uint32_t r = 0, err, i;

        i = DMM_FIXED_RETRY_COUNT;
        while (true) {
                r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
                err = r & DMM_PATSTATUS_ERR;
                if (err)
                        return -EFAULT;

                if ((r & wait_mask) == wait_mask)
                        break;

                if (--i == 0)
                        return -ETIMEDOUT;

                udelay(1);
        }

        return 0;
}

static void release_engine(struct refill_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        list_add(&engine->idle_node, &omap_dmm->idle_head);
        spin_unlock_irqrestore(&list_lock, flags);

        atomic_inc(&omap_dmm->engine_counter);
        wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
        struct dmm *dmm = arg;
        uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
        int i;

        /* ack IRQ */
        dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

        for (i = 0; i < dmm->num_engines; i++) {
                if (status & DMM_IRQSTAT_LST) {
                        if (dmm->engines[i].async)
                                release_engine(&dmm->engines[i]);

                        complete(&dmm->engines[i].compl);
                }

                status >>= 8;
        }

        return IRQ_HANDLED;
}
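
/*
 * Illustrative note (inferred from the loop above, not in the original
 * source): DMM_PAT_IRQSTATUS packs one status byte per refill engine,
 * engine 0 in bits [7:0], engine 1 in bits [15:8], and so on, with
 * DMM_IRQSTAT_LST being the "descriptor list done" bit within each
 * byte. Shifting status right by 8 each iteration therefore tests the
 * same bit for the next engine; e.g., if DMM_IRQSTAT_LST is bit 1 of
 * each byte, a raw status of 0x00000202 means engines 0 and 1 both
 * completed their refill lists.
 */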

/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
        struct dmm_txn *txn = NULL;
        struct refill_engine *engine = NULL;
        int ret;
        unsigned long flags;


        /* wait until an engine is available */
        ret = wait_event_interruptible(omap_dmm->engine_queue,
                atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
        if (ret)
                return ERR_PTR(ret);

        /* grab an idle engine */
        spin_lock_irqsave(&list_lock, flags);
        if (!list_empty(&dmm->idle_head)) {
                engine = list_entry(dmm->idle_head.next, struct refill_engine,
                                        idle_node);
                list_del(&engine->idle_node);
        }
        spin_unlock_irqrestore(&list_lock, flags);

        BUG_ON(!engine);

        txn = &engine->txn;
        engine->tcm = tcm;
        txn->engine_handle = engine;
        txn->last_pat = NULL;
        txn->current_va = engine->refill_va;
        txn->current_pa = engine->refill_pa;

        return txn;
}

/**
 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (i.e. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
                struct page **pages, uint32_t npages, uint32_t roll)
{
        dma_addr_t pat_pa = 0, data_pa = 0;
        uint32_t *data;
        struct pat *pat;
        struct refill_engine *engine = txn->engine_handle;
        int columns = (1 + area->x1 - area->x0);
        int rows = (1 + area->y1 - area->y0);
        int i = columns*rows;

        pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);

        if (txn->last_pat)
                txn->last_pat->next_pa = (uint32_t)pat_pa;

        pat->area = *area;

        /* adjust Y coordinates based off of container parameters */
        pat->area.y0 += engine->tcm->y_offset;
        pat->area.y1 += engine->tcm->y_offset;

        pat->ctrl = (struct pat_ctrl){
                        .start = 1,
                        .lut_id = engine->tcm->lut_id,
                };

        data = alloc_dma(txn, 4*i, &data_pa);
        /* FIXME: what if data_pa is more than 32-bit ? */
        pat->data_pa = data_pa;

        while (i--) {
                int n = i + roll;
                if (n >= npages)
                        n -= npages;
                data[i] = (pages && pages[n]) ?
                        page_to_phys(pages[n]) : engine->dmm->dummy_pa;
        }

        txn->last_pat = pat;

        return;
}

/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
        int ret = 0;
        struct refill_engine *engine = txn->engine_handle;
        struct dmm *dmm = engine->dmm;

        if (!txn->last_pat) {
                dev_err(engine->dmm->dev, "need at least one txn\n");
                ret = -EINVAL;
                goto cleanup;
        }

        txn->last_pat->next_pa = 0;

        /* write to PAT_DESCR to clear out any pending transaction */
        dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

        /* wait for engine ready: */
        ret = wait_status(engine, DMM_PATSTATUS_READY);
        if (ret) {
                ret = -EFAULT;
                goto cleanup;
        }

        /* mark whether it is async to denote list management in IRQ handler */
        engine->async = wait ? false : true;
        reinit_completion(&engine->compl);
        /* verify that the irq handler sees the 'async' and completion value */
        smp_mb();

        /* kick reload */
        dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

        if (wait) {
                if (!wait_for_completion_timeout(&engine->compl,
                                msecs_to_jiffies(100))) {
                        dev_err(dmm->dev, "timed out waiting for done\n");
                        ret = -ETIMEDOUT;
                }
        }

cleanup:
        /* only place engine back on list if we are done with it */
        if (ret || wait)
                release_engine(engine);

        return ret;
}
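
/*
 * Illustrative sketch (not in the original source): the transaction API
 * above is always used as init -> append -> commit; fill() below is the
 * in-tree user. A minimal single-area refill looks like:
 *
 *   struct dmm_txn *txn = dmm_txn_init(omap_dmm, area->tcm);
 *   if (IS_ERR_OR_NULL(txn))
 *           return -ENOMEM;
 *   dmm_txn_append(txn, &p_area, pages, npages, 0);
 *   ret = dmm_txn_commit(txn, true);   // sync: engine released for us
 *
 * Committing with wait == false would rely on the IRQ handler to
 * release the engine, which is the async path the FIXME in fill()
 * disables.
 */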

/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
                uint32_t npages, uint32_t roll, bool wait)
{
        int ret = 0;
        struct tcm_area slice, area_s;
        struct dmm_txn *txn;

        /*
         * FIXME
         *
         * Asynchronous fill does not work reliably, as the driver does not
         * handle errors in the async code paths. The fill operation may
         * silently fail, leading to leaking DMM engines, which may eventually
         * lead to deadlock if we run out of DMM engines.
         *
         * For now, always set 'wait' so that we only use sync fills. Async
         * fills should be fixed, or alternatively we could decide to only
         * support sync fills and so the whole async code path could be removed.
         */

        wait = true;

        txn = dmm_txn_init(omap_dmm, area->tcm);
        if (IS_ERR_OR_NULL(txn))
                return -ENOMEM;

        tcm_for_each_slice(slice, *area, area_s) {
                struct pat_area p_area = {
                                .x0 = slice.p0.x,  .y0 = slice.p0.y,
                                .x1 = slice.p1.x,  .y1 = slice.p1.y,
                };

                dmm_txn_append(txn, &p_area, pages, npages, roll);

                roll += tcm_sizeof(slice);
        }

        ret = dmm_txn_commit(txn, wait);

        return ret;
}

/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page
 */
int tiler_pin(struct tiler_block *block, struct page **pages,
                uint32_t npages, uint32_t roll, bool wait)
{
        int ret;

        ret = fill(&block->area, pages, npages, roll, wait);

        if (ret)
                tiler_unpin(block);

        return ret;
}

int tiler_unpin(struct tiler_block *block)
{
        return fill(&block->area, NULL, 0, 0, false);
}

/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
                uint16_t h, uint16_t align)
{
        struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
        u32 min_align = 128;
        int ret;
        unsigned long flags;
        size_t slot_bytes;

        if (!block)
                return ERR_PTR(-ENOMEM);

        BUG_ON(!validfmt(fmt));

        /* convert width/height to slots */
        w = DIV_ROUND_UP(w, geom[fmt].slot_w);
        h = DIV_ROUND_UP(h, geom[fmt].slot_h);

        /* convert alignment to slots */
        slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
        min_align = max(min_align, slot_bytes);
        align = (align > min_align) ? ALIGN(align, min_align) : min_align;
        align /= slot_bytes;

        block->fmt = fmt;

        ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
                        &block->area);
        if (ret) {
                kfree(block);
                return ERR_PTR(-ENOMEM);
        }

        /* add to allocation list */
        spin_lock_irqsave(&list_lock, flags);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
        spin_unlock_irqrestore(&list_lock, flags);

        return block;
}
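
/*
 * Worked example (illustrative, not in the original source; assumes
 * 32x32-pixel slots for TILFMT_32BIT as derived from the geometry
 * table): reserving a 1920x1080 ARGB8888 surface with align = 0 gives
 *
 *   w = DIV_ROUND_UP(1920, 32) = 60 slots
 *   h = DIV_ROUND_UP(1080, 32) = 34 slots
 *   slot_bytes = 32 * 4 = 128
 *   min_align  = max(128, 128) = 128 bytes
 *   align      = 128 / 128 = 1 slot
 *
 * so the block may be placed on any slot boundary, every slot boundary
 * already being 128-byte aligned in the tiled address space.
 */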

struct tiler_block *tiler_reserve_1d(size_t size)
{
        struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
        int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long flags;

        if (!block)
                return ERR_PTR(-ENOMEM);

        block->fmt = TILFMT_PAGE;

        if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
                                &block->area)) {
                kfree(block);
                return ERR_PTR(-ENOMEM);
        }

        spin_lock_irqsave(&list_lock, flags);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
        spin_unlock_irqrestore(&list_lock, flags);

        return block;
}

/* note: if you have pinned pages, you should have already unpinned them! */
int tiler_release(struct tiler_block *block)
{
        int ret = tcm_free(&block->area);
        unsigned long flags;

        if (block->area.tcm)
                dev_err(omap_dmm->dev, "failed to release block\n");

        spin_lock_irqsave(&list_lock, flags);
        list_del(&block->alloc_node);
        spin_unlock_irqrestore(&list_lock, flags);

        kfree(block);
        return ret;
}

/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
        u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

        x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
        y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
        alignment = geom[fmt].x_shft + geom[fmt].y_shft;

        /* validate coordinate */
        x_mask = MASK(x_bits);
        y_mask = MASK(y_bits);

        /* x and y are unsigned, so only the upper bounds need checking */
        if (x > x_mask || y > y_mask) {
                DBG("invalid coords: %u > %u || %u > %u",
                                x, x_mask, y, y_mask);
                return 0;
        }

        /* account for mirroring */
        if (orient & MASK_X_INVERT)
                x ^= x_mask;
        if (orient & MASK_Y_INVERT)
                y ^= y_mask;

        /* get coordinate address */
        if (orient & MASK_XY_FLIP)
                tmp = ((x << y_bits) + y);
        else
                tmp = ((y << x_bits) + x);

        return TIL_ADDR((tmp << alignment), orient, fmt);
}
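
/*
 * Worked example (illustrative, not in the original source; assumes a
 * container 2^14 pixels wide and 2^13 high, i.e. CONT_WIDTH_BITS = 14
 * and CONT_HEIGHT_BITS = 13): for TILFMT_32BIT, orient = 0, x = 16,
 * y = 2:
 *
 *   x_bits = 14 - 1 = 13, y_bits = 13 - 1 = 12, alignment = 2
 *   tmp    = (y << x_bits) + x = (2 << 13) + 16 = 16400
 *   offset = tmp << alignment  = 65600 = 0x10040
 *
 * TIL_ADDR() then ORs in the format bits ([28:27] = 0x2 for 32-bit
 * tiled) and the orientation bits ([31:29] = 0), giving 0x10010040
 * relative to the TILER view base.
 */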

dma_addr_t tiler_ssptr(struct tiler_block *block)
{
        BUG_ON(!validfmt(block->fmt));

        return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
                        block->area.p0.x * geom[block->fmt].slot_w,
                        block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
                uint32_t x, uint32_t y)
{
        struct tcm_pt *p = &block->area.p0;
        BUG_ON(!validfmt(block->fmt));

        return tiler_get_address(block->fmt, orient,
                        (p->x * geom[block->fmt].slot_w) + x,
                        (p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
        BUG_ON(!validfmt(fmt));
        *w = round_up(*w, geom[fmt].slot_w);
        *h = round_up(*h, geom[fmt].slot_h);
}

uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
        BUG_ON(!validfmt(fmt));

        if (orient & MASK_XY_FLIP)
                return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
        else
                return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
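
/*
 * Worked example (illustrative; same CONT_WIDTH_BITS = 14 /
 * CONT_HEIGHT_BITS = 13 assumption as above): in the unflipped
 * orientations, moving down one line in the 32-bit tiled view steps
 *
 *   1 << (CONT_WIDTH_BITS + y_shft) = 1 << (14 + 1) = 32768 bytes
 *
 * regardless of the buffer's pixel width: the stride is a property of
 * the container view, not of the individual allocation.
 */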

size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
        tiler_align(fmt, &w, &h);
        return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
        BUG_ON(!validfmt(fmt));
        return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

uint32_t tiler_get_cpu_cache_flags(void)
{
        return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
        return omap_dmm ? true : false;
}

static int omap_dmm_remove(struct platform_device *dev)
{
        struct tiler_block *block, *_block;
        int i;
        unsigned long flags;

        if (omap_dmm) {
                /* free all area regions */
                spin_lock_irqsave(&list_lock, flags);
                list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
                                        alloc_node) {
                        list_del(&block->alloc_node);
                        kfree(block);
                }
                spin_unlock_irqrestore(&list_lock, flags);

                for (i = 0; i < omap_dmm->num_lut; i++)
                        if (omap_dmm->tcm && omap_dmm->tcm[i])
                                omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
                kfree(omap_dmm->tcm);

                kfree(omap_dmm->engines);
                if (omap_dmm->refill_va)
                        dma_free_wc(omap_dmm->dev,
                                    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
                                    omap_dmm->refill_va, omap_dmm->refill_pa);
                if (omap_dmm->dummy_page)
                        __free_page(omap_dmm->dummy_page);

                if (omap_dmm->irq > 0)
                        free_irq(omap_dmm->irq, omap_dmm);

                iounmap(omap_dmm->base);
                kfree(omap_dmm);
                omap_dmm = NULL;
        }

        return 0;
}

static int omap_dmm_probe(struct platform_device *dev)
{
        int ret = -EFAULT, i;
        struct tcm_area area = {0};
        u32 hwinfo, pat_geom;
        struct resource *mem;

        omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
        if (!omap_dmm)
                goto fail;

        /* initialize lists */
        INIT_LIST_HEAD(&omap_dmm->alloc_head);
        INIT_LIST_HEAD(&omap_dmm->idle_head);

        init_waitqueue_head(&omap_dmm->engine_queue);

        if (dev->dev.of_node) {
                const struct of_device_id *match;

                match = of_match_node(dmm_of_match, dev->dev.of_node);
                if (!match) {
                        dev_err(&dev->dev, "failed to find matching device node\n");
                        return -ENODEV;
                }

                omap_dmm->plat_data = match->data;
        }

        /* lookup hwmod data - base address and irq */
        mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!mem) {
                dev_err(&dev->dev, "failed to get base address resource\n");
                goto fail;
        }

        omap_dmm->base = ioremap(mem->start, SZ_2K);

        if (!omap_dmm->base) {
                dev_err(&dev->dev, "failed to get dmm base address\n");
                goto fail;
        }

        omap_dmm->irq = platform_get_irq(dev, 0);
        if (omap_dmm->irq < 0) {
                dev_err(&dev->dev, "failed to get IRQ resource\n");
                goto fail;
        }

        omap_dmm->dev = &dev->dev;

        hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
        omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
        omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
        omap_dmm->container_width = 256;
        omap_dmm->container_height = 128;

        atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

        /* read out actual LUT width and height */
        pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
        omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
        omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

        /* increment LUT by one if on OMAP5 */
        /* LUT has twice the height, and is split into a separate container */
        if (omap_dmm->lut_height != omap_dmm->container_height)
                omap_dmm->num_lut++;

        /* initialize DMM registers */
        dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
        dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
        dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
        dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
        dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
        dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

        ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
                                "omap_dmm_irq_handler", omap_dmm);

        if (ret) {
                dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
                        omap_dmm->irq, ret);
                omap_dmm->irq = -1;
                goto fail;
        }

        /* Enable all interrupts for each refill engine except
         * ERR_LUT_MISS<n> (which is just advisory, and we don't care
         * about because we want to be able to refill live scanout
         * buffers for accelerated pan/scroll) and FILL_DSC<n> which
         * we just generally don't care about.
         */
        dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

        omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!omap_dmm->dummy_page) {
                dev_err(&dev->dev, "could not allocate dummy page\n");
                ret = -ENOMEM;
                goto fail;
        }

        /* set dma mask for device */
        ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
        if (ret)
                goto fail;

        omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

        /* alloc refill memory */
        omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
                                           REFILL_BUFFER_SIZE * omap_dmm->num_engines,
                                           &omap_dmm->refill_pa, GFP_KERNEL);
        if (!omap_dmm->refill_va) {
                dev_err(&dev->dev, "could not allocate refill memory\n");
                goto fail;
        }

        /* alloc engines */
        omap_dmm->engines = kcalloc(omap_dmm->num_engines,
                                    sizeof(struct refill_engine), GFP_KERNEL);
        if (!omap_dmm->engines) {
                ret = -ENOMEM;
                goto fail;
        }

        for (i = 0; i < omap_dmm->num_engines; i++) {
                omap_dmm->engines[i].id = i;
                omap_dmm->engines[i].dmm = omap_dmm;
                omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
                                                (REFILL_BUFFER_SIZE * i);
                omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
                                                (REFILL_BUFFER_SIZE * i);
                init_completion(&omap_dmm->engines[i].compl);

                list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
        }

        omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
                                GFP_KERNEL);
        if (!omap_dmm->tcm) {
                ret = -ENOMEM;
                goto fail;
        }

        /* init containers */
        /* Each LUT is associated with a TCM (container manager).  The
           lut_id identifies which LUT to program during refill
           operations */
        for (i = 0; i < omap_dmm->num_lut; i++) {
                omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
                                                omap_dmm->container_height);

                if (!omap_dmm->tcm[i]) {
                        dev_err(&dev->dev, "failed to allocate container\n");
                        ret = -ENOMEM;
                        goto fail;
                }

                omap_dmm->tcm[i]->lut_id = i;
        }

        /* assign access mode containers to applicable tcm container */
        /* OMAP 4 has 1 container for all 4 views */
        /* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
        containers[TILFMT_8BIT] = omap_dmm->tcm[0];
        containers[TILFMT_16BIT] = omap_dmm->tcm[0];
        containers[TILFMT_32BIT] = omap_dmm->tcm[0];

        if (omap_dmm->container_height != omap_dmm->lut_height) {
                /* second LUT is used for PAGE mode.  Programming must use
                   y offset that is added to all y coordinates.  LUT id is still
                   0, because it is the same LUT, just the upper 128 lines */
                containers[TILFMT_PAGE] = omap_dmm->tcm[1];
                omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
                omap_dmm->tcm[1]->lut_id = 0;
        } else {
                containers[TILFMT_PAGE] = omap_dmm->tcm[0];
        }

        area = (struct tcm_area) {
                .tcm = NULL,
                .p1.x = omap_dmm->container_width - 1,
                .p1.y = omap_dmm->container_height - 1,
        };

        /* initialize all LUTs to dummy page entries */
        for (i = 0; i < omap_dmm->num_lut; i++) {
                area.tcm = omap_dmm->tcm[i];
                if (fill(&area, NULL, 0, 0, true))
                        dev_err(omap_dmm->dev, "refill failed");
        }

        dev_info(omap_dmm->dev, "initialized all PAT entries\n");

        return 0;

fail:
        if (omap_dmm_remove(dev))
                dev_err(&dev->dev, "cleanup failed\n");
        return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
                                "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
                                                        char c, bool ovw)
{
        int x, y;
        for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
                for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
                        if (map[y][x] == ' ' || ovw)
                                map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
                                                                        char c)
{
        map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
        return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
        return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
        char *p = map[yd] + (x0 / xdiv);
        int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
        if (w >= 0) {
                p += w;
                while (*nice)
                        *p++ = *nice++;
        }
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
                                                        struct tcm_area *a)
{
        sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
        if (a->p0.y + 1 < a->p1.y) {
                text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
                                                        256 - 1);
        } else if (a->p0.y < a->p1.y) {
                if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
                        text_map(map, xdiv, nice, a->p0.y / ydiv,
                                        a->p0.x + xdiv, 256 - 1);
                else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
                        text_map(map, xdiv, nice, a->p1.y / ydiv,
                                        0, a->p1.y - xdiv);
        } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
                text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
        }
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
                                                        struct tcm_area *a)
{
        sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
        if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
                text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
                                                        a->p0.x, a->p1.x);
}

int tiler_map_show(struct seq_file *s, void *arg)
{
        int xdiv = 2, ydiv = 1;
        char **map = NULL, *global_map;
        struct tiler_block *block;
        struct tcm_area a, p;
        int i;
        const char *m2d = alphabet;
        const char *a2d = special;
        const char *m2dp = m2d, *a2dp = a2d;
        char nice[128];
        int h_adj;
        int w_adj;
        unsigned long flags;
        int lut_idx;


        if (!omap_dmm) {
                /* early return if dmm/tiler device is not initialized */
                return 0;
        }

        h_adj = omap_dmm->container_height / ydiv;
        w_adj = omap_dmm->container_width / xdiv;

        map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
        global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);

        if (!map || !global_map)
                goto error;

        for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
                memset(map, 0, h_adj * sizeof(*map));
                memset(global_map, ' ', (w_adj + 1) * h_adj);

                for (i = 0; i < omap_dmm->container_height; i++) {
                        map[i] = global_map + i * (w_adj + 1);
                        map[i][w_adj] = 0;
                }

                spin_lock_irqsave(&list_lock, flags);

                list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
                        if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
                                if (block->fmt != TILFMT_PAGE) {
                                        fill_map(map, xdiv, ydiv, &block->area,
                                                *m2dp, true);
                                        if (!*++a2dp)
                                                a2dp = a2d;
                                        if (!*++m2dp)
                                                m2dp = m2d;
                                        map_2d_info(map, xdiv, ydiv, nice,
                                                        &block->area);
                                } else {
                                        bool start = read_map_pt(map, xdiv,
                                                ydiv, &block->area.p0) == ' ';
                                        bool end = read_map_pt(map, xdiv, ydiv,
                                                        &block->area.p1) == ' ';

                                        tcm_for_each_slice(a, block->area, p)
                                                fill_map(map, xdiv, ydiv, &a,
                                                        '=', true);
                                        fill_map_pt(map, xdiv, ydiv,
                                                        &block->area.p0,
                                                        start ? '<' : 'X');
                                        fill_map_pt(map, xdiv, ydiv,
                                                        &block->area.p1,
                                                        end ? '>' : 'X');
                                        map_1d_info(map, xdiv, ydiv, nice,
                                                        &block->area);
                                }
                        }
                }

                spin_unlock_irqrestore(&list_lock, flags);

                if (s) {
                        seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
                        for (i = 0; i < 128; i++)
                                seq_printf(s, "%03d:%s\n", i, map[i]);
                        seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
                } else {
                        dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
                                lut_idx);
                        for (i = 0; i < 128; i++)
                                dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
                        dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
                                lut_idx);
                }
        }

error:
        kfree(map);
        kfree(global_map);

        return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
        struct tcm_area area;
        int i;

        if (!omap_dmm)
                return -ENODEV;

        area = (struct tcm_area) {
                .tcm = NULL,
                .p1.x = omap_dmm->container_width - 1,
                .p1.y = omap_dmm->container_height - 1,
        };

        /* initialize all LUTs to dummy page entries */
        for (i = 0; i < omap_dmm->num_lut; i++) {
                area.tcm = omap_dmm->tcm[i];
                if (fill(&area, NULL, 0, 0, true))
                        dev_err(dev, "refill failed");
        }

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
        .cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
        .cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
        {
                .compatible = "ti,omap4-dmm",
                .data = &dmm_omap4_platform_data,
        },
        {
                .compatible = "ti,omap5-dmm",
                .data = &dmm_omap5_platform_data,
        },
        {},
};
#endif

struct platform_driver omap_dmm_driver = {
        .probe = omap_dmm_probe,
        .remove = omap_dmm_remove,
        .driver = {
                .owner = THIS_MODULE,
                .name = DMM_DRIVER_NAME,
                .of_match_table = of_match_ptr(dmm_of_match),
                .pm = &omap_dmm_pm_ops,
        },
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
