
Linux/drivers/clocksource/sh_cmt.c

/*
 * SuperH Timer Support - CMT
 *
 *  Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_cmt_priv {
        void __iomem *mapbase;
        void __iomem *mapbase_str;
        struct clk *clk;
        unsigned long width; /* 16 or 32 bit version of hardware block */
        unsigned long overflow_bit;
        unsigned long clear_bits;
        struct irqaction irqaction;
        struct platform_device *pdev;

        unsigned long flags;
        unsigned long match_value;
        unsigned long next_match_value;
        unsigned long max_match_value;
        unsigned long rate;
        raw_spinlock_t lock;
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
        bool cs_enabled;

        /* callbacks for CMSTR and CMCSR access */
        unsigned long (*read_control)(void __iomem *base, unsigned long offs);
        void (*write_control)(void __iomem *base, unsigned long offs,
                              unsigned long value);

        /* callbacks for CMCNT and CMCOR access */
        unsigned long (*read_count)(void __iomem *base, unsigned long offs);
        void (*write_count)(void __iomem *base, unsigned long offs,
                            unsigned long value);
};

/* Examples of supported CMT timer register layouts and I/O access widths:
 *
 * "16-bit counter and 16-bit control" as found on sh7263:
 * CMSTR 0xfffec000 16-bit
 * CMCSR 0xfffec002 16-bit
 * CMCNT 0xfffec004 16-bit
 * CMCOR 0xfffec006 16-bit
 *
 * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
 * CMSTR 0xffca0000 16-bit
 * CMCSR 0xffca0060 16-bit
 * CMCNT 0xffca0064 32-bit
 * CMCOR 0xffca0068 32-bit
 *
 * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
 * CMSTR 0xffca0500 32-bit
 * CMCSR 0xffca0510 32-bit
 * CMCNT 0xffca0514 32-bit
 * CMCOR 0xffca0518 32-bit
 */

static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
{
        return ioread16(base + (offs << 1));
}

static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
{
        return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs,
                           unsigned long value)
{
        iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs,
                           unsigned long value)
{
        iowrite32(value, base + (offs << 2));
}

#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
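
/*
 * For illustration only: with the accessors above, a channel's mapbase
 * points at its CMCSR register and the CMCNT/CMCOR indices are scaled by
 * the access width.  With the sh7263 layout above, mapbase would be
 * 0xfffec002, giving CMCSR at +0, CMCNT at +2 and CMCOR at +4, while
 * mapbase_str (0xfffec000) covers the shared CMSTR register.  With the
 * sh7372-style layout, the 32-bit accessors place CMCNT at mapbase + 4
 * and CMCOR at mapbase + 8.
 */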

static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
        return p->read_control(p->mapbase_str, 0);
}

static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
{
        return p->read_control(p->mapbase, CMCSR);
}

static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
{
        return p->read_count(p->mapbase, CMCNT);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        p->write_control(p->mapbase_str, 0, value);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        p->write_control(p->mapbase, CMCSR, value);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        p->write_count(p->mapbase, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        p->write_count(p->mapbase, CMCOR, value);
}

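/*
 * Read the counter while it may be running: CMCNT is sampled three times
 * and the overflow flag is checked before and after.  The middle sample is
 * only trusted once the three reads form a consistent monotonic sequence
 * and the overflow flag did not change, which filters out values sampled
 * while the counter was being updated or a wrap was being flagged.
 */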
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
                                        int *has_wrapped)
{
        unsigned long v1, v2, v3;
        int o1, o2;

        o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;

        /* Make sure the timer value is stable. Stolen from acpi_pm.c */
        do {
                o2 = o1;
                v1 = sh_cmt_read_cmcnt(p);
                v2 = sh_cmt_read_cmcnt(p);
                v3 = sh_cmt_read_cmcnt(p);
                o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
        } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
                          || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

        *has_wrapped = o1;
        return v2;
}

static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        unsigned long flags, value;

        /* start stop register shared by multiple timer channels */
        raw_spin_lock_irqsave(&sh_cmt_lock, flags);
        value = sh_cmt_read_cmstr(p);

        if (start)
                value |= 1 << cfg->timer_bit;
        else
                value &= ~(1 << cfg->timer_bit);

        sh_cmt_write_cmstr(p, value);
        raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
        int k, ret;

        pm_runtime_get_sync(&p->pdev->dev);
        dev_pm_syscore_device(&p->pdev->dev, true);

        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
                dev_err(&p->pdev->dev, "cannot enable clock\n");
                goto err0;
        }

        /* make sure channel is disabled */
        sh_cmt_start_stop_ch(p, 0);

        /* configure channel, periodic mode and maximum timeout */
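        /*
         * The CMCSR values below are SoC-specific magic numbers.  They are
         * understood to select the input-clock divider implied by the rate
         * computation (input clock / 512 for the 16-bit variant, RCLK / 8
         * for the 32-bit variants) and to enable the compare match
         * interrupt; consult the datasheet of the SoC at hand for the
         * exact bit layout.
         */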
        if (p->width == 16) {
                *rate = clk_get_rate(p->clk) / 512;
                sh_cmt_write_cmcsr(p, 0x43);
        } else {
                *rate = clk_get_rate(p->clk) / 8;
                sh_cmt_write_cmcsr(p, 0x01a4);
        }

        sh_cmt_write_cmcor(p, 0xffffffff);
        sh_cmt_write_cmcnt(p, 0);

        /*
         * According to the sh73a0 user's manual, CMCNT is clocked only by
         * RCLK (the pseudo 32 KHz clock), which restricts access to the
         * register: two RCLK cycles must elapse before CMCNT can be read,
         * or before a written value is reflected in the LSI's actual
         * operation.
         *
         * CMCNT was just cleared above, so wait here until the clear has
         * actually taken effect.  This takes at most two RCLK cycles.
         */
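        /*
         * At a nominal ~32 kHz RCLK, two cycles are roughly 61-62 us, so
         * the poll below (up to 100 iterations of udelay(1) plus register
         * reads) leaves ample margin.
         */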
        for (k = 0; k < 100; k++) {
                if (!sh_cmt_read_cmcnt(p))
                        break;
                udelay(1);
        }

        if (sh_cmt_read_cmcnt(p)) {
                dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
                ret = -ETIMEDOUT;
                goto err1;
        }

        /* enable channel */
        sh_cmt_start_stop_ch(p, 1);
        return 0;
 err1:
        /* stop clock */
        clk_disable(p->clk);

 err0:
        return ret;
}

static void sh_cmt_disable(struct sh_cmt_priv *p)
{
        /* disable channel */
        sh_cmt_start_stop_ch(p, 0);

        /* disable interrupts in CMT block */
        sh_cmt_write_cmcsr(p, 0);

        /* stop clock */
        clk_disable(p->clk);

        dev_pm_syscore_device(&p->pdev->dev, false);
        pm_runtime_put(&p->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)

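/*
 * FLAG_CLOCKEVENT and FLAG_CLOCKSOURCE record which of the two roles the
 * channel currently serves.  FLAG_REPROGRAM, FLAG_SKIPEVENT and
 * FLAG_IRQCONTEXT coordinate match reprogramming between
 * sh_cmt_clock_event_program_verify() and the interrupt handler below.
 */

/*
 * Program a new match value and verify against the live counter that the
 * match was not already missed: CMCOR is written, the counter is re-read,
 * and if the counter has passed the new value without wrapping the
 * programming is retried with an exponentially growing safety margin.
 * Races with the compare match interrupt are resolved via FLAG_SKIPEVENT.
 */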
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
                                              int absolute)
{
        unsigned long new_match;
        unsigned long value = p->next_match_value;
        unsigned long delay = 0;
        unsigned long now = 0;
        int has_wrapped;

        now = sh_cmt_get_counter(p, &has_wrapped);
        p->flags |= FLAG_REPROGRAM; /* force reprogram */

        if (has_wrapped) {
                /* we're competing with the interrupt handler.
                 *  -> let the interrupt handler reprogram the timer.
                 *  -> interrupt number two handles the event.
                 */
                p->flags |= FLAG_SKIPEVENT;
                return;
        }

        if (absolute)
                now = 0;

        do {
                /* reprogram the timer hardware,
                 * but don't save the new match value yet.
                 */
                new_match = now + value + delay;
                if (new_match > p->max_match_value)
                        new_match = p->max_match_value;

                sh_cmt_write_cmcor(p, new_match);

                now = sh_cmt_get_counter(p, &has_wrapped);
                if (has_wrapped && (new_match > p->match_value)) {
                        /* we are changing to a greater match value,
                         * so this wrap must be caused by the counter
                         * matching the old value.
                         * -> first interrupt reprograms the timer.
                         * -> interrupt number two handles the event.
                         */
                        p->flags |= FLAG_SKIPEVENT;
                        break;
                }

                if (has_wrapped) {
                        /* we are changing to a smaller match value,
                         * so the wrap must be caused by the counter
                         * matching the new value.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* be safe: verify hardware settings */
                if (now < new_match) {
                        /* timer value is below match value, all good.
                         * this makes sure we won't miss any match events.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* the counter has reached a value greater
                 * than our new match value. and since the
                 * has_wrapped flag isn't set we must have
                 * programmed a too close event.
                 * -> increase delay and retry.
                 */
                if (delay)
                        delay <<= 1;
                else
                        delay = 1;

                if (!delay)
                        dev_warn(&p->pdev->dev, "too long delay\n");

        } while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
        if (delta > p->max_match_value)
                dev_warn(&p->pdev->dev, "delta out of range\n");

        p->next_match_value = delta;
        sh_cmt_clock_event_program_verify(p, 0);
}

static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&p->lock, flags);
        __sh_cmt_set_next(p, delta);
        raw_spin_unlock_irqrestore(&p->lock, flags);
}

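/*
 * Compare match interrupt: clear the compare match / overflow status in
 * CMCSR, account one full match period to the clocksource if it is in
 * use, deliver the clock event and, when needed, reprogram the next
 * match value.
 */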
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
        struct sh_cmt_priv *p = dev_id;

        /* clear flags */
        sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);

        /*
         * If the clocksource is enabled, update its cycle counter to begin
         * with.  The wrap flag should be cleared by the timer-specific isr
         * before we end up here.
         */
        if (p->flags & FLAG_CLOCKSOURCE)
                p->total_cycles += p->match_value + 1;

        if (!(p->flags & FLAG_REPROGRAM))
                p->next_match_value = p->max_match_value;

        p->flags |= FLAG_IRQCONTEXT;

        if (p->flags & FLAG_CLOCKEVENT) {
                if (!(p->flags & FLAG_SKIPEVENT)) {
                        if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
                                p->next_match_value = p->max_match_value;
                                p->flags |= FLAG_REPROGRAM;
                        }

                        p->ced.event_handler(&p->ced);
                }
        }

        p->flags &= ~FLAG_SKIPEVENT;

        if (p->flags & FLAG_REPROGRAM) {
                p->flags &= ~FLAG_REPROGRAM;
                sh_cmt_clock_event_program_verify(p, 1);

                if (p->flags & FLAG_CLOCKEVENT)
                        if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
                            || (p->match_value == p->next_match_value))
                                p->flags &= ~FLAG_REPROGRAM;
        }

        p->flags &= ~FLAG_IRQCONTEXT;

        return IRQ_HANDLED;
}

static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
        int ret = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&p->lock, flags);

        if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                ret = sh_cmt_enable(p, &p->rate);

        if (ret)
                goto out;
        p->flags |= flag;

        /* setup timeout if no clockevent */
        if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(p, p->max_match_value);
 out:
        raw_spin_unlock_irqrestore(&p->lock, flags);

        return ret;
}

static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
        unsigned long flags;
        unsigned long f;

        raw_spin_lock_irqsave(&p->lock, flags);

        f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
        p->flags &= ~flag;

        if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                sh_cmt_disable(p);

        /* adjust the timeout to maximum if only clocksource left */
        if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
                __sh_cmt_set_next(p, p->max_match_value);

        raw_spin_unlock_irqrestore(&p->lock, flags);
}

static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
        return container_of(cs, struct sh_cmt_priv, cs);
}

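/*
 * The clocksource value is the number of cycles in the full match periods
 * accumulated by the interrupt handler (total_cycles) plus the current
 * position within the running period, with one extra period added when a
 * wrap is pending but has not been serviced yet.
 */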
static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
        unsigned long flags, raw;
        unsigned long value;
        int has_wrapped;

        raw_spin_lock_irqsave(&p->lock, flags);
        value = p->total_cycles;
        raw = sh_cmt_get_counter(p, &has_wrapped);

        if (unlikely(has_wrapped))
                raw += p->match_value + 1;
        raw_spin_unlock_irqrestore(&p->lock, flags);

        return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
        int ret;
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        WARN_ON(p->cs_enabled);

        p->total_cycles = 0;

        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
        if (!ret) {
                __clocksource_updatefreq_hz(cs, p->rate);
                p->cs_enabled = true;
        }
        return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        WARN_ON(!p->cs_enabled);

        sh_cmt_stop(p, FLAG_CLOCKSOURCE);
        p->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        sh_cmt_stop(p, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&p->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        pm_genpd_syscore_poweron(&p->pdev->dev);
        sh_cmt_start(p, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clocksource *cs = &p->cs;

        cs->name = name;
        cs->rating = rating;
        cs->read = sh_cmt_clocksource_read;
        cs->enable = sh_cmt_clocksource_enable;
        cs->disable = sh_cmt_clocksource_disable;
        cs->suspend = sh_cmt_clocksource_suspend;
        cs->resume = sh_cmt_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&p->pdev->dev, "used as clock source\n");

        /* Register with dummy 1 Hz value, gets updated in ->enable() */
        clocksource_register_hz(cs, 1);
        return 0;
}

static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_cmt_priv, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
        struct clock_event_device *ced = &p->ced;

        sh_cmt_start(p, FLAG_CLOCKEVENT);

        /* TODO: calculate good shift from rate and counter bit width */

        ced->shift = 32;
        ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
        ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
        ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

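        /*
         * With shift fixed at 32, mult = rate * 2^32 / NSEC_PER_SEC, so the
         * clockevent core converts nanoseconds to counter ticks at the
         * channel's input rate.  For periodic operation the match value is
         * (rate / HZ) - 1 (rounded), since a match value of N corresponds
         * to a period of N + 1 counter ticks.  As an illustration only,
         * assuming rate == 32768 and HZ == 100, this programs 327, i.e. a
         * 328-tick period of roughly 10 ms.
         */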
        if (periodic)
                sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
        else
                sh_cmt_set_next(p, p->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
                                    struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        /* deal with old setting first */
        switch (ced->mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                dev_info(&p->pdev->dev, "used for periodic clock events\n");
                sh_cmt_clock_event_start(p, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                dev_info(&p->pdev->dev, "used for oneshot clock events\n");
                sh_cmt_clock_event_start(p, 0);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }
}

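/*
 * delta - 1 is programmed because a match value of N fires after N + 1
 * counter ticks.  When called from within the interrupt handler the value
 * is only recorded here; the FLAG_REPROGRAM path at the end of
 * sh_cmt_interrupt() writes it to the hardware.
 */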
static int sh_cmt_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
        if (likely(p->flags & FLAG_IRQCONTEXT))
                p->next_match_value = delta - 1;
        else
                sh_cmt_set_next(p, delta - 1);

        return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        pm_genpd_syscore_poweroff(&p->pdev->dev);
        clk_unprepare(p->clk);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        clk_prepare(p->clk);
        pm_genpd_syscore_poweron(&p->pdev->dev);
}

static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clock_event_device *ced = &p->ced;

        memset(ced, 0, sizeof(*ced));

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_mode = sh_cmt_clock_event_mode;
        ced->suspend = sh_cmt_clock_event_suspend;
        ced->resume = sh_cmt_clock_event_resume;

        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
}

static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
                           unsigned long clockevent_rating,
                           unsigned long clocksource_rating)
{
        if (clockevent_rating)
                sh_cmt_register_clockevent(p, name, clockevent_rating);

        if (clocksource_rating)
                sh_cmt_register_clocksource(p, name, clocksource_rating);

        return 0;
}

static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        struct resource *res, *res2;
        int irq, ret;
        ret = -ENXIO;

        memset(p, 0, sizeof(*p));
        p->pdev = pdev;

        if (!cfg) {
                dev_err(&p->pdev->dev, "missing platform data\n");
                goto err0;
        }

        res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&p->pdev->dev, "failed to get I/O memory\n");
                goto err0;
        }

        /* optional resource for the shared timer start/stop register */
        res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);

        irq = platform_get_irq(p->pdev, 0);
        if (irq < 0) {
                dev_err(&p->pdev->dev, "failed to get irq\n");
                goto err0;
        }

        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
                dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }

        /* map second resource for CMSTR */
        p->mapbase_str = ioremap_nocache(res2 ? res2->start :
                                         res->start - cfg->channel_offset,
                                         res2 ? resource_size(res2) : 2);
        if (p->mapbase_str == NULL) {
                dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
                goto err1;
        }

        /* request irq using setup_irq() (too early for request_irq()) */
        p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_cmt_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;

        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, "cmt_fck");
        if (IS_ERR(p->clk)) {
                dev_err(&p->pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(p->clk);
                goto err2;
        }

        ret = clk_prepare(p->clk);
        if (ret < 0)
                goto err3;

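        /*
         * Infer the register layout from the resource sizes: a 4-byte
         * second resource means CMSTR and CMCSR are accessed as 32-bit
         * registers, and a 6-byte channel window holds three 16-bit
         * registers (CMCSR, CMCNT, CMCOR); anything else is treated as
         * the 32-bit counter layout described at the top of the file.
         */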
        if (res2 && (resource_size(res2) == 4)) {
                /* assume both CMSTR and CMCSR to be 32-bit */
                p->read_control = sh_cmt_read32;
                p->write_control = sh_cmt_write32;
        } else {
                p->read_control = sh_cmt_read16;
                p->write_control = sh_cmt_write16;
        }

        if (resource_size(res) == 6) {
                p->width = 16;
                p->read_count = sh_cmt_read16;
                p->write_count = sh_cmt_write16;
                p->overflow_bit = 0x80;
                p->clear_bits = ~0x80;
        } else {
                p->width = 32;
                p->read_count = sh_cmt_read32;
                p->write_count = sh_cmt_write32;
                p->overflow_bit = 0x8000;
                p->clear_bits = ~0xc000;
        }

        if (p->width == (sizeof(p->max_match_value) * 8))
                p->max_match_value = ~0;
        else
                p->max_match_value = (1 << p->width) - 1;

        p->match_value = p->max_match_value;
        raw_spin_lock_init(&p->lock);

        ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
                              cfg->clockevent_rating,
                              cfg->clocksource_rating);
        if (ret) {
                dev_err(&p->pdev->dev, "registration failed\n");
                goto err4;
        }
        p->cs_enabled = false;

        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
                goto err4;
        }

        platform_set_drvdata(pdev, p);

        return 0;
err4:
        clk_unprepare(p->clk);
err3:
        clk_put(p->clk);
err2:
        iounmap(p->mapbase_str);
err1:
        iounmap(p->mapbase);
err0:
        return ret;
}

static int sh_cmt_probe(struct platform_device *pdev)
{
        struct sh_cmt_priv *p = platform_get_drvdata(pdev);
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;

        if (!is_early_platform_device(pdev)) {
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
        }

        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                goto out;
        }

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL) {
                dev_err(&pdev->dev, "failed to allocate driver data\n");
                return -ENOMEM;
        }

        ret = sh_cmt_setup(p, pdev);
        if (ret) {
                kfree(p);
                pm_runtime_idle(&pdev->dev);
                return ret;
        }
        if (is_early_platform_device(pdev))
                return 0;

 out:
        if (cfg->clockevent_rating || cfg->clocksource_rating)
                pm_runtime_irq_safe(&pdev->dev);
        else
                pm_runtime_idle(&pdev->dev);

        return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
        .probe          = sh_cmt_probe,
        .remove         = sh_cmt_remove,
        .driver         = {
                .name   = "sh_cmt",
        }
};

static int __init sh_cmt_init(void)
{
        return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
        platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");
