/*
 * drivers/sh/clk.c - SuperH clock framework
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *	Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index	= i;
		freq_table[i].frequency	= freq;
	}

	/* Termination entry */
	freq_table[i].index	= i;
	freq_table[i].frequency	= CPUFREQ_TABLE_END;
}
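
/*
 * Example: a minimal sketch of building a rate table for a simple
 * divide-by-N clock. The divisor values and the "foo" names are
 * hypothetical, not taken from any real SoC:
 *
 *	static int foo_divisors[] = { 2, 3, 4, 6, 8, 12 };
 *	static struct clk_div_mult_table foo_div_mult_table = {
 *		.divisors	= foo_divisors,
 *		.nr_divisors	= ARRAY_SIZE(foo_divisors),
 *	};
 *	static struct cpufreq_frequency_table
 *			foo_freqs[ARRAY_SIZE(foo_divisors) + 1];
 *
 *	clk_rate_table_build(clk, foo_freqs, ARRAY_SIZE(foo_divisors),
 *			     &foo_div_mult_table, NULL);
 *
 * Each entry becomes parent->rate * mult / div; unusable entries are
 * marked CPUFREQ_ENTRY_INVALID, and the extra slot holds the
 * CPUFREQ_TABLE_END terminator.
 */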

struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

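/*
 * Iterate over every candidate frequency in [min, max). The trailing
 * if/else makes the caller's loop body the else branch, so entries for
 * which the iterator returns 0 (invalid frequencies) are skipped.
 */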
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos < r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long rate_best_fit = rounder->rate;
	unsigned long highest, lowest, freq;
	int i;

	highest = 0;
	lowest	= ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}
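
/*
 * Rounding picks the candidate with the smallest absolute error and
 * clamps out-of-range requests. For example (hypothetical candidate
 * rates { 33333333, 66666667, 133333333 }): a request for 60000000
 * rounds to 66666667, while anything above 133333333 or below
 * 33333333 clamps to that bound.
 */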

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}

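/*
 * A minimal sketch of how a driver's .round_rate op might use this;
 * the "foo" name and the 1..64 divisor range are hypothetical:
 *
 *	static long foo_clk_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 *
 * This evaluates parent_rate / div for each div in [div_min, div_max)
 * and returns the closest achievable rate.
 */
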
int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}
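
/*
 * Callers typically round a requested rate first and then look up the
 * matching index, e.g. (sketch only):
 *
 *	long rounded = clk_rate_table_round(clk, freq_table, rate);
 *	int idx = clk_rate_table_find(clk, freq_table, rounded);
 *
 * since an exact match is required here and -ENOENT is returned
 * otherwise.
 */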

/* Used for clocks that always have the same rate as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

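/*
 * Enables are refcounted and recurse up the parent chain, so a typical
 * consumer simply pairs the calls (sketch; the "foo" names are
 * hypothetical):
 *
 *	struct clk *clk = clk_get(dev, "foo_clk");
 *	ret = clk_enable(clk);
 *	...
 *	clk_disable(clk);
 *
 * Only the 0->1 enable and 1->0 disable transitions reach the
 * clock's .enable/.disable ops.
 */
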
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent); if each
 * clock's .recalc is set correctly, this also propagates their rates
 * down the tree. Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}

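/*
 * A clock that needs register access can declare its MMIO window up
 * front, e.g. (sketch; the "foo" names, address, and length are
 * hypothetical):
 *
 *	static struct clk_mapping foo_mapping = {
 *		.phys	= 0xffc80000,
 *		.len	= 0x100,
 *	};
 *
 *	static struct clk foo_clk = {
 *		.ops		= &foo_clk_ops,
 *		.mapping	= &foo_mapping,
 *	};
 *
 * The window is ioremapped once at registration time and refcounted
 * across all clocks sharing the mapping; child clocks without a
 * mapping of their own inherit the root clock's.
 */
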
static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);

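/*
 * Typical registration from SoC setup code (sketch; the "foo" names
 * are hypothetical):
 *
 *	static struct clk *foo_clks[] = {
 *		&foo_pll_clk,
 *		&foo_peripheral_clk,
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(foo_clks); i++)
 *		ret |= clk_register(foo_clks[i]);
 *
 * Registration attaches each clock to its parent's child list (or to
 * the root list), establishes any MMIO mapping, and invokes the
 * optional .init op.
 */
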
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

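/*
 * Note that reparenting is only permitted while the clock is disabled
 * (usecount == 0), e.g. (sketch; the parent name is hypothetical):
 *
 *	ret = clk_set_parent(clk, &pll2_clk);
 *	if (!ret)
 *		ret = clk_enable(clk);
 *
 * An enabled clock yields -EBUSY instead.
 */
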
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

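/*
 * The usual consumer pattern is to round first and then commit the
 * result (sketch):
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		clk_set_rate(clk, rounded);
 *
 * Clocks without a .round_rate op simply report their current rate.
 */
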
#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);