]>
Commit | Line | Data |
---|---|---|
8b5f79f9 | 1 | /* |
8b5f79f9 VM |
2 | * Based on arm clockevents implementation and old bfin time tick. |
3 | * | |
96f1050d RG |
4 | * Copyright 2008-2009 Analog Devices Inc. |
5 | * 2008 GeoTechnologies | |
6 | * Vitja Makarov | |
8b5f79f9 | 7 | * |
96f1050d | 8 | * Licensed under the GPL-2 |
8b5f79f9 | 9 | */ |
96f1050d | 10 | |
8b5f79f9 VM |
11 | #include <linux/module.h> |
12 | #include <linux/profile.h> | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/time.h> | |
764cb81c | 15 | #include <linux/timex.h> |
8b5f79f9 VM |
16 | #include <linux/irq.h> |
17 | #include <linux/clocksource.h> | |
18 | #include <linux/clockchips.h> | |
e6c91b64 | 19 | #include <linux/cpufreq.h> |
8b5f79f9 VM |
20 | |
21 | #include <asm/blackfin.h> | |
e6c91b64 | 22 | #include <asm/time.h> |
1fa9be72 | 23 | #include <asm/gptimers.h> |
60ffdb36 | 24 | #include <asm/nmi.h> |
8b5f79f9 | 25 | |
e6c91b64 MH |
26 | /* Accelerators for sched_clock() |
27 | * convert from cycles(64bits) => nanoseconds (64bits) | |
28 | * basic equation: | |
29 | * ns = cycles / (freq / ns_per_sec) | |
30 | * ns = cycles * (ns_per_sec / freq) | |
31 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | |
32 | * ns = cycles * (10^6 / cpu_khz) | |
33 | * | |
34 | * Then we use scaling math (suggested by george@mvista.com) to get: | |
35 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | |
36 | * ns = cycles * cyc2ns_scale / SC | |
37 | * | |
38 | * And since SC is a constant power of two, we can convert the div | |
39 | * into a shift. | |
40 | * | |
41 | * We can use khz divisor instead of mhz to keep a better precision, since | |
42 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | |
43 | * (mathieu.desnoyers@polymtl.ca) | |
44 | * | |
45 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | |
46 | */ | |
47 | ||
8b5f79f9 VM |
48 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
49 | ||
#if defined(CONFIG_CYCLES_CLOCKSOURCE)

/*
 * Read the free-running core cycle counter.  The offset/shift globals are
 * maintained across cpufreq transitions so the returned value stays
 * monotonic even when the core clock is rescaled.
 */
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}

static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 400,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* sched_clock() backend driven by the core cycle counter. */
static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
		bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
	/* cycles tick at the core clock (CCLK) rate */
	bfin_cs_cycles.mult =
		clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

	if (clocksource_register(&bfin_cs_cycles))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif
85 | ||
#ifdef CONFIG_GPTMR0_CLOCKSOURCE

/*
 * Configure GPTIMER0 as a free-running counter: PWM mode with period
 * counting, output pin disabled, and maximal period/pulse width so it
 * wraps as rarely as possible.
 */
void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

	set_gptimer_config(TIMER0_id,
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* sched_clock() backend driven by GPTIMER0 (lower rating than cycles). */
static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
		bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	/* gptimers tick at the system clock (SCLK) rate */
	bfin_cs_gptimer0.mult =
		clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);

	if (clocksource_register(&bfin_cs_gptimer0))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif
135 | ||
ceb33be9 YL |
#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
 * Scheduler clock in nanoseconds.  When both backends are configured,
 * prefer the core cycle counter since it has the higher rating (400 vs
 * 350) and finer resolution.
 */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif
147 | ||
#if defined(CONFIG_TICKSOURCE_GPTMR0)
/* Arm GPTIMER0 to raise an interrupt 'cycles' SCLK ticks from now. */
static int bfin_gptmr0_set_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		/* auto-reloading PWM period of one tick (HZ) */
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA |
			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		/* single-shot: the width is programmed per-event later */
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* Clear the latched TIMIL0 status so the next interrupt can assert. */
static void bfin_gptmr0_ack(void)
{
	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

static void __init bfin_gptmr0_init(void)
{
	disable_gptimers(TIMER0bit);
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	evt->event_handler(evt);
	bfin_gptmr0_ack();
	return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
	.name		= "Blackfin GPTimer0",
	.flags		= IRQF_DISABLED | IRQF_TIMER |
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event	= bfin_gptmr0_set_next_event,
	.set_mode	= bfin_gptmr0_set_mode,
};

static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
	unsigned long clock_tick;

	clock_tick = get_sclk();
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	/* -1 == max 32-bit delta; 100 cycles is the practical minimum */
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(0);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */
241 | ||
#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);

/* Arm the core timer for a one-shot event 'cycles' ticks from now. */
static int bfin_coretmr_set_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

static void bfin_coretmr_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		/* auto-reload for a periodic tick */
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		/* powered but not running; COUNT is programmed per-event */
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

void bfin_coretmr_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* the TSCALE prescaler counter. */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	CSYNC();
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	smp_mb();
	evt->event_handler(evt);

	touch_nmi_watchdog();

	return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
	.name		= "Blackfin CoreTimer",
	.flags		= IRQF_DISABLED | IRQF_TIMER |
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_coretmr_interrupt,
};

void bfin_coretmr_clockevent_init(void)
{
	unsigned long clock_tick;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	evt->name = "bfin_core_timer";
	evt->rating = 350;
	evt->irq = -1;
	evt->shift = 32;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->set_next_event = bfin_coretmr_set_next_event;
	evt->set_mode = bfin_coretmr_set_mode;

	clock_tick = get_cclk() / TIME_SCALE;
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	/* -1 == max 32-bit delta; 100 cycles is the practical minimum */
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(cpu);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */
350 | ||
8b5f79f9 VM |
351 | |
352 | void __init time_init(void) | |
353 | { | |
354 | time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ | |
355 | ||
356 | #ifdef CONFIG_RTC_DRV_BFIN | |
357 | /* [#2663] hack to filter junk RTC values that would cause | |
358 | * userspace to have to deal with time values greater than | |
359 | * 2^31 seconds (which uClibc cannot cope with yet) | |
360 | */ | |
361 | if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) { | |
362 | printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n"); | |
363 | bfin_write_RTC_STAT(0); | |
364 | } | |
365 | #endif | |
366 | ||
367 | /* Initialize xtime. From now on, xtime is updated with timer interrupts */ | |
368 | xtime.tv_sec = secs_since_1970; | |
369 | xtime.tv_nsec = 0; | |
370 | set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); | |
371 | ||
1fa9be72 GY |
372 | bfin_cs_cycles_init(); |
373 | bfin_cs_gptimer0_init(); | |
0d152c27 YL |
374 | |
375 | #if defined(CONFIG_TICKSOURCE_CORETMR) | |
376 | bfin_coretmr_init(); | |
377 | setup_irq(IRQ_CORETMR, &coretmr_irq); | |
378 | bfin_coretmr_clockevent_init(); | |
379 | #endif | |
380 | ||
381 | #if defined(CONFIG_TICKSOURCE_GPTMR0) | |
382 | bfin_gptmr0_init(); | |
383 | setup_irq(IRQ_TIMER0, &gptmr0_irq); | |
384 | gptmr0_irq.dev_id = &clockevent_gptmr0; | |
385 | bfin_gptmr0_clockevent_init(&clockevent_gptmr0); | |
386 | #endif | |
387 | ||
388 | #if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0) | |
389 | # error at least one clock event device is required | |
390 | #endif | |
8b5f79f9 | 391 | } |