/*-
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 2005, 2008 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/smp.h>
#include <machine/tick.h>
#include <machine/ver.h>

/* 10000 ticks proved okay for 500MHz. */
#define TICK_GRACE(clock)       ((clock) / 1000000 * 2 * 10)

#define TICK_QUALITY_MP 10
#define TICK_QUALITY_UP 1000

SYSCTL_NODE(_machdep, OID_AUTO, tick, CTLFLAG_RD, 0, "tick statistics");

static int adjust_edges = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, adjust_edges, CTLFLAG_RD, &adjust_edges,
    0, "total number of times tick interrupts got more than 12.5% behind");

static int adjust_excess = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, adjust_excess, CTLFLAG_RD, &adjust_excess,
    0, "total number of ignored tick interrupts");

static int adjust_missed = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, adjust_missed, CTLFLAG_RD, &adjust_missed,
    0, "total number of missed tick interrupts");

static int adjust_ticks = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, adjust_ticks, CTLFLAG_RD, &adjust_ticks,
    0, "total number of tick interrupts with adjustment");

u_int hardclock_use_stick = 0;
SYSCTL_INT(_machdep_tick, OID_AUTO, hardclock_use_stick, CTLFLAG_RD,
    &hardclock_use_stick, 0, "hardclock uses STICK instead of TICK timer");

static struct timecounter stick_tc;
static struct timecounter tick_tc;
static u_long tick_increment;

static uint64_t tick_cputicks(void);
static timecounter_get_t stick_get_timecount_up;
#ifdef SMP
static timecounter_get_t stick_get_timecount_mp;
#endif
static timecounter_get_t tick_get_timecount_up;
#ifdef SMP
static timecounter_get_t tick_get_timecount_mp;
#endif
static void tick_hardclock(struct trapframe *tf);
static void tick_hardclock_bbwar(struct trapframe *tf);
static inline void tick_hardclock_common(struct trapframe *tf, u_long tick,
    u_long adj);
static inline void tick_process(struct trapframe *tf);
static void stick_hardclock(struct trapframe *tf);

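/*
 * CPU ticker backend registered via set_cputicker(); returns the raw
 * value of the TICK register.
 */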
static uint64_t
tick_cputicks(void)
{

        return (rd(tick));
}

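/*
 * Set up the PIL_TICK interrupt handler, the CPU ticker and the
 * (S)TICK-based timecounter(s).
 */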
void
cpu_initclocks(void)
{
        uint32_t clock, sclock;

        stathz = hz;

        clock = PCPU_GET(clock);
        sclock = 0;
        if (PCPU_GET(impl) == CPU_IMPL_SPARC64V ||
            PCPU_GET(impl) >= CPU_IMPL_ULTRASPARCIII) {
                if (OF_getprop(OF_peer(0), "stick-frequency", &sclock,
                    sizeof(sclock)) == -1) {
                        panic("%s: could not determine STICK frequency",
                            __func__);
                }
        }
        /*
         * Given that the STICK timers typically are driven at rather low
         * frequencies, they shouldn't be used except when really necessary.
         */
        if (hardclock_use_stick != 0) {
                intr_setup(PIL_TICK, stick_hardclock, -1, NULL, NULL);
                /*
                 * We don't provide a CPU ticker as long as the frequency
                 * supplied isn't actually used per-CPU.
                 */
        } else {
                intr_setup(PIL_TICK, PCPU_GET(impl) >= CPU_IMPL_ULTRASPARCI &&
                    PCPU_GET(impl) < CPU_IMPL_ULTRASPARCIII ?
                    tick_hardclock_bbwar : tick_hardclock, -1, NULL, NULL);
                set_cputicker(tick_cputicks, clock, 0);
        }
        tick_increment = clock / hz;
        /*
         * Prevent hardclock from stopping due to a lost (S)TICK interrupt
         * by ensuring that the (S)TICK period is at least TICK_GRACE ticks.
         */
        if (tick_increment < TICK_GRACE(clock))
                panic("%s: HZ too high, decrease to at least %d",
                    __func__, clock / TICK_GRACE(clock));
        tick_start();

        /*
         * Initialize the (S)TICK-based timecounter(s).
         * Note that we (try to) sync the (S)TICK timers of APs with the BSP
         * during their startup but not afterwards.  The resulting drift can
         * cause problems when the time is calculated based on (S)TICK values
         * read on different CPUs.  Thus we always read the register on the
         * BSP (if necessary via an IPI as sched_bind(9) isn't available in
         * all circumstances) and use a low quality for the otherwise high
         * quality (S)TICK timers in the MP case.
         */
        tick_tc.tc_get_timecount = tick_get_timecount_up;
        tick_tc.tc_counter_mask = ~0u;
        tick_tc.tc_frequency = clock;
        tick_tc.tc_name = "tick";
        tick_tc.tc_quality = TICK_QUALITY_UP;
#ifdef SMP
        if (cpu_mp_probe()) {
                tick_tc.tc_get_timecount = tick_get_timecount_mp;
                tick_tc.tc_quality = TICK_QUALITY_MP;
        }
#endif
        tc_init(&tick_tc);
        if (sclock != 0) {
                stick_tc.tc_get_timecount = stick_get_timecount_up;
                stick_tc.tc_counter_mask = ~0u;
                stick_tc.tc_frequency = sclock;
                stick_tc.tc_name = "stick";
                stick_tc.tc_quality = TICK_QUALITY_UP;
#ifdef SMP
                if (cpu_mp_probe()) {
                        stick_tc.tc_get_timecount = stick_get_timecount_mp;
                        stick_tc.tc_quality = TICK_QUALITY_MP;
                }
#endif
                tc_init(&stick_tc);
        }
}

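/*
 * Per-tick processing common to all (S)TICK interrupt handlers: run
 * hardclock(9) on the BSP and hardclock_cpu() on the APs, plus the
 * profiling and statistics clocks as appropriate.
 */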
static inline void
tick_process(struct trapframe *tf)
{

        if (curcpu == 0)
                hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
        else
                hardclock_cpu(TRAPF_USERMODE(tf));
        if (profprocs != 0)
                profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
        statclock(TRAPF_USERMODE(tf));
}

/*
 * NB: the sequence of reading the (S)TICK register, calculating the value
 * of the next tick and writing it to the (S)TICK_COMPARE register must not
 * be interrupted, not even by an IPI, otherwise a value that is in the past
 * could be written in the worst case, causing hardclock to stop.
 */

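/* hardclock(9) interrupt handler driven by the TICK timer. */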
static void
tick_hardclock(struct trapframe *tf)
{
        u_long adj, tick;
        register_t s;

        critical_enter();
        adj = PCPU_GET(tickadj);
        s = intr_disable();
        tick = rd(tick);
        wrtickcmpr(tick + tick_increment - adj, 0);
        intr_restore(s);
        tick_hardclock_common(tf, tick, adj);
        critical_exit();
}

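/*
 * Same as tick_hardclock() but reprograms the compare register via
 * wrtickcmpr_bbwar(), presumably working around a TICK_COMPARE write
 * erratum on the UltraSPARC I/II class CPUs this variant is used for.
 */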
static void
tick_hardclock_bbwar(struct trapframe *tf)
{
        u_long adj, tick;
        register_t s;

        critical_enter();
        adj = PCPU_GET(tickadj);
        s = intr_disable();
        tick = rd(tick);
        wrtickcmpr_bbwar(tick + tick_increment - adj, 0);
        intr_restore(s);
        tick_hardclock_common(tf, tick, adj);
        critical_exit();
}

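/* hardclock(9) interrupt handler driven by the system tick (STICK) timer. */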
static void
stick_hardclock(struct trapframe *tf)
{
        u_long adj, stick;
        register_t s;

        critical_enter();
        adj = PCPU_GET(tickadj);
        s = intr_disable();
        stick = rdstick();
        wrstickcmpr(stick + tick_increment - adj, 0);
        intr_restore(s);
        tick_hardclock_common(tf, stick, adj);
        critical_exit();
}

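/*
 * Common tail of the (S)TICK handlers: catch up on all tick periods that
 * have elapsed since the per-CPU reference value and compute the
 * adjustment applied to the next compare value when interrupts are
 * running behind, updating the statistics exported above.
 */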
static inline void
tick_hardclock_common(struct trapframe *tf, u_long tick, u_long adj)
{
        u_long ref;
        long delta;
        int count;

        ref = PCPU_GET(tickref);
        delta = tick - ref;
        count = 0;
        while (delta >= tick_increment) {
                tick_process(tf);
                delta -= tick_increment;
                ref += tick_increment;
                if (adj != 0)
                        adjust_ticks++;
                count++;
        }
        if (count > 0) {
                adjust_missed += count - 1;
                if (delta > (tick_increment >> 3)) {
                        if (adj == 0)
                                adjust_edges++;
                        adj = tick_increment >> 4;
                } else
                        adj = 0;
        } else {
                adj = 0;
                adjust_excess++;
        }
        PCPU_SET(tickref, ref);
        PCPU_SET(tickadj, adj);
}

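/*
 * Timecounter read functions for the UP case; these simply return the
 * low 32 bits of the corresponding counter.
 */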
static u_int
stick_get_timecount_up(struct timecounter *tc)
{

        return ((u_int)rdstick());
}

static u_int
tick_get_timecount_up(struct timecounter *tc)
{

        return ((u_int)rd(tick));
}

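/*
 * Timecounter read functions for the MP case; as described in
 * cpu_initclocks(), the counter is always read on the BSP, via an IPI
 * when running on another CPU.
 */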
#ifdef SMP
static u_int
stick_get_timecount_mp(struct timecounter *tc)
{
        u_long stick;

        sched_pin();
        if (curcpu == 0)
                stick = rdstick();
        else
                ipi_wait(ipi_rd(0, tl_ipi_stick_rd, &stick));
        sched_unpin();
        return (stick);
}

static u_int
tick_get_timecount_mp(struct timecounter *tc)
{
        u_long tick;

        sched_pin();
        if (curcpu == 0)
                tick = rd(tick);
        else
                ipi_wait(ipi_rd(0, tl_ipi_tick_rd, &tick));
        sched_unpin();
        return (tick);
}
#endif

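/*
 * Start the (S)TICK interrupts on the calling CPU by arming the first
 * compare interrupt relative to a tick_increment-aligned counter value.
 */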
void
tick_start(void)
{
        u_long base;
        register_t s;

        /*
         * Try to make the (S)TICK interrupts fire as synchronously as
         * possible on all CPUs to avoid inaccuracies for migrating
         * processes.  Skip one tick to make sure that the first interrupt
         * is not missed.
         */
        critical_enter();
        PCPU_SET(tickadj, 0);
        s = intr_disable();
        if (hardclock_use_stick != 0)
                base = rdstick();
        else
                base = rd(tick);
        base = roundup(base, tick_increment);
        PCPU_SET(tickref, base);
        if (hardclock_use_stick != 0)
                wrstickcmpr(base + tick_increment, 0);
        else
                wrtickcmpr(base + tick_increment, 0);
        intr_restore(s);
        critical_exit();
}

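/* Reset the (S)TICK counters of the calling CPU to zero. */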
void
tick_clear(u_int cpu_impl)
{

        if (cpu_impl == CPU_IMPL_SPARC64V ||
            cpu_impl >= CPU_IMPL_ULTRASPARCIII)
                wrstick(0, 0);
        wrpr(tick, 0, 0);
}

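/*
 * Disable (S)TICK compare interrupts on the calling CPU by programming
 * the compare registers with bit 63 (the interrupt-disable bit) set.
 */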
void
tick_stop(u_int cpu_impl)
{

        if (cpu_impl == CPU_IMPL_SPARC64V ||
            cpu_impl >= CPU_IMPL_ULTRASPARCIII)
                wrstickcmpr(1L << 63, 0);
        wrtickcmpr(1L << 63, 0);
}