/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t        tsc_freq;
int             tsc_is_invariant;
int             tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int      smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
#endif

static int      tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int      tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static unsigned tsc_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
        tsc_get_timecount,      /* get_timecount */
        0,                      /* no poll_pps */
        ~0u,                    /* counter_mask */
        0,                      /* frequency */
        "TSC",                  /* name */
        800,                    /* quality (adjusted in code) */
};

#define VMW_HVMAGIC             0x564d5868
#define VMW_HVPORT              0x5658
#define VMW_HVCMD_GETVERSION    10
#define VMW_HVCMD_GETHZ         45

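/*
 * VMware "backdoor" call: an inl from I/O port VMW_HVPORT with the magic
 * number VMW_HVMAGIC in %eax and a command in %ecx traps to the
 * hypervisor, which returns its results in %eax-%edx.  This must only be
 * attempted once we already believe we are running under VMware.
 */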
static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

        __asm __volatile("inl %w3, %0"
        : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
        : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
        : "memory");
}

static int
tsc_freq_vmware(void)
{
        char hv_sig[13];
        u_int regs[4];
        char *p;
        u_int hv_high;
        int i;

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        hv_high = 0;
        if ((cpu_feature2 & CPUID2_HV) != 0) {
                do_cpuid(0x40000000, regs);
                hv_high = regs[0];
                for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
                        memcpy(p, &regs[i], sizeof(regs[i]));
                *p = '\0';
                if (bootverbose) {
                        /*
                         * HV vendor    ID string
                         * ------------+--------------
                         * KVM          "KVMKVMKVM"
                         * Microsoft    "Microsoft Hv"
                         * VMware       "VMwareVMware"
                         * Xen          "XenVMMXenVMM"
                         */
                        printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
                }
                if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
                        return (0);
        } else {
                p = getenv("smbios.system.serial");
                if (p == NULL)
                        return (0);
                if (strncmp(p, "VMware-", 7) != 0 &&
                    strncmp(p, "VMW", 3) != 0) {
                        freeenv(p);
                        return (0);
                }
                freeenv(p);
                vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
                if (regs[1] != VMW_HVMAGIC)
                        return (0);
        }
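        /*
         * CPUID leaf 0x40000010 reports the TSC frequency in kHz in
         * %eax; otherwise fall back to the GETHZ backdoor command, which
         * returns the frequency in Hz split across %ebx (high) and
         * %eax (low), with %ebx == UINT_MAX meaning "unknown".
         */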
        if (hv_high >= 0x40000010) {
                do_cpuid(0x40000010, regs);
                tsc_freq = regs[0] * 1000;
        } else {
                vmware_hvcall(VMW_HVCMD_GETHZ, regs);
                if (regs[1] != UINT_MAX)
                        tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
        }
        tsc_is_invariant = 1;
        return (1);
}

static void
tsc_freq_intel(void)
{
        char brand[48];
        u_int regs[4];
        uint64_t freq;
        char *p;
        u_int i;

        /*
         * Intel Processor Identification and the CPUID Instruction
         * Application Note 485.
         * http://www.intel.com/assets/pdf/appnote/241618.pdf
         */
        if (cpu_exthigh >= 0x80000004) {
                p = brand;
                for (i = 0x80000002; i < 0x80000005; i++) {
                        do_cpuid(i, regs);
                        memcpy(p, regs, sizeof(regs));
                        p += sizeof(regs);
                }
                p = NULL;
                for (i = 0; i < sizeof(brand) - 1; i++)
                        if (brand[i] == 'H' && brand[i + 1] == 'z')
                                p = brand + i;
                if (p != NULL) {
                        p -= 5;
                        switch (p[4]) {
                        case 'M':
                                i = 1;
                                break;
                        case 'G':
                                i = 1000;
                                break;
                        case 'T':
                                i = 1000000;
                                break;
                        default:
                                return;
                        }
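                        /*
                         * Example: for a brand string ending in "2.40GHz",
                         * p now points at "2.40G", so p[1] == '.' and
                         * freq = 2 * 1000 + 4 * 100 + 0 * 10 = 2400,
                         * which i == 1000 ('G') scales to 2.4e9 Hz below.
                         */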
#define C2D(c)  ((c) - '0')
                        if (p[1] == '.') {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[2]) * 100;
                                freq += C2D(p[3]) * 10;
                                freq *= i * 1000;
                        } else {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[1]) * 100;
                                freq += C2D(p[2]) * 10;
                                freq += C2D(p[3]);
                                freq *= i * 1000000;
                        }
#undef C2D
                        tsc_freq = freq;
                }
        }
}

static void
probe_tsc_freq(void)
{
        u_int regs[4];
        uint64_t tsc1, tsc2;

        if (cpu_high >= 6) {
                do_cpuid(6, regs);
                if ((regs[2] & CPUID_PERF_STAT) != 0) {
                        /*
                         * XXX Some emulators expose host CPUID without actual
                         * support for these MSRs.  We must test whether they
                         * really work.
                         */
                        wrmsr(MSR_MPERF, 0);
                        wrmsr(MSR_APERF, 0);
                        DELAY(10);
                        if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
                                tsc_perf_stat = 1;
                }
        }

        if (tsc_freq_vmware())
                return;

        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) >= 0x10))
                        tsc_is_invariant = 1;
                break;
        case CPU_VENDOR_INTEL:
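                /*
                 * Note that amd_pminfo caches CPUID leaf 0x80000007
                 * (advanced power management) for Intel CPUs as well,
                 * despite its name; older models are matched by the
                 * family/model table from Application Note 485.
                 */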
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
                    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
                    CPUID_TO_MODEL(cpu_id) >= 0x3))))
                        tsc_is_invariant = 1;
                break;
        case CPU_VENDOR_CENTAUR:
                if (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xf &&
                    (rdmsr(0x1203) & 0x100000000ULL) == 0)
                        tsc_is_invariant = 1;
                break;
        }

        if (tsc_skip_calibration) {
                if (cpu_vendor_id == CPU_VENDOR_INTEL)
                        tsc_freq_intel();
                return;
        }

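        /*
         * Calibrate by reading the TSC on both sides of DELAY(1000000),
         * which busy-waits for one second on an independent timer, so
         * the difference between the two reads is the frequency in Hz.
         */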
        if (bootverbose)
                printf("Calibrating TSC clock ... ");
        tsc1 = rdtsc();
        DELAY(1000000);
        tsc2 = rdtsc();
        tsc_freq = tsc2 - tsc1;
        if (bootverbose)
                printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}

void
init_TSC(void)
{

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        probe_tsc_freq();

        /*
         * Inform CPU accounting about our boot-time clock rate.  This will
         * be updated if someone loads a cpufreq driver after boot that
         * discovers a new max frequency.
         */
        if (tsc_freq != 0)
                set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

        if (tsc_is_invariant)
                return;

        /* Register to find out about changes in CPU frequency. */
        tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
            tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
            tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
            tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/* rmb is required here because rdtsc is not a serializing instruction. */
#define TSC_READ(x)                     \
static void                             \
tsc_read_##x(void *arg)                 \
{                                       \
        uint32_t *tsc = arg;            \
        u_int cpu = PCPU_GET(cpuid);    \
                                        \
        rmb();                          \
        tsc[cpu * 3 + x] = rdtsc32();   \
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ
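
/*
 * Each rendezvous stores three samples per CPU: tsc_read_0 runs as the
 * setup hook, tsc_read_1 as the action and tsc_read_2 as the teardown,
 * filling tsc[cpu * 3 + 0..2] for one round of the test below.
 */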

#define N       1000

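/*
 * The rendezvous barriers guarantee that, in real time, every CPU's
 * sample 1 was taken after every CPU's sample 0, and sample 2 after
 * every sample 1.  If one of our later samples is not strictly greater
 * than another CPU's earlier sample, the TSCs must be out of sync.
 */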
static void
comp_smp_tsc(void *arg)
{
        uint32_t *tsc;
        int32_t d1, d2;
        u_int cpu = PCPU_GET(cpuid);
        u_int i, j, size;

        size = (mp_maxid + 1) * 3;
        for (i = 0, tsc = arg; i < N; i++, tsc += size)
                CPU_FOREACH(j) {
                        if (j == cpu)
                                continue;
                        d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
                        d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
                        if (d1 <= 0 || d2 <= 0) {
                                smp_tsc = 0;
                                return;
                        }
                }
}

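/*
 * Returns a timecounter quality for the TSC: a negative value prevents
 * it from being selected automatically (it may still be forced with the
 * kern.timecounter.hardware sysctl), while larger positive values make
 * it win over other timecounters.
 */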
static int
test_smp_tsc(void)
{
        uint32_t *data, *tsc;
        u_int i, size;

        if (!smp_tsc && !tsc_is_invariant)
                return (-100);
        size = (mp_maxid + 1) * 3;
        data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
        for (i = 0, tsc = data; i < N; i++, tsc += size)
                smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
        smp_tsc = 1;    /* XXX */
        smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
            smp_no_rendevous_barrier, data);
        free(data, M_TEMP);
        if (bootverbose)
                printf("SMP: %sed TSC synchronization test\n",
                    smp_tsc ? "pass" : "fail");
        if (smp_tsc && tsc_is_invariant) {
                switch (cpu_vendor_id) {
                case CPU_VENDOR_AMD:
                        /*
                         * Starting with Family 15h processors, TSC clock
                         * source is in the north bridge.  Check whether
                         * we have a single-socket/multi-core platform.
                         * XXX Need more work for complex cases.
                         */
                        if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
                            (amd_feature2 & AMDID2_CMP) == 0 ||
                            smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
                                break;
                        return (1000);
                case CPU_VENDOR_INTEL:
                        /*
                         * XXX Assume Intel platforms have synchronized TSCs.
                         */
                        return (1000);
                }
                return (800);
        }
        return (-100);
}

#undef N

#endif /* SMP */

static void
init_TSC_tc(void)
{
        uint64_t max_freq;
        int shift;

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        /*
         * Limit timecounter frequency to fit in an int and prevent it from
         * overflowing too fast.
         */
        max_freq = UINT_MAX;

        /*
         * We cannot use the TSC if we support APM.  Precise timekeeping
         * on an APM'ed machine is at best a fool's pursuit, since
         * any and all of the time spent in various SMM code can't
         * be reliably accounted for.  Reading the RTC is your only
         * source of reliable time info.  The i8254 loses too, of course,
         * but we need to have some kind of time...
         * We don't know at this point whether APM is going to be used
         * or not, nor when it might be activated.  Play it safe.
         */
        if (power_pm_get_type() == POWER_PM_TYPE_APM) {
                tsc_timecounter.tc_quality = -1000;
                if (bootverbose)
                        printf("TSC timecounter disabled: APM enabled.\n");
                goto init;
        }

        /*
         * We cannot use the TSC if it stops incrementing in deep sleep.
         * Currently only Intel CPUs are known for this problem unless
         * the invariant TSC bit is set.
         */
        if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
            (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
                tsc_timecounter.tc_quality = -1000;
                tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
                if (bootverbose)
                        printf("TSC timecounter disabled: C3 enabled.\n");
                goto init;
        }

#ifdef SMP
        /*
         * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
         * are synchronized.  If the user is sure that the system has
         * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to
         * a non-zero value.  We also limit the frequency even lower to
         * avoid "temporal anomalies" as much as possible.  The TSC seems
         * unreliable in virtualized SMP environments, so it is set to a
         * negative quality in those cases.
         */
        if (smp_cpus > 1) {
                if (vm_guest != 0) {
                        tsc_timecounter.tc_quality = -100;
                } else {
                        tsc_timecounter.tc_quality = test_smp_tsc();
                        max_freq >>= 8;
                }
        } else
#endif
        if (tsc_is_invariant)
                tsc_timecounter.tc_quality = 1000;

init:
        for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
                ;
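        /*
         * shift is now the smallest count that brings tsc_freq at or
         * below max_freq; any discarded low-order bits are shifted out
         * in tsc_get_timecount_low() using the value stashed in tc_priv.
         */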
        if (shift > 0) {
                tsc_timecounter.tc_get_timecount = tsc_get_timecount_low;
                tsc_timecounter.tc_name = "TSC-low";
                if (bootverbose)
                        printf("TSC timecounter discards lower %d bit(s)\n",
                            shift);
        }
        if (tsc_freq != 0) {
                tsc_timecounter.tc_frequency = tsc_freq >> shift;
                tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
                tc_init(&tsc_timecounter);
        }
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
        device_t cf_dev;
        struct cf_level *levels;
        int count, error;
        uint64_t max_freq;

        /* Only use values from the first CPU, assuming all are equal. */
        if (unit != 0)
                return;

        /* Find the appropriate cpufreq device instance. */
        cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
        if (cf_dev == NULL) {
                printf("tsc_levels_changed() called but no cpufreq device?\n");
                return;
        }

        /* Get settings from the device and find the max frequency. */
        count = 64;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return;
        error = CPUFREQ_LEVELS(cf_dev, levels, &count);
        if (error == 0 && count != 0) {
                max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
                set_cputicker(rdtsc, max_freq, 1);
        } else
                printf("tsc_levels_changed: no max freq found\n");
        free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

        if (*status != 0 || timecounter != &tsc_timecounter)
                return;

        printf("timecounter TSC must not be in use when "
            "changing frequencies; change denied\n");
        *status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
        uint64_t freq;

        /* If there was an error during the transition, don't do anything. */
        if (tsc_disabled || status != 0)
                return;

        /* Total setting for this level gives the new frequency in MHz. */
        freq = (uint64_t)level->total_set.freq * 1000000;
        atomic_store_rel_64(&tsc_freq, freq);
        tsc_timecounter.tc_frequency =
            freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
        int error;
        uint64_t freq;

        freq = atomic_load_acq_64(&tsc_freq);
        if (freq == 0)
                return (EOPNOTSUPP);
        error = sysctl_handle_64(oidp, &freq, 0, req);
        if (error == 0 && req->newptr != NULL) {
                atomic_store_rel_64(&tsc_freq, freq);
                atomic_store_rel_64(&tsc_timecounter.tc_frequency,
                    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
        }
        return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");
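
/*
 * Example usage from userland (output value is illustrative):
 *
 *   $ sysctl machdep.tsc_freq
 *   machdep.tsc_freq: 2394000000
 *   # sysctl machdep.tsc_freq=2393999000
 *
 * Setting a new value rescales tsc_timecounter.tc_frequency by the
 * shift stored in tc_priv, as in the handler above.
 */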

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

        return (rdtsc32());
}

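/*
 * rdtsc leaves the 64-bit counter in %edx:%eax; shrd then shifts the
 * pair right by tc_priv bits, so the low-order bits discarded in
 * init_TSC_tc() never reach the timecounter.
 */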
static u_int
tsc_get_timecount_low(struct timecounter *tc)
{
        uint32_t rv;

        __asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
        : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
        return (rv);
}

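/*
 * Export the shift to the vDSO timehands so that userland can read the
 * timecounter without entering the kernel, applying the same
 * "rdtsc() >> shift" scaling as tsc_get_timecount_low().  The return
 * value tells the caller whether the TSC is the active timecounter, and
 * therefore whether the userland fast path may be used at all.
 */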
uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

        vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
        return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

        vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
        return (timecounter == &tsc_timecounter);
}
#endif