2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2011 The FreeBSD Foundation
5 * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
8 * Based on mpcore_timer.c developed by Ben Gray <ben.r.gray@gmail.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
40 #include "opt_platform.h"
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
45 #include <sys/param.h>
46 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/module.h>
50 #include <sys/malloc.h>
52 #include <sys/timeet.h>
53 #include <sys/timetc.h>
56 #include <sys/watchdog.h>
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61 #include <machine/machdep.h>
62 #include <machine/md_var.h>
64 #if defined(__aarch64__)
65 #include <machine/undefined.h>
69 #include <dev/ofw/openfirm.h>
70 #include <dev/ofw/ofw_bus.h>
71 #include <dev/ofw/ofw_bus_subr.h>
75 #include <contrib/dev/acpica/include/acpi.h>
76 #include <dev/acpica/acpivar.h>
/* Indices into the timer IRQ table (also used as ACPI resource rids). */
#define GT_PHYS_SECURE 0
#define GT_PHYS_NONSECURE 1

/* Capacity of arm_tmr_softc.irqs[]. */
#define GT_IRQ_COUNT 5

/* Bits in the CNTx_CTL timer control registers. */
#define GT_CTRL_ENABLE (1 << 0)
#define GT_CTRL_INT_MASK (1 << 1)
#define GT_CTRL_INT_STAT (1 << 2)

/* CNTKCTL bits: EL0/PL0 access control for the counter/timer registers. */
#define GT_CNTKCTL_PL0PTEN (1 << 9) /* PL0 Physical timer reg access */
#define GT_CNTKCTL_PL0VTEN (1 << 8) /* PL0 Virtual timer reg access */
#define GT_CNTKCTL_EVNTI (0xf << 4) /* Virtual counter event bits */
#define GT_CNTKCTL_EVNTDIR (1 << 3) /* Virtual counter event transition */
#define GT_CNTKCTL_EVNTEN (1 << 2) /* Enables virtual counter events */
#define GT_CNTKCTL_PL0VCTEN (1 << 1) /* PL0 CNTVCT and CNTFRQ access */
#define GT_CNTKCTL_PL0PCTEN (1 << 0) /* PL0 CNTPCT and CNTFRQ access */
struct arm_tmr_softc;

/*
 * Per-interrupt bookkeeping (struct arm_tmr_irq); only the resource member
 * is visible here -- rid/idx/ihl are referenced later but not shown.
 */
struct resource *res;

/* Per-device softc for the generic timer. */
struct arm_tmr_softc {
	struct arm_tmr_irq irqs[GT_IRQ_COUNT];
	/* Counter-read hook; replaced with the A64 workaround when needed. */
	uint64_t (*get_cntxct)(bool);
	struct eventtimer et;

/* Singleton pointer to the attached timer, for timecounter/DELAY paths. */
static struct arm_tmr_softc *arm_tmr_sc = NULL;
/*
 * Static description of the possible timer interrupts.  Entries flagged
 * RF_OPTIONAL (e.g. the secure timer) may legitimately be absent on a
 * given platform; several entries/fields are not visible in this view.
 */
static const struct arm_tmr_irq_defs {
} arm_tmr_irq_defs[] = {
	.idx = GT_PHYS_SECURE,
	.flags = RF_ACTIVE | RF_OPTIONAL,
	.idx = GT_PHYS_NONSECURE,
	.flags = RF_ACTIVE | RF_OPTIONAL,
	.flags = RF_ACTIVE | RF_OPTIONAL,
/* Forward declarations for the shared (FDT + ACPI) attach path. */
static int arm_tmr_attach(device_t);
static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
static void arm_tmr_do_delay(int usec, void *);

static timecounter_get_t arm_tmr_get_timecount;

/* Timecounter backed by the architectural counter (CNTPCT/CNTVCT). */
static struct timecounter arm_tmr_timecount = {
	.tc_name = "ARM MPCore Timecounter",
	.tc_get_timecount = arm_tmr_get_timecount,
	/* Full 32-bit mask: the low word of the 64-bit counter is used. */
	.tc_counter_mask = ~0u,
	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
/*
 * Accessors for the timer system registers.  The first set uses the
 * AArch32 cp15 coprocessor interface; after the #else the AArch64
 * MRS/MSR special-register interface is used.  (The opening #if of this
 * conditional is not visible in this view.)
 */
#define get_el0(x) cp15_## x ##_get()
#define get_el1(x) cp15_## x ##_get()
#define set_el0(x, val) cp15_## x ##_set(val)
#define set_el1(x, val) cp15_## x ##_set(val)
/* AArch32: the physical timer is always usable. */
#define HAS_PHYS true
#else /* __aarch64__ */
#define get_el0(x) READ_SPECIALREG(x ##_el0)
#define get_el1(x) READ_SPECIALREG(x ##_el1)
#define set_el0(x, val) WRITE_SPECIALREG(x ##_el0, val)
#define set_el1(x, val) WRITE_SPECIALREG(x ##_el1, val)
/* AArch64: only use the physical timer when not running under a hypervisor. */
#define HAS_PHYS has_hyp()

	/* Body of get_freq(): counter frequency as programmed in CNTFRQ. */
	return (get_el0(cntfrq));
/*
 * Counter read with the Allwinner A64 unstable-timer workaround: re-read
 * the counter until the sampled value is not within one tick of a roll
 * of the low 11 bits, where the erratum can return bogus values.
 */
get_cntxct_a64_unstable(bool physical)
		val = get_el0(cntpct);
	/* Retry while the low bits sit on a rollover boundary. */
	while (((val + 1) & 0x7FF) <= 1);
		val = get_el0(cntvct);
	while (((val + 1) & 0x7FF) <= 1);
/* Plain counter read: CNTPCT (physical) or CNTVCT (virtual). */
get_cntxct(bool physical)
		val = get_el0(cntpct);
		val = get_el0(cntvct);
/* Write the timer control register (CNTP_CTL or CNTV_CTL). */
set_ctrl(uint32_t val, bool physical)
		set_el0(cntp_ctl, val);
		set_el0(cntv_ctl, val);

/* Program the timer downcounter (CNTP_TVAL or CNTV_TVAL). */
set_tval(uint32_t val, bool physical)
		set_el0(cntp_tval, val);
		set_el0(cntv_tval, val);

/* Read back the timer control register (CNTP_CTL or CNTV_CTL). */
get_ctrl(bool physical)
		val = get_el0(cntp_ctl);
		val = get_el0(cntv_ctl);
/*
 * Per-CPU rendezvous callback: program CNTKCTL so userland can read the
 * virtual counter (and the physical one when this driver uses it), while
 * leaving direct EL0 timer-register access and counter events disabled.
 */
setup_user_access(void *arg __unused)

	cntkctl = get_el1(cntkctl);
	/* Strip EL0 timer-register access and event-stream enables. */
	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
	/* Always enable the virtual timer */
	cntkctl |= GT_CNTKCTL_PL0VCTEN;
	/* Enable the physical timer if supported */
	if (arm_tmr_sc->physical) {
		cntkctl |= GT_CNTKCTL_PL0PCTEN;
	set_el1(cntkctl, cntkctl);
/*
 * Undefined-instruction handler that emulates userland reads of CNTPCT_EL0
 * by returning the virtual counter instead: decode the trapped MRS, store
 * CNTVCT_EL0 into the destination GPR (x0..x29; reg 30 is special-cased,
 * its handling is not visible in this view), then step past the insn.
 */
cntpct_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,

	/* Not an MRS instruction -- not ours to handle. */
	if ((insn & MRS_MASK) != MRS_VALUE)

	/* MRS of some register other than CNTPCT_EL0 -- not ours either. */
	if (MRS_SPECIAL(insn) != MRS_SPECIAL(CNTPCT_EL0))

	reg = MRS_REGISTER(insn);
	/* Substitute the virtual counter for the physical one. */
	val = READ_SPECIALREG(cntvct_el0);
	if (reg < nitems(frame->tf_x)) {
		frame->tf_x[reg] = val;
	} else if (reg == 30) {

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;
/*
 * SYSINIT hook, run after SMP start: apply setup_user_access() on every
 * CPU and, when the hw.emulate_phys_counter tunable asks for it, install
 * the CNTPCT_EL0 emulation trap handler.
 */
tmr_setup_user_access(void *arg __unused)

	/* Only meaningful once a timer has actually attached. */
	if (arm_tmr_sc != NULL) {
		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
			install_undef_handler(true, cntpct_handler);
SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);
/* timecounter_get_t: return the low 32 bits of the architectural counter. */
arm_tmr_get_timecount(struct timecounter *tc)
	return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical));
/*
 * Event-timer start method (one-shot only; period is ignored): convert the
 * sbintime_t deadline to counter ticks, program TVAL, then unmask and
 * enable the timer interrupt.
 */
arm_tmr_start(struct eventtimer *et, sbintime_t first,
    sbintime_t period __unused)
	struct arm_tmr_softc *sc;

	sc = (struct arm_tmr_softc *)et->et_priv;

	/* sbintime is 32.32 fixed point: ticks = freq * first >> 32. */
	counts = ((uint32_t)et->et_frequency * first) >> 32;
	ctrl = get_ctrl(sc->physical);
	ctrl &= ~GT_CTRL_INT_MASK;
	ctrl |= GT_CTRL_ENABLE;
	set_tval(counts, sc->physical);
	set_ctrl(ctrl, sc->physical);
/* Stop the selected (physical or virtual) timer by clearing ENABLE. */
arm_tmr_disable(bool physical)

	ctrl = get_ctrl(physical);
	ctrl &= ~GT_CTRL_ENABLE;
	set_ctrl(ctrl, physical);
/* Event-timer stop method: disable the timer this softc drives. */
arm_tmr_stop(struct eventtimer *et)
	struct arm_tmr_softc *sc;

	sc = (struct arm_tmr_softc *)et->et_priv;
	arm_tmr_disable(sc->physical);
/*
 * Filter interrupt handler: acknowledge a pending timer interrupt by
 * masking it, then deliver the event-timer callback if one is active.
 */
arm_tmr_intr(void *arg)
	struct arm_tmr_softc *sc;

	sc = (struct arm_tmr_softc *)arg;
	ctrl = get_ctrl(sc->physical);
	if (ctrl & GT_CTRL_INT_STAT) {
		/* Mask the interrupt; arm_tmr_start() unmasks on re-arm. */
		ctrl |= GT_CTRL_INT_MASK;
		set_ctrl(ctrl, sc->physical);

	if (sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
/*
 * Allocate one timer IRQ resource and record it in the softc's irqs[]
 * array.  Failure to allocate an RF_OPTIONAL interrupt is tolerated
 * (logged only under bootverbose); a required one is a hard error.
 */
arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
    const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
	struct arm_tmr_irq *irq;

	irq = &sc->irqs[sc->irq_count];
	irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	if (irq->res == NULL) {
		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
			    "could not allocate irq for %s interrupt '%s'\n",
			    (flags & RF_OPTIONAL) != 0 ? "optional" :
			    "required", irq_def->name);

		/* Only a missing required interrupt is fatal. */
		if ((flags & RF_OPTIONAL) == 0)

		device_printf(dev, "allocated irq for '%s'\n",
	irq->idx = irq_def->idx;
/* FDT probe: match the standard armv7/armv8 generic-timer compatibles. */
arm_tmr_fdt_probe(device_t dev)

	if (!ofw_bus_status_okay(dev))

	if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
		device_set_desc(dev, "ARMv8 Generic Timer");
		return (BUS_PROBE_DEFAULT);
	} else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
		device_set_desc(dev, "ARMv7 Generic Timer");
		return (BUS_PROBE_DEFAULT);
/*
 * FDT attach: map each known timer interrupt to a resource id -- by
 * "interrupt-names" when the property exists, otherwise by positional
 * index -- allocate them, then run the shared arm_tmr_attach().  On
 * failure, release whatever IRQ resources were already allocated.
 */
arm_tmr_fdt_attach(device_t dev)
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	has_names = OF_hasprop(node, "interrupt-names");
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {

		/*
		 * If we don't have names to go off of, we assume that they're
		 * in the "usual" order with sec-phys first and allocate by idx.
		 */
		irq_def = &arm_tmr_irq_defs[i];
		flags = irq_def->flags;
		error = ofw_bus_find_string_index(node,
		    "interrupt-names", irq_def->name, &rid);

		/*
		 * If we have names, missing a name means we don't
		 * have that interrupt at all.
		 * NOTE(review): continuation of this comment is not
		 * visible here.
		 */

		/*
		 * Could be noisy on a lot of platforms for no
		 * good reason, so gate the warning.
		 */
		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
			    "could not find irq for %s interrupt '%s'\n",
			    (flags & RF_OPTIONAL) != 0 ?
			    "optional" : "required",

		/* Only a missing required interrupt aborts the attach. */
		if ((flags & RF_OPTIONAL) == 0)

		/*
		 * Warn about failing to activate if we did actually
		 * have the name present.
		 */
		flags &= ~RF_OPTIONAL;
		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);

	error = arm_tmr_attach(dev);
	/* Unwind: release any IRQ resources allocated above. */
	for (i = 0; i < sc->irq_count; i++) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
/* Register one GTDT interrupt as an IRQ resource on the new child device. */
arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
	BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
/*
 * ACPI identify: locate and map the GTDT table, create the generic_timer
 * child, and attach the secure, non-secure and virtual timer interrupts
 * listed in the table as resources on that child.
 */
arm_tmr_acpi_identify(driver_t *driver, device_t parent)
	ACPI_TABLE_GTDT *gtdt;

	physaddr = acpi_find_table(ACPI_SIG_GTDT);
	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
		device_printf(parent, "gic: Unable to map the GTDT\n");

	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
	    "generic_timer", -1);
		device_printf(parent, "add gic child failed\n");

	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
	    gtdt->SecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
	    gtdt->NonSecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
	    gtdt->VirtualTimerInterrupt);

	/* Table no longer needed once the IRQs are recorded. */
	acpi_unmap_table(gtdt);
/* ACPI probe: device was created by our identify hook, so always match. */
arm_tmr_acpi_probe(device_t dev)

	device_set_desc(dev, "ARM Generic Timer");
	return (BUS_PROBE_NOWILDCARD);
/*
 * ACPI attach: allocate every timer IRQ using the table index as rid
 * (matching arm_tmr_acpi_identify()), then run the shared attach path;
 * release allocated IRQs on failure.
 */
arm_tmr_acpi_attach(device_t dev)
	const struct arm_tmr_irq_defs *irq_def;
	struct arm_tmr_softc *sc;

	sc = device_get_softc(dev);
	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		irq_def = &arm_tmr_irq_defs[i];
		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,

	error = arm_tmr_attach(dev);
	/* Unwind: release any IRQ resources allocated above. */
	for (int i = 0; i < sc->irq_count; i++) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->irqs[i].rid, sc->irqs[i].res);
/*
 * Common attach path shared by the FDT and ACPI front ends: determine the
 * counter frequency, verify the required IRQs were allocated, choose the
 * physical or virtual timer, install interrupt handlers, and register the
 * timecounter, event timer and DELAY backend.  Several lines are missing
 * from this view; the visible code is annotated in place.
 */
arm_tmr_attach(device_t dev)
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;
	int i, first_timer, last_timer;

	sc = device_get_softc(dev);

	/* Default counter reader; may be swapped for the A64 workaround. */
	sc->get_cntxct = &get_cntxct;

	/* Get the base clock frequency */
	node = ofw_bus_get_node(dev);
	error = OF_getencprop(node, "clock-frequency", &clock,
	if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
		sc->get_cntxct = &get_cntxct_a64_unstable;
		    "Enabling allwinner unstable timer workaround\n");

	if (sc->clkfreq == 0) {
		/* Try to get clock frequency from timer */
		sc->clkfreq = get_freq();

	if (sc->clkfreq == 0) {
		device_printf(dev, "No clock frequency specified\n");

	/* Confirm that non-optional irqs were allocated before coming in. */
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {

		irq_def = &arm_tmr_irq_defs[i];

		/* Skip optional interrupts */
		if ((irq_def->flags & RF_OPTIONAL) != 0)

		for (j = 0; j < sc->irq_count; j++) {
			if (sc->irqs[j].idx == irq_def->idx)
		KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
		    __func__, irq_def->name));

	/*
	 * Use the virtual timer when we can't use the hypervisor.
	 * A hypervisor guest may change the virtual timer registers while
	 * executing so any use of the virtual timer interrupt needs to be
	 * coordinated with the virtual machine manager.
	 */
		sc->physical = false;
		first_timer = GT_VIRT;
		last_timer = GT_VIRT;

	/* Otherwise set up the secure and non-secure physical timers. */
		first_timer = GT_PHYS_SECURE;
		last_timer = GT_PHYS_NONSECURE;

	/* Setup secure, non-secure and virtual IRQs handler */
	for (i = 0; i < sc->irq_count; i++) {
		/* Only enable IRQs on timers we expect to use */
		if (sc->irqs[i].idx < first_timer ||
		    sc->irqs[i].idx > last_timer)
		error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
		    arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
			device_printf(dev, "Unable to alloc int resource.\n");
			/* Tear down handlers installed so far. */
			for (int j = 0; j < i; j++)
				bus_teardown_intr(dev, sc->irqs[j].res,

	/* Disable the timers until we are ready */
	arm_tmr_disable(false);
	arm_tmr_disable(true);

	arm_tmr_timecount.tc_frequency = sc->clkfreq;
	tc_init(&arm_tmr_timecount);

	/* Register the per-CPU one-shot event timer. */
	sc->et.et_name = "ARM MPCore Eventtimer";
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_quality = 1000;
	sc->et.et_frequency = sc->clkfreq;
	/* Min/max periods expressed in sbintime_t 32.32 fixed point. */
	sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
	sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
	sc->et.et_start = arm_tmr_start;
	sc->et.et_stop = arm_tmr_stop;
	et_register(&sc->et);

	/* From now on DELAY() uses the counter instead of a spin loop. */
	arm_set_delay(arm_tmr_do_delay, sc);
/* FDT front end: device methods and early driver registration. */
static device_method_t arm_tmr_fdt_methods[] = {
	DEVMETHOD(device_probe, arm_tmr_fdt_probe),
	DEVMETHOD(device_attach, arm_tmr_fdt_attach),

static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
    sizeof(struct arm_tmr_softc));

/* Attach in the timer bus pass, on either simplebus or the root ofwbus. */
EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
/* ACPI front end: device methods and early driver registration. */
static device_method_t arm_tmr_acpi_methods[] = {
	DEVMETHOD(device_identify, arm_tmr_acpi_identify),
	DEVMETHOD(device_probe, arm_tmr_acpi_probe),
	DEVMETHOD(device_attach, arm_tmr_acpi_attach),

static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
    sizeof(struct arm_tmr_softc));

EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
/*
 * Busy-wait for usec microseconds using the architectural counter; this
 * is the backend installed via arm_set_delay() once the timer attaches.
 */
arm_tmr_do_delay(int usec, void *arg)
	struct arm_tmr_softc *sc = arg;
	int32_t counts, counts_per_usec;
	uint32_t first, last;

	/* Get the number of times to count */
	counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);

	/*
	 * Clamp the timeout at a maximum value (about 32 seconds with
	 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
	 * near that length of time and if they are, they should be hung
	 * out to dry.
	 */
	if (usec >= (0x80000000U / counts_per_usec))
		counts = (0x80000000U / counts_per_usec) - 1;
		counts = usec * counts_per_usec;

	/* Spin until the counter has advanced by 'counts' ticks. */
	first = sc->get_cntxct(sc->physical);
		last = sc->get_cntxct(sc->physical);
		counts -= (int32_t)(last - first);
#if defined(__aarch64__)

	/*
	 * Body of what appears to be DELAY() (signature not visible in this
	 * view): fall back to a calibrated spin loop before the timer has
	 * attached, and use the counter-based delay afterwards.
	 */

	/*
	 * Check the timers are setup, if not just
	 * use a for loop for the meantime
	 */
	if (arm_tmr_sc == NULL) {
		for (; usec > 0; usec--)
			for (counts = 200; counts > 0; counts--)
				/*
				 * Prevent the compiler from optimizing
				 * out the loop
				 */
		arm_tmr_do_delay(usec, arm_tmr_sc);
/*
 * Populate the shared-page vdso timehands so userland can sample the
 * counter directly: selects the generic-timer algorithm and records
 * whether the physical or virtual counter is in use.
 */
arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc)

	vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
	vdso_th->th_physical = arm_tmr_sc->physical;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));