2 * Copyright (c) 2005 Poul-Henning Kamp
3 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #if defined(__amd64__)
37 #include <sys/param.h>
40 #include <sys/kernel.h>
41 #include <sys/module.h>
47 #include <sys/sysctl.h>
48 #include <sys/timeet.h>
49 #include <sys/timetc.h>
51 #include <contrib/dev/acpica/include/acpi.h>
52 #include <contrib/dev/acpica/include/accommon.h>
54 #include <dev/acpica/acpivar.h>
55 #include <dev/acpica/acpi_hpet.h>
61 #define HPET_VENDID_AMD 0x4353
62 #define HPET_VENDID_AMD2 0x1022
63 #define HPET_VENDID_INTEL 0x8086
64 #define HPET_VENDID_NVIDIA 0x10de
65 #define HPET_VENDID_SW 0x1166
67 ACPI_SERIAL_DECL(hpet, "ACPI HPET support");
69 static devclass_t hpet_devclass;
71 /* ACPI CA debugging */
72 #define _COMPONENT ACPI_TIMER
73 ACPI_MODULE_NAME("HPET")
83 uint32_t allowed_irqs;
84 struct resource *mem_res;
85 struct resource *intr_res;
91 struct timecounter tc;
94 struct hpet_softc *sc;
102 int pcpu_slaves[MAXCPU];
103 struct resource *intr_res;
114 int mmap_allow_write;
/*
 * Character-device plumbing for /dev/hpetN (mmap access to the HPET
 * register page) plus forward declarations and the PNP ID table used
 * by probe/identify.  NOTE(review): this listing elides several lines
 * (e.g. the rest of the cdevsw initializer).
 */
117 static d_open_t hpet_open;
118 static d_mmap_t hpet_mmap;
120 static struct cdevsw hpet_cdevsw = {
121 .d_version = D_VERSION,
127 static u_int hpet_get_timecount(struct timecounter *tc);
128 static void hpet_test(struct hpet_softc *sc);
/* ACPI hardware IDs that identify an HPET device node. */
130 static char *hpet_ids[] = { "PNP0103", NULL };
132 /* Knob to disable acpi_hpet device */
133 bool acpi_hpet_disabled = false;
/*
 * Timecounter read method: return the low 32 bits of the HPET main
 * counter.  (The assignment of 'sc' -- presumably from tc->tc_priv --
 * is elided in this listing; confirm against the full source.)
 */
136 hpet_get_timecount(struct timecounter *tc)
138 struct hpet_softc *sc;
141 return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
/*
 * Enable the HPET: set or clear legacy interrupt routing according to
 * sc->legacy_route, then set HPET_CNF_ENABLE so the main counter runs.
 * (The 'else' line between 151 and 153 is elided in this listing.)
 */
145 hpet_enable(struct hpet_softc *sc)
149 val = bus_read_4(sc->mem_res, HPET_CONFIG);
150 if (sc->legacy_route)
151 val |= HPET_CNF_LEG_RT;
153 val &= ~HPET_CNF_LEG_RT;
154 val |= HPET_CNF_ENABLE;
155 bus_write_4(sc->mem_res, HPET_CONFIG, val);
/*
 * Disable the HPET by clearing the global enable bit; the main counter
 * stops counting until hpet_enable() is called again.
 */
159 hpet_disable(struct hpet_softc *sc)
163 val = bus_read_4(sc->mem_res, HPET_CONFIG);
164 val &= ~HPET_CNF_ENABLE;
165 bus_write_4(sc->mem_res, HPET_CONFIG, val);
/*
 * Event timer start method (et_start).  If this event timer is a
 * per-CPU master, operate on the slave comparator bound to the current
 * CPU instead.  Computes the tick divisor from sc->freq (32.32 fixed
 * point: (freq * sbintime) >> 32), acks any pending interrupt for the
 * comparator, and programs either periodic mode (TCNF_TYPE plus the
 * VAL_SET double-write sequence required by the HPET spec) or one-shot
 * mode.  Afterwards it re-reads the main counter to detect a deadline
 * that already passed within HPET_MIN_CYCLES (the recovery code is
 * elided in this listing).
 */
169 hpet_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
171 struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
172 struct hpet_timer *t;
173 struct hpet_softc *sc = mt->sc;
/* pcpu_master < 0 means "t" itself is a regular (non-per-CPU) timer. */
176 t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
179 t->div = (sc->freq * period) >> 32;
185 fdiv = (sc->freq * first) >> 32;
/* Ack a possibly pending interrupt for this comparator before arming. */
189 bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
190 t->caps |= HPET_TCNF_INT_ENB;
191 now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
193 t->next = now + fdiv;
194 if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
195 t->caps |= HPET_TCNF_TYPE;
196 bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
197 t->caps | HPET_TCNF_VAL_SET);
198 bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
200 bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
203 t->caps &= ~HPET_TCNF_TYPE;
204 bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
206 bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
/* Detect a deadline that already passed while we were programming. */
209 now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
210 if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
/*
 * Event timer stop method (et_stop): pick the per-CPU slave comparator
 * when applicable, then clear the interrupt-enable and periodic-type
 * bits so the comparator no longer fires.
 */
218 hpet_stop(struct eventtimer *et)
220 struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
221 struct hpet_timer *t;
222 struct hpet_softc *sc = mt->sc;
224 t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
226 t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
227 bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
/*
 * Interrupt filter for one HPET comparator.
 *
 * Handles three concerns visible here:
 *  - a per-CPU timer interrupt delivered to the wrong CPU: warn (rate
 *    limited to every 32nd occurrence) and reload the comparator so the
 *    next delivery may be routed correctly;
 *  - non-periodic (or periodic-incapable) hardware: re-arm the
 *    comparator manually, pushing t->next forward by half a divisor if
 *    we are already late;
 *  - finally dispatch the owning event timer's callback (through the
 *    per-CPU master when this comparator is a slave).
 */
232 hpet_intr_single(void *arg)
234 struct hpet_timer *t = (struct hpet_timer *)arg;
235 struct hpet_timer *mt;
236 struct hpet_softc *sc = t->sc;
240 return (FILTER_STRAY);
241 /* Check that per-CPU timer interrupt reached right CPU. */
242 if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
243 if ((++t->pcpu_misrouted) % 32 == 0) {
244 printf("HPET interrupt routed to the wrong CPU"
245 " (timer %d CPU %d -> %d)!\n",
246 t->num, t->pcpu_cpu, curcpu);
250 * Reload timer, hoping that next time may be more lucky
251 * (system will manage proper interrupt binding).
253 if ((t->mode == 1 && (t->caps & HPET_TCAP_PER_INT) == 0) ||
255 t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
257 bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
260 return (FILTER_HANDLED);
263 (t->caps & HPET_TCAP_PER_INT) == 0) {
/* Software-emulated periodic mode: re-arm for the next tick. */
265 now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
266 if ((int32_t)((now + t->div / 2) - t->next) > 0)
267 t->next = now + t->div / 2;
268 bus_write_4(sc->mem_res,
269 HPET_TIMER_COMPARATOR(t->num), t->next);
270 } else if (t->mode == 2)
/* Deliver the event through the master timer's callback. */
272 mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
273 if (mt->et.et_active)
274 mt->et.et_event_cb(&mt->et, mt->et.et_arg);
275 return (FILTER_HANDLED);
/*
 * Shared interrupt filter for level-triggered (non-FSB) delivery: read
 * the interrupt status register, ack every pending comparator bit in
 * one write, then run hpet_intr_single() for each timer whose bit was
 * set.  Returns FILTER_STRAY when nothing was pending.
 */
281 struct hpet_softc *sc = (struct hpet_softc *)arg;
285 val = bus_read_4(sc->mem_res, HPET_ISR);
287 bus_write_4(sc->mem_res, HPET_ISR, val);
289 for (i = 0; i < sc->num_timers; i++) {
290 if ((val & (1 << i)) == 0)
292 hpet_intr_single(&sc->t[i]);
294 return (FILTER_HANDLED);
296 return (FILTER_STRAY);
/*
 * Accessor: return the ACPI _UID of this HPET device as cached in the
 * softc during attach.
 */
300 hpet_get_uid(device_t dev)
302 struct hpet_softc *sc;
304 sc = device_get_softc(dev);
305 return (sc->acpi_uid);
/*
 * AcpiWalkNamespace() callback used by hpet_identify(): match a
 * namespace node against the known HPET PNP IDs, then compare its _UID
 * against the HPET table Sequence number passed via 'context'; on a
 * match the corresponding device_t is returned through 'status'.
 */
309 hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
313 uint32_t id = (uint32_t)(uintptr_t)context;
316 for (ids = hpet_ids; *ids != NULL; ids++) {
317 if (acpi_MatchHid(handle, *ids))
/* A device with no _UID also matches (missing-_UID case is elided). */
322 if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
324 *status = acpi_get_device(handle);
329 * Find an existing IRQ resource that matches the requested IRQ range
330 * and return its RID. If one is not found, use a new RID.
333 hpet_find_irq_rid(device_t dev, u_long start, u_long end)
/* Scan RIDs upward until one matches [start, end] or none exists. */
338 for (rid = 0;; rid++) {
339 error = bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL);
340 if (error != 0 || (start <= irq && irq <= end))
/*
 * d_open handler for /dev/hpetN.  The body is almost entirely elided
 * in this listing -- presumably it gates access on the mmap_allow
 * sysctl; confirm against the full source.
 */
346 hpet_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
348 struct hpet_softc *sc;
/*
 * d_mmap handler: translate a byte offset within the HPET register
 * region into its physical address, rejecting out-of-range offsets and
 * write mappings unless mmap_allow_write is set.  The page is mapped
 * uncacheable, as required for device registers.
 *
 * NOTE(review): 'offset > size' permits offset == size (one byte past
 * the region); '>=' looks intended -- confirm against upstream.
 */
358 hpet_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
359 int nprot, vm_memattr_t *memattr)
361 struct hpet_softc *sc;
364 if (offset > rman_get_size(sc->mem_res))
366 if (!sc->mmap_allow_write && (nprot & PROT_WRITE))
368 *paddr = rman_get_start(sc->mem_res) + offset;
369 *memattr = VM_MEMATTR_UNCACHEABLE;
374 /* Discover the HPET via the ACPI table of the same name. */
/*
 * device_identify method: find the ACPI HPET table, look for an ACPI
 * namespace device with the matching Sequence/_UID, and either reuse
 * it (adding the memory resource if missing) or create a new child
 * device directly from the table's address information.
 */
376 hpet_identify(driver_t *driver, device_t parent)
378 ACPI_TABLE_HPET *hpet;
383 /* Only one HPET device can be added. */
384 if (devclass_get_device(hpet_devclass, 0))
387 /* Search for HPET table. */
388 status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
389 if (ACPI_FAILURE(status))
391 /* Search for HPET device with same ID. */
393 AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
394 100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence,
396 /* If found - let it be probed in normal way. */
398 if (bus_get_resource(child, SYS_RES_MEMORY, 0,
400 bus_set_resource(child, SYS_RES_MEMORY, 0,
401 hpet->Address.Address, HPET_MEM_WIDTH);
404 /* If not - create it from table info. */
405 child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
407 printf("%s: can't add child\n", __func__);
410 bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
/*
 * device_probe method: fail if HPET support is disabled by knob or
 * ACPI hint; otherwise match the device against the PNP ID table and
 * claim it with a human-readable description.
 */
416 hpet_probe(device_t dev)
418 ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
420 if (acpi_disabled("hpet") || acpi_hpet_disabled)
422 if (acpi_get_handle(dev) != NULL &&
423 ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids) == NULL)
426 device_set_desc(dev, "High Precision Event Timer");
/*
 * device_attach method.  Phases visible in this (gapped) listing:
 *  1. map and validate the HPET register region;
 *  2. read capabilities (frequency, vendor, revision, timer count),
 *     applying the AMD off-by-one timer-count quirk;
 *  3. sanity-check that the main counter actually increments;
 *  4. register the first HPET as a timecounter;
 *  5. decide IRQ routing (legacy route, vendor-specific IRQ
 *     blacklists, user overrides) and allocate MSI or legacy
 *     interrupts per comparator;
 *  6. group comparators for per-CPU event timers and bind IRQs;
 *  7. program each comparator and register event timers;
 *  8. create /dev/hpetN with mmap sysctl knobs.
 */
431 hpet_attach(device_t dev)
433 struct hpet_softc *sc;
434 struct hpet_timer *t;
435 struct make_dev_args mda;
436 int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
437 int pcpu_master, error;
438 static int maxhpetet = 0;
439 uint32_t val, val2, cvectors, dvectors;
440 uint16_t vendor, rev;
442 ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
444 sc = device_get_softc(dev);
446 sc->handle = acpi_get_handle(dev);
449 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
451 if (sc->mem_res == NULL)
454 /* Validate that we can access the whole region. */
455 if (rman_get_size(sc->mem_res) < HPET_MEM_WIDTH) {
456 device_printf(dev, "memory region width %ld too small\n",
457 rman_get_size(sc->mem_res));
458 bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
462 /* Be sure timer is enabled. */
465 /* Read basic statistics about the timer. */
466 val = bus_read_4(sc->mem_res, HPET_PERIOD);
468 device_printf(dev, "invalid period\n");
470 bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
/* HPET_PERIOD is in femtoseconds; convert to Hz with rounding. */
474 sc->freq = (1000000000000000LL + val / 2) / val;
475 sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
476 vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
477 rev = sc->caps & HPET_CAP_REV_ID;
478 num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
480 * ATI/AMD violates IA-PC HPET (High Precision Event Timers)
481 * Specification and provides an off by one number
482 * of timers/comparators.
483 * Additionally, they use unregistered value in VENDOR_ID field.
485 if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
487 sc->num_timers = num_timers;
490 "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
491 vendor, rev, sc->freq,
492 (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
494 (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
/* Read per-comparator capabilities and supported IRQ vectors. */
496 for (i = 0; i < num_timers; i++) {
504 t->pcpu_misrouted = 0;
506 t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
507 t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
510 " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
511 t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
512 (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
513 (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
514 (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
517 if (testenv("debug.acpi.hpet_test"))
520 * Don't attach if the timer never increments. Since the spec
521 * requires it to be at least 10 MHz, it has to change in 1 us.
523 val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
525 val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
527 device_printf(dev, "HPET never increments, disabling\n");
529 bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
532 /* Announce first HPET as timecounter. */
533 if (device_get_unit(dev) == 0) {
534 sc->tc.tc_get_timecount = hpet_get_timecount,
535 sc->tc.tc_counter_mask = ~0u,
536 sc->tc.tc_name = "HPET",
537 sc->tc.tc_quality = 950,
538 sc->tc.tc_frequency = sc->freq;
542 /* If not disabled - setup and announce event timers. */
543 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
544 "clock", &i) == 0 && i == 0)
547 /* Check whether we can and want legacy routing. */
548 sc->legacy_route = 0;
549 resource_int_value(device_get_name(dev), device_get_unit(dev),
550 "legacy_route", &sc->legacy_route);
551 if ((sc->caps & HPET_CAP_LEG_RT) == 0)
552 sc->legacy_route = 0;
553 if (sc->legacy_route) {
554 sc->t[0].vectors = 0;
555 sc->t[1].vectors = 0;
558 /* Check what IRQs we want use. */
559 /* By default allow any PCI IRQs. */
560 sc->allowed_irqs = 0xffff0000;
562 * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16
563 * Lower are also not always working for different reasons.
564 * SB800 fixed it, but seems do not implements level triggering
565 * properly, that makes it very unreliable - it freezes after any
566 * interrupt loss. Avoid legacy IRQs for AMD.
568 if (vendor == HPET_VENDID_AMD || vendor == HPET_VENDID_AMD2)
569 sc->allowed_irqs = 0x00000000;
571 * NVidia MCP5x chipsets have number of unexplained interrupt
572 * problems. For some reason, using HPET interrupts breaks HDA sound.
574 if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
575 sc->allowed_irqs = 0x00000000;
577 * ServerWorks HT1000 reported to have problems with IRQs >= 16.
578 * Lower IRQs are working, but allowed mask is not set correctly.
579 * Legacy_route mode works fine.
581 if (vendor == HPET_VENDID_SW && rev <= 0x01)
582 sc->allowed_irqs = 0x00000000;
584 * Neither QEMU nor VirtualBox report supported IRQs correctly.
585 * The only way to use HPET there is to specify IRQs manually
586 * and/or use legacy_route. Legacy_route mode works on both.
589 sc->allowed_irqs = 0x00000000;
590 /* Let user override. */
591 resource_int_value(device_get_name(dev), device_get_unit(dev),
592 "allowed_irqs", &sc->allowed_irqs);
594 /* Get how much per-CPU timers we should try to provide. */
596 resource_int_value(device_get_name(dev), device_get_unit(dev),
597 "per_cpu", &sc->per_cpu);
601 /* Find IRQ vectors for all timers. */
602 cvectors = sc->allowed_irqs & 0xffff0000;
603 dvectors = sc->allowed_irqs & 0x0000ffff;
604 if (sc->legacy_route)
605 dvectors &= 0x0000fefe;
/* Per-comparator IRQ assignment: legacy route, MSI, or dedicated IRQ. */
606 for (i = 0; i < num_timers; i++) {
608 if (sc->legacy_route && i < 2)
609 t->irq = (i == 0) ? 0 : 8;
611 else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
612 if ((j = PCIB_ALLOC_MSIX(
613 device_get_parent(device_get_parent(dev)), dev,
616 "Can't allocate interrupt for t%d: %d\n",
621 else if (dvectors & t->vectors) {
622 t->irq = ffs(dvectors & t->vectors) - 1;
623 dvectors &= ~(1 << t->irq);
626 t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
627 t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
628 &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
629 if (t->intr_res == NULL) {
632 "Can't map interrupt for t%d.\n", i);
633 } else if (bus_setup_intr(dev, t->intr_res,
634 INTR_TYPE_CLK, hpet_intr_single, NULL, t,
635 &t->intr_handle) != 0) {
638 "Can't setup interrupt for t%d.\n", i);
640 bus_describe_intr(dev, t->intr_res,
641 t->intr_handle, "t%d", i);
645 if (t->irq < 0 && (cvectors & t->vectors) != 0) {
646 cvectors &= t->vectors;
647 sc->useirq |= (1 << i);
650 if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
651 sc->legacy_route = 0;
652 if (sc->legacy_route)
654 /* Group timers for per-CPU operation. */
655 num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
656 num_percpu_t = num_percpu_et * mp_ncpus;
658 cur_cpu = CPU_FIRST();
659 for (i = 0; i < num_timers; i++) {
661 if (t->irq >= 0 && num_percpu_t > 0) {
662 if (cur_cpu == CPU_FIRST())
664 t->pcpu_cpu = cur_cpu;
665 t->pcpu_master = pcpu_master;
667 pcpu_slaves[cur_cpu] = i;
668 bus_bind_intr(dev, t->intr_res, cur_cpu);
669 cur_cpu = CPU_NEXT(cur_cpu);
671 } else if (t->irq >= 0)
672 bus_bind_intr(dev, t->intr_res, CPU_FIRST());
/* Ack any interrupts left pending from before attach. */
674 bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
676 /* If at least one timer needs legacy IRQ - set it up. */
678 j = i = fls(cvectors) - 1;
679 while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
681 sc->intr_rid = hpet_find_irq_rid(dev, j, i);
682 sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
683 &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
684 if (sc->intr_res == NULL)
685 device_printf(dev, "Can't map interrupt.\n");
686 else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
687 hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
688 device_printf(dev, "Can't setup interrupt.\n");
690 sc->irq = rman_get_start(sc->intr_res);
691 /* Bind IRQ to BSP to avoid live migration. */
692 bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
695 /* Program and announce event timers. */
696 for (i = 0; i < num_timers; i++) {
698 t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
699 t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
700 t->caps &= ~(HPET_TCNF_INT_TYPE);
701 t->caps |= HPET_TCNF_32MODE;
702 if (t->irq >= 0 && sc->legacy_route && i < 2) {
703 /* Legacy route doesn't need more configuration. */
706 if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
711 device_get_parent(device_get_parent(dev)), dev,
712 t->irq, &addr, &data) == 0) {
713 bus_write_4(sc->mem_res,
714 HPET_TIMER_FSB_ADDR(i), addr);
715 bus_write_4(sc->mem_res,
716 HPET_TIMER_FSB_VAL(i), data);
717 t->caps |= HPET_TCNF_FSB_EN;
723 t->caps |= (t->irq << 9);
724 else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
725 t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
726 bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
727 /* Skip event timers without set up IRQ. */
729 (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
731 /* Announce the reset. */
733 t->et.et_name = "HPET";
735 sprintf(t->name, "HPET%d", maxhpetet);
736 t->et.et_name = t->name;
738 t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
739 t->et.et_quality = 450;
740 if (t->pcpu_master >= 0) {
741 t->et.et_flags |= ET_FLAGS_PERCPU;
742 t->et.et_quality += 100;
743 } else if (mp_ncpus >= 8)
744 t->et.et_quality -= 100;
745 if ((t->caps & HPET_TCAP_PER_INT) == 0)
746 t->et.et_quality -= 10;
747 t->et.et_frequency = sc->freq;
748 t->et.et_min_period =
749 ((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq;
750 t->et.et_max_period = (0xfffffffeLLU << 32) / sc->freq;
751 t->et.et_start = hpet_start;
752 t->et.et_stop = hpet_stop;
753 t->et.et_priv = &sc->t[i];
754 if (t->pcpu_master < 0 || t->pcpu_master == i) {
/* Create /dev/hpetN for userland mmap access, with sysctl knobs. */
759 acpi_GetInteger(sc->handle, "_UID", &sc->acpi_uid);
761 make_dev_args_init(&mda);
762 mda.mda_devsw = &hpet_cdevsw;
763 mda.mda_uid = UID_ROOT;
764 mda.mda_gid = GID_WHEEL;
766 mda.mda_si_drv1 = sc;
767 error = make_dev_s(&mda, &sc->pdev, "hpet%d", device_get_unit(dev));
770 TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow",
772 sc->mmap_allow_write = 1;
773 TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow_write",
774 &sc->mmap_allow_write);
775 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
776 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
777 OID_AUTO, "mmap_allow",
778 CTLFLAG_RW, &sc->mmap_allow, 0,
779 "Allow userland to memory map HPET");
780 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
781 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
782 OID_AUTO, "mmap_allow_write",
783 CTLFLAG_RW, &sc->mmap_allow_write, 0,
784 "Allow userland write to the HPET register space");
786 device_printf(dev, "could not create /dev/hpet%d, error %d\n",
787 device_get_unit(dev), error);
/*
 * device_detach method: detach is not supported because the kernel
 * timecounter framework has no way to unregister a timecounter.
 */
794 hpet_detach(device_t dev)
796 ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
798 /* XXX Without a tc_remove() function, we can't detach. */
/*
 * device_suspend method.  The actual hpet_disable() call appears to be
 * intentionally commented out (along with the softc lookup); the
 * comment below explains the original rationale.
 */
803 hpet_suspend(device_t dev)
805 // struct hpet_softc *sc;
808 * Disable the timer during suspend. The timer will not lose
809 * its state in S1 or S2, but we are required to disable
812 // sc = device_get_softc(dev);
/*
 * device_resume method: re-enable the HPET after suspend and restart
 * every event timer that was running.  For MSI-capable comparators the
 * FSB address/data registers are reprogrammed (firmware may have
 * clobbered them); then each comparator is re-armed in periodic or
 * one-shot mode, pending interrupts are acked, and the configuration
 * register is rewritten.
 */
819 hpet_resume(device_t dev)
821 struct hpet_softc *sc;
822 struct hpet_timer *t;
825 /* Re-enable the timer after a resume to keep the clock advancing. */
826 sc = device_get_softc(dev);
828 /* Restart event timers that were running on suspend. */
829 for (i = 0; i < sc->num_timers; i++) {
832 if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
837 device_get_parent(device_get_parent(dev)), dev,
838 t->irq, &addr, &data) == 0) {
839 bus_write_4(sc->mem_res,
840 HPET_TIMER_FSB_ADDR(i), addr);
841 bus_write_4(sc->mem_res,
842 HPET_TIMER_FSB_VAL(i), data);
848 t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
849 if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
850 t->caps |= HPET_TCNF_TYPE;
/* Periodic mode requires the VAL_SET double-write sequence. */
852 bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
853 t->caps | HPET_TCNF_VAL_SET);
854 bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
856 bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
857 bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
/* One-shot: schedule the next event a short time out. */
860 t->next += sc->freq / 1024;
861 bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
864 bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
865 bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
870 /* Print some basic latency/rate information to assist in debugging. */
/*
 * Debug-only (gated by debug.acpi.hpet_test in attach): time 1000
 * back-to-back reads of the main counter with bintimes taken around
 * the loop, then report elapsed time, counter delta, and estimated
 * nanoseconds per bus_read_4() call.
 */
872 hpet_test(struct hpet_softc *sc)
876 struct bintime b0, b1, b2;
882 u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
883 for (i = 1; i < 1000; i++)
884 u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
886 u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
/* b2-b1 = loop time, b1-b0 = overhead baseline; subtract to isolate. */
888 bintime_sub(&b2, &b1);
889 bintime_sub(&b1, &b0);
890 bintime_sub(&b2, &b1);
891 bintime2timespec(&b2, &ts);
893 device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
894 (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);
896 device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
/*
 * bus_remap_intr method: when an MSI vector moves (e.g. after CPU
 * rebinding), find the comparator using that IRQ, remap the MSI via
 * the parent PCI bridge, and rewrite the FSB address/data registers.
 * The HPET is stopped around the register writes so no interrupt is
 * lost while the route is inconsistent.
 */
901 hpet_remap_intr(device_t dev, device_t child, u_int irq)
903 struct hpet_softc *sc = device_get_softc(dev);
904 struct hpet_timer *t;
909 for (i = 0; i < sc->num_timers; i++) {
913 error = PCIB_MAP_MSI(
914 device_get_parent(device_get_parent(dev)), dev,
918 hpet_disable(sc); /* Stop timer to avoid interrupt loss. */
919 bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
920 bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
/* Newbus method table: standard device lifecycle plus IRQ remapping. */
928 static device_method_t hpet_methods[] = {
929 /* Device interface */
930 DEVMETHOD(device_identify, hpet_identify),
931 DEVMETHOD(device_probe, hpet_probe),
932 DEVMETHOD(device_attach, hpet_attach),
933 DEVMETHOD(device_detach, hpet_detach),
934 DEVMETHOD(device_suspend, hpet_suspend),
935 DEVMETHOD(device_resume, hpet_resume),
938 DEVMETHOD(bus_remap_intr, hpet_remap_intr),
/* Driver registration: attach under acpi(4); depends on the ACPI module. */
944 static driver_t hpet_driver = {
947 sizeof(struct hpet_softc),
950 DRIVER_MODULE(hpet, acpi, hpet_driver, hpet_devclass, 0, 0);
951 MODULE_DEPEND(hpet, acpi, 1, 1, 1);