/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>
/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */
/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")
struct acpi_cx {
    struct resource *p_lvlx;	/* Register to read to enter state. */
    uint32_t	 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t	 trans_lat;	/* Transition latency (usec). */
    uint32_t	 power;		/* Power consumed (mW). */
    int		 res_type;	/* Resource type for p_lvlx. */
    int		 res_rid;	/* Resource ID for p_lvlx. */
    bool	 do_mwait;
    uint32_t	 mwait_hint;
    bool	 mwait_hw_coord;
    bool	 mwait_bm_avoidance;
};
#define MAX_CX_STATES	 8
struct acpi_cpu_softc {
    device_t		 cpu_dev;
    ACPI_HANDLE		 cpu_handle;
    struct pcpu		*cpu_pcpu;
    uint32_t		 cpu_acpi_id;	/* ACPI processor id */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
    int			 cpu_prev_sleep;/* Last idle sleep duration. */
    int			 cpu_features;	/* Child driver supported features. */
    /* Runtime state. */
    int			 cpu_non_c2;	/* Index of lowest non-C2 state. */
    int			 cpu_non_c3;	/* Index of lowest non-C3 state. */
    u_int		 cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid	*cpu_sysctl_tree;
    int			 cpu_cx_lowest;
    int			 cpu_cx_lowest_lim;
    int			 cpu_disable_idle; /* Disable entry to idle function */
    char		 cpu_cx_supported[64];
};
struct acpi_cpu_device {
    struct resource_list	ad_rl;
};
#define CPU_GET_REG(reg, width)					\
    (bus_space_read_ ## width(rman_get_bustag((reg)),		\
	rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)				\
    (bus_space_write_ ## width(rman_get_bustag((reg)),		\
	rman_get_bushandle((reg)), 0, (val)))
#define PM_USEC(x)	 ((x) >> 2)	/* ~4 clocks per usec (3.57955 MHz) */
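/*
 * Example: the ACPI PM timer ticks at 3.579545 MHz, i.e. ~3.58 ticks per
 * usec.  PM_USEC() approximates ticks / 3.58 with the cheaper ticks / 4,
 * so PM_USEC(3580) yields 895 rather than the exact ~1000 usec.  The ~10%
 * underestimate is acceptable for the sleep-time averaging done below.
 */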
#define ACPI_NOTIFY_CX_STATES	0x81	/* _CST changed. */
#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */
#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)
#define CST_FFH_VENDOR_INTEL	1
#define CST_FFH_INTEL_CL_C1IO	1
#define CST_FFH_INTEL_CL_MWAIT	2
#define CST_FFH_MWAIT_HW_COORD	0x0001
#define CST_FFH_MWAIT_BM_AVOID	0x0002
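/*
 * Note: for Fixed Function Hardware (FFH) entries in _CST on Intel CPUs,
 * acpi_PkgFFH_IntelCpu() below decodes the FFH register entry into a vendor
 * code, a class (C1 I/O redirection or native MWAIT entry), an argument
 * (the I/O port or the MWAIT hint), and an access-size field carrying the
 * CST_FFH_MWAIT_* flag bits tested with these macros.
 */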
/* Allow users to ignore processor orders in MADT. */
static int cpu_unordered;
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
    &cpu_unordered, 0,
    "Do not use the MADT to match ACPI Processor objects to CPUs.");
/* Knob to disable acpi_cpu devices */
bool acpi_cpu_disabled = false;
/* Platform hardware resource information. */
static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		 cpu_quirks;	/* Indicate any hardware bugs. */
static int		 cpu_deepest_sleep; /* Deepest Cx state seen (1-3). */
/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int		 cpu_cx_generic;
static int		 cpu_cx_lowest_lim;

static device_t		*cpu_devices;
static int		 cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");
static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static int	acpi_cpu_suspend(device_t dev);
static int	acpi_cpu_resume(device_t dev);
static int	acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id,
		    uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t	acpi_cpu_add_child(device_t dev, u_int order, const char *name,
		    int unit);
static int	acpi_cpu_read_ivar(device_t dev, device_t child, int index,
		    uintptr_t *result);
static int	acpi_cpu_shutdown(device_t dev);
static void	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
#if defined(__i386__) || defined(__amd64__)
static void	acpi_cpu_idle(sbintime_t sbt);
#endif
static void	acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void	acpi_cpu_quirks(void);
static void	acpi_cpu_quirks_piix4(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
#if defined(__i386__) || defined(__amd64__)
static int	acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,	acpi_cpu_suspend),
    DEVMETHOD(device_resume,	acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,	acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};
static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);
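/*
 * DRIVER_MODULE() above registers this driver on the "acpi" bus, so the
 * probe routine runs once for each Processor object the ACPI bus scan
 * enumerates; MODULE_DEPEND() requires version 1 of the acpi module.
 */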
static int
acpi_cpu_probe(device_t dev)
{
    ACPI_BUFFER	buf;
    ACPI_HANDLE	handle;
    ACPI_OBJECT	*obj;
    ACPI_STATUS	status;
    int		acpi_id, cpu_id;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR ||
	acpi_cpu_disabled)
	return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
	cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
	    (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "probe failed to get Processor obj - %s\n",
	    AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
	device_printf(dev, "Processor object has bad type %d\n", obj->Type);
	AcpiOsFree(obj);
	return (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0)
	return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
	return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_private(dev, (void *)(intptr_t)cpu_id);
    device_set_desc(dev, "ACPI CPU");

    return (0);
}
static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER		   buf;
    ACPI_OBJECT		   arg[1], *obj;
    ACPI_OBJECT_LIST	   arglist;
    struct pcpu		   *pcpu_data;
    struct acpi_cpu_softc  *sc;
    struct acpi_softc	   *acpi_sc;
    ACPI_STATUS		   status;
    u_int		   features;
    int			   cpu_id, drv_count, i;
    driver_t		   **drivers;
    uint32_t		   cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
				       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
				       0x58, 0x71, 0x39, 0x53 };
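    /*
     * Note: the byte array above is UUID 4077A616-290C-47BE-9EBD-D87058713953
     * in its little-endian wire format (the first three fields are
     * byte-swapped), as _OSC expects.
     */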
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = (int)(intptr_t)acpi_get_private(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
	    AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
	device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Install hw.acpi.cpu sysctl tree */
	acpi_sc = acpi_device_get_parent_softc(dev);
	sysctl_ctx_init(&cpu_sysctl_ctx);
	cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
	    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
	    CTLFLAG_RD, 0, "node for CPU children");

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }
    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 |
	ACPI_CAP_C1_IO_HALT;

#if defined(__i386__) || defined(__amd64__)
    /*
     * Ask for MWAIT modes if not disabled and interrupts work
     * reasonably with MWAIT.
     */
    if (!acpi_disabled("mwait") && cpu_mwait_usable())
	sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;
#endif

    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
	for (i = 0; i < drv_count; i++) {
	    if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
		sc->cpu_features |= features;
	}
	free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
    if (sc->cpu_features) {
	cap_set[1] = sc->cpu_features;
	status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set);
	if (ACPI_SUCCESS(status)) {
	    if (cap_set[0] != 0)
		device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
	} else {
	    arglist.Pointer = arg;
	    arglist.Count = 1;
	    arg[0].Type = ACPI_TYPE_BUFFER;
	    arg[0].Buffer.Length = sizeof(cap_set);
	    arg[0].Buffer.Pointer = (uint8_t *)cap_set;
	    cap_set[0] = 1; /* revision */
	    cap_set[1] = 1; /* number of capabilities integers */
	    cap_set[2] = sc->cpu_features;
	    AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
	}
    }
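    /*
     * In short: _OSC is tried first; only if it fails do we fall back to
     * the older _PDC method, which wraps the same capability words
     * (revision, count, feature bits) in a single ACPI buffer argument.
     */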
    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}
static void
acpi_cpu_postattach(void *unused __unused)
{
    device_t *devices;
    int err;
    int i, n;

    err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
    if (err != 0) {
	printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
	return;
    }
    for (i = 0; i < n; i++)
	bus_generic_probe(devices[i]);
    for (i = 0; i < n; i++)
	bus_generic_attach(devices[i]);
    free(devices, M_TEMP);
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);
static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in idle state or in acpi_cpu_idle().
     * Note that this code depends on the fact that the rendezvous IPI
     * cannot be delivered while interrupts are disabled; acpi_cpu_idle()
     * runs with interrupts disabled and re-enables them only right before
     * it returns.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL,
	smp_no_rendevous_barrier, NULL);
}
static void
enable_idle(struct acpi_cpu_softc *sc)
{

    sc->cpu_disable_idle = FALSE;
}

#if defined(__i386__) || defined(__amd64__)
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}
#endif
/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}
/*
 * Find the processor associated with a given ACPI ID.  By default,
 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a
 * processor.  Some systems have inconsistent ASL and MADT however.
 * For these systems the cpu_unordered tunable can be set in which
 * case we assume that Processor objects are listed in the same order
 * in both the MADT and ASL.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu	*pc;
    uint32_t	 i, idx;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    idx = device_get_unit(dev);

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC
     * UP box) use the ACPI ID from the first processor we find.
     */
    if (idx == 0 && mp_ncpus == 1) {
	pc = pcpu_find(0);
	if (pc->pc_acpi_id == 0xffffffff)
	    pc->pc_acpi_id = *acpi_id;
	*cpu_id = 0;
	return (0);
    }

    CPU_FOREACH(i) {
	pc = pcpu_find(i);
	KASSERT(pc != NULL, ("no pcpu data for %d", i));
	if (cpu_unordered) {
	    if (idx-- == 0) {
		/*
		 * If pc_acpi_id doesn't match the ACPI ID from the
		 * ASL, prefer the MADT-derived value.
		 */
		if (pc->pc_acpi_id != *acpi_id)
		    *acpi_id = pc->pc_acpi_id;
		*cpu_id = pc->pc_cpuid;
		return (0);
	    }
	} else {
	    if (pc->pc_acpi_id == *acpi_id) {
		if (bootverbose)
		    printf("ACPI: "
			"Processor %s (ACPI ID %u) -> APIC ID %d\n",
			acpi_name(acpi_get_handle(dev)), *acpi_id,
			pc->pc_cpuid);
		*cpu_id = pc->pc_cpuid;
		return (0);
	    }
	}
    }

    if (bootverbose)
	printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
	    acpi_name(acpi_get_handle(dev)), *acpi_id);
    return (ENXIO);
}
static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
	return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
	return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
	device_set_ivars(child, ad);
    else
	free(ad, M_TEMP);
    return (child);
}
static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
	*result = (uintptr_t)sc->cpu_handle;
	break;
    case CPU_IVAR_PCPU:
	*result = (uintptr_t)sc->cpu_pcpu;
	break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
	if (tsc_is_invariant) {
	    *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
	    break;
	}
	/* FALLTHROUGH */
#endif
    default:
	return (ENOENT);
    }
    return (0);
}
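/*
 * Note on CPU_IVAR_NOMINAL_MHZ above: with an invariant TSC the nominal
 * frequency is simply tsc_freq truncated to MHz, e.g. a tsc_freq of
 * 2400012345 Hz reports as 2400 MHz.  Without an invariant TSC the query
 * falls through to ENOENT and consumers must obtain the frequency some
 * other way.
 */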
static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}
static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to generic FADT/P_BLK Cx control method which will
     * be handled by acpi_cpu_startup.  We need to defer to after having
     * probed all the cpus in the system before probing for generic Cx
     * states as we may already have found cpus with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it. Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
    return_VOID;
}
static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS	 gas;
    struct acpi_cx		*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;
    cpu_deepest_sleep = 1;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;
	cx_ptr->res_rid = 0;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    sc->cpu_cx_count++;
	    cpu_deepest_sleep = 2;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;
	cx_ptr->res_rid = 1;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    cpu_deepest_sleep = 3;
	}
    }
}
#if defined(__i386__) || defined(__amd64__)
static void
acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize)
{

    cx_ptr->do_mwait = true;
    cx_ptr->mwait_hint = address & 0xffffffff;
    cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0;
    cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0;
}
#endif

static void
acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr)
{

    if (cx_ptr->p_lvlx == NULL)
	return;
    bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid,
	cx_ptr->p_lvlx);
    cx_ptr->p_lvlx = NULL;
}
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx	*cx_ptr;
    ACPI_STATUS		 status;
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		*top;
    ACPI_OBJECT		*pkg;
    uint32_t		 count;
    int			 i;
#if defined(__i386__) || defined(__amd64__)
    uint64_t		 address;
    int			 vendor, class, accsize;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
	    count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;
    cpu_deepest_sleep = 1;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

	    device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
	    if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
		&accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
		if (class == CST_FFH_INTEL_CL_C1IO) {
		    /* C1 I/O then Halt */
		    cx_ptr->res_rid = sc->cpu_cx_count;
		    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT,
			cx_ptr->res_rid, address, 1);
		    cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev,
			SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE |
			RF_SHAREABLE);
		    if (cx_ptr->p_lvlx == NULL) {
			bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT,
			    cx_ptr->res_rid);
			device_printf(sc->cpu_dev,
			    "C1 I/O failed to allocate port %d, "
			    "degrading to C1 Halt", (int)address);
		    }
		} else if (class == CST_FFH_INTEL_CL_MWAIT) {
		    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
		}
	    }
#endif
	    if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
		/* This is the first C1 state.  Use the reserved slot. */
		sc->cpu_cx_states[0] = *cx_ptr;
	    } else {
		sc->cpu_non_c2 = sc->cpu_cx_count;
		sc->cpu_non_c3 = sc->cpu_cx_count;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	    continue;
	case ACPI_STATE_C2:
	    sc->cpu_non_c3 = sc->cpu_cx_count;
	    if (cpu_deepest_sleep < 2)
		cpu_deepest_sleep = 2;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu%d: C3[%d] not available.\n",
		    device_get_unit(sc->cpu_dev), i));
		continue;
	    } else
		cpu_deepest_sleep = 3;
	    break;
	}

	/* Free up any previous register. */
	acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);

	/* Allocate the control register for C2 or C3. */
#if defined(__i386__) || defined(__amd64__)
	if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
	    &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL &&
	    class == CST_FFH_INTEL_CL_MWAIT) {
	    /* Native C State Instruction use (mwait) */
	    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu%d: Got C%d/mwait - %d latency\n",
		device_get_unit(sc->cpu_dev), cx_ptr->type, cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	} else
#endif
	{
	    cx_ptr->res_rid = sc->cpu_cx_count;
	    acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
		&cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
	    if (cx_ptr->p_lvlx) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu%d: Got C%d - %d latency\n",
		    device_get_unit(sc->cpu_dev), cx_ptr->type,
		    cx_ptr->trans_lat));
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

    /* If C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
	cx_ptr->type = ACPI_STATE_C1;
	cx_ptr->trans_lat = 0;
    }

    return (0);
}
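/*
 * For reference, a minimal _CST package as parsed above might look like
 * this in ASL (illustrative values, not from any particular machine):
 *
 *	Name (_CST, Package ()
 *	{
 *	    2,				// count of Cx packages that follow
 *	    Package () { ResourceTemplate () {
 *		Register (FFixedHW, ...) }, 1, 1, 1000 },	// C1
 *	    Package () { ResourceTemplate () {
 *		Register (SystemIO, 8, 0, 0x414) }, 2, 18, 500 } // C2 via I/O
 *	})
 *
 * Element 0 of each Cx package is the entry register (a GAS), element 1
 * the Cx type, element 2 the transition latency in usec, and element 3
 * the power consumption in mW.
 */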
/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 * As we now know for sure that we will be using _CST mode
	 * install our notify handler.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    if (cpu_quirks & CPU_QUIRK_NO_C3) {
		sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1);
	    }
	    AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
		acpi_cpu_notify, sc);
	}
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
	OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
	NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
	"Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	enable_idle(sc);
    }
#if defined(__i386__) || defined(__amd64__)
    cpu_idle_hook = acpi_cpu_idle;
#endif
}
static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
	    sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}
static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{

    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
	OID_AUTO, "cx_supported", CTLFLAG_RD,
	sc->cpu_cx_supported, 0,
	"Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
	OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
	(void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
	"lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
	OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
	(void *)sc, 0, acpi_cpu_usage_sysctl, "A",
	"percent usage for each Cx state");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
	OID_AUTO, "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD,
	(void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
	"Cx sleep state counters");
#if defined(__i386__) || defined(__amd64__)
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
	OID_AUTO, "cx_method", CTLTYPE_STRING | CTLFLAG_RD,
	(void *)sc, 0, acpi_cpu_method_sysctl, "A",
	"Cx entrance methods");
#endif

    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
}
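/*
 * The SMI_CMD write above is the FADT-defined handshake: writing the
 * CST_CNT value to the SMI command port tells the firmware that the OS
 * parses _CST packages and wants Notify(0x81) events when they change.
 */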
#if defined(__i386__) || defined(__amd64__)
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t	cputicks;
    uint32_t	start_time, end_time;
    ACPI_STATUS	status;
    int		bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    us = sc->cpu_prev_sleep;
    if (sbt >= 0 && us > (sbt >> 12))
	us = (int)(sbt >> 12);
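    /*
     * sbt is in sbintime_t units (2^32 fractions of a second), so
     * "sbt >> 12" is a cheap approximation of microseconds: 2^32 / 10^6
     * is ~4295, close to 2^12 = 4096.  One second of sbt (1 << 32)
     * converts to ~1048576 "usec", overestimating by about 5%.
     */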
    cx_next_idx = 0;
    if (cpu_disable_c2_sleep)
	i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
    else if (cpu_disable_c3_sleep)
	i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
	i = sc->cpu_cx_lowest;
    for (; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
	cx_next_idx > sc->cpu_non_c3) {
	status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (ACPI_SUCCESS(status) && bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cpu_non_c3;
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept no more than half of quantum, unless
     * we are called inside critical section, delaying context switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	cputicks = cpu_ticks();
	if (cx_next->p_lvlx != NULL) {
	    /* C1 I/O then Halt */
	    CPU_GET_REG(cx_next->p_lvlx, 1);
	}
	if (cx_next->do_mwait)
	    acpi_cpu_idle_mwait(cx_next->mwait_hint);
	else
	    acpi_cpu_c1();
	end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();
	if (curthread->td_critnest == 0)
	    end_time = min(end_time, 500000 / hz);
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
	return;
    }
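    /*
     * The running sleep-time estimate above is an exponential moving
     * average with weight 1/4: new = (3 * old + sample) / 4.  E.g. an old
     * estimate of 100 us and a 500 us sample give (300 + 500) / 4 = 200 us,
     * so one long sleep only partially opens the door to deeper states.
     */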
    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
	AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
	cputicks = 0;
    } else {
	start_time = 0;
	cputicks = cpu_ticks();
    }
    if (cx_next->do_mwait)
	acpi_cpu_idle_mwait(cx_next->mwait_hint);
    else
	CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    if (cx_next->type == ACPI_STATE_C3) {
	AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
	end_time = acpi_TimerDelta(end_time, start_time);
    } else
	end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate();

    /* Enable bus master arbitration and disable bus master wakeup. */
    if ((cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) &&
	(cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}
#endif
/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
	return;

    /*
     * C-state data for target CPU is going to be in flux while we execute
     * acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
     * Also, it may happen that multiple ACPI taskqueues may concurrently
     * execute notifications for the same CPU.  ACPI_SERIAL is used to
     * protect against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}
static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}
static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;
    ACPI_STATUS status;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (ACPI_SUCCESS(status) && val != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }
#endif
}
static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf	 sb;
    char	 buf[128];
    int		 i;
    uintmax_t	 fract, sum, whole;

    sc = (struct acpi_cpu_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
	sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
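    /*
     * The loop below prints each state's share of 'sum' with two decimal
     * places using only integer math.  E.g. a state entered 25 of 80 times:
     * whole = 2500, whole / sum = 31, fract = (2500 % 80) * 100 = 2000,
     * fract / sum = 25, printed as "31.25%".
     */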
    for (i = 0; i < sc->cpu_cx_count; i++) {
	if (sum > 0) {
	    whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
		(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}
/*
 * XXX TODO: actually add support to count each entry/exit
 * from the Cx states.
 */
static int
acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf	 sb;
    char	 buf[128];
    int		 i;

    sc = (struct acpi_cpu_softc *) arg1;

    /* Print out the raw counters */
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);

    for (i = 0; i < sc->cpu_cx_count; i++) {
	sbuf_printf(&sb, "%u ", sc->cpu_cx_stats[i]);
    }

    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}
#if defined(__i386__) || defined(__amd64__)
static int
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx;
    struct sbuf	 sb;
    char	 buf[128];
    int		 i;

    sc = (struct acpi_cpu_softc *)arg1;
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
	cx = &sc->cpu_cx_states[i];
	sbuf_printf(&sb, "C%d/", i + 1);
	if (cx->do_mwait) {
	    sbuf_cat(&sb, "mwait");
	    if (cx->mwait_hw_coord)
		sbuf_cat(&sb, "/hwc");
	    if (cx->mwait_bm_avoidance)
		sbuf_cat(&sb, "/bma");
	} else if (cx->type == ACPI_STATE_C1) {
	    sbuf_cat(&sb, "hlt");
	} else {
	    sbuf_cat(&sb, "io");
	}
	if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL)
	    sbuf_cat(&sb, "/iohlt");
	sbuf_putc(&sb, ' ');
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);
    return (0);
}
#endif
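/*
 * A typical cx_method string produced above might be "C1/hlt C2/io C3/io"
 * on older hardware, or "C1/mwait/hwc C2/mwait/hwc C3/mwait/hwc/bma" where
 * _CST advertises native MWAIT entry (illustrative examples).
 */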
static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cpu_non_c3 = i;
	    break;
	}
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}
static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char	 state[8];
    int		 val, error;

    sc = (struct acpi_cpu_softc *) arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
	val = MAX_CX_STATES;
    else {
	val = (int) strtol(state + 1, NULL, 10);
	if (val < 1 || val > MAX_CX_STATES)
	    return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}
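/*
 * Usage example (illustrative): "sysctl dev.cpu.0.cx_lowest=C2" limits
 * CPU 0 to C1-C2, while "sysctl dev.cpu.0.cx_lowest=Cmax" allows the
 * deepest state the firmware advertises.  The global handler below does
 * the same for hw.acpi.cpu.cx_lowest across all CPUs.
 */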
static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char	 state[8];
    int		 val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
	val = MAX_CX_STATES;
    else {
	val = (int) strtol(state + 1, NULL, 10);
	if (val < 1 || val > MAX_CX_STATES)
	    return (EINVAL);
    }

    /* Update the new lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
	acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}