2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2005 Nate Lawson
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Throttle clock frequency by using the thermal control circuit. This
31 * operates independently of SpeedStep and ACPI throttling and is supported
32 * on Pentium 4 and later models (feature TM).
34 * Reference: Intel Developer's manual v.3 #245472-012
36 * The original version of this driver was written by Ted Unangst for
37 * OpenBSD and imported by Maxim Sobolev. It was rewritten by Nate Lawson
38 * for use with the cpufreq framework.
41 #include <sys/param.h>
42 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
48 #include <machine/md_var.h>
49 #include <machine/specialreg.h>
51 #include "cpufreq_if.h"
53 #include <contrib/dev/acpica/include/acpi.h>
55 #include <dev/acpica/acpivar.h>
/* Number of distinct TCC duty-cycle settings, i.e. 12.5% (10000/8) steps. */
65 #define TCC_NUM_SETTINGS 8
/* MSR_THERM_CONTROL bit 4: enable software-controlled (On-Demand) modulation. */
67 #define TCC_ENABLE_ONDEMAND (1<<4)
/* Bit offset of the duty-cycle field within MSR_THERM_CONTROL. */
68 #define TCC_REG_OFFSET 1
/* Convert a setting index (1..TCC_NUM_SETTINGS) to a percentage in 1/100ths. */
69 #define TCC_SPEED_PERCENT(x) ((10000 * (x)) / TCC_NUM_SETTINGS)
/* Forward declarations for the device and cpufreq interface methods. */
71 static int p4tcc_features(driver_t *driver, u_int *features);
72 static void p4tcc_identify(driver_t *driver, device_t parent);
73 static int p4tcc_probe(device_t dev);
74 static int p4tcc_attach(device_t dev);
75 static int p4tcc_detach(device_t dev);
76 static int p4tcc_settings(device_t dev, struct cf_setting *sets,
78 static int p4tcc_set(device_t dev, const struct cf_setting *set);
79 static int p4tcc_get(device_t dev, struct cf_setting *set);
80 static int p4tcc_type(device_t dev, int *type);
/*
 * Method table and driver glue.  DRIVER_MODULE attaches this driver to
 * the "cpu" bus, so one instance exists per cpu device (see the identify
 * routine, which adds one child per CPU).
 */
82 static device_method_t p4tcc_methods[] = {
83 /* Device interface */
84 DEVMETHOD(device_identify, p4tcc_identify),
85 DEVMETHOD(device_probe, p4tcc_probe),
86 DEVMETHOD(device_attach, p4tcc_attach),
87 DEVMETHOD(device_detach, p4tcc_detach),
89 /* cpufreq interface */
90 DEVMETHOD(cpufreq_drv_set, p4tcc_set),
91 DEVMETHOD(cpufreq_drv_get, p4tcc_get),
92 DEVMETHOD(cpufreq_drv_type, p4tcc_type),
93 DEVMETHOD(cpufreq_drv_settings, p4tcc_settings),
/* ACPI interface: report our capabilities to the ACPI CPU driver. */
96 DEVMETHOD(acpi_get_features, p4tcc_features),
100 static driver_t p4tcc_driver = {
103 sizeof(struct p4tcc_softc),
106 DRIVER_MODULE(p4tcc, cpu, p4tcc_driver, 0, 0);
/*
 * acpi_get_features method: advertise that this driver performs thermal
 * throttling by writing MSRs directly (ACPI_CAP_THR_MSRS).
 */
109 p4tcc_features(driver_t *driver, u_int *features)
112 /* Notify the ACPI CPU that we support direct access to MSRs */
113 *features = ACPI_CAP_THR_MSRS;
/*
 * device_identify method: add a "p4tcc" child under each cpu device when
 * the CPU advertises both the CPUID_ACPI and CPUID_TM (thermal monitor)
 * feature flags.
 */
118 p4tcc_identify(driver_t *driver, device_t parent)
/* Require both the ACPI and TM CPUID feature bits. */
121 if ((cpu_feature & (CPUID_ACPI | CPUID_TM)) != (CPUID_ACPI | CPUID_TM))
124 /* Make sure we're not being doubly invoked. */
125 if (device_find_child(parent, "p4tcc", -1) != NULL)
129 * We attach a p4tcc child for every CPU since settings need to
130 * be performed on every CPU in the SMP case. See section 13.15.3
131 * of the IA32 Intel Architecture Software Developer's Manual,
132 * Volume 3, for more info.
134 if (BUS_ADD_CHILD(parent, 10, "p4tcc", device_get_unit(parent))
136 device_printf(parent, "add p4tcc child failed\n");
/*
 * device_probe method: decline if the driver was administratively
 * disabled (via resource hints), otherwise set the device description.
 */
140 p4tcc_probe(device_t dev)
143 if (resource_disabled("p4tcc", 0))
146 device_set_desc(dev, "CPU Frequency Thermal Control");
/*
 * device_attach method: size the settings table, shrink it for CPU
 * models with errata that hang at the lowest duty cycles, force the TCC
 * to 100%, and register with the cpufreq framework.
 */
151 p4tcc_attach(device_t dev)
153 struct p4tcc_softc *sc;
154 struct cf_setting set;
156 sc = device_get_softc(dev);
158 sc->set_count = TCC_NUM_SETTINGS;
161 * On boot, the TCC is usually in Automatic mode where reading the
162 * current performance level is likely to produce bogus results.
163 * We record that state here and don't trust the contents of the
164 * status MSR until we've set it ourselves.
166 sc->auto_mode = TRUE;
169 * XXX: After a cursory glance at various Intel specification
170 * XXX: updates it seems like these tests for errata is bogus.
171 * XXX: As far as I can tell, the failure mode is benign, in
172 * XXX: that cpus with no errata will have their bottom two
173 * XXX: STPCLK# rates disabled, so rather than waste more time
174 * XXX: hunting down intel docs, just document it and punt. /phk
/* Dispatch on the low byte of the CPUID identification. */
176 switch (cpu_id & 0xff) {
183 * These CPU models hang when set to 12.5%.
184 * See Errata O50, P44, and Z21.
188 case 0x07: /* errata N44 and P18 */
192 case 0x62: /* Pentium D B1: errata AA21 */
193 case 0x64: /* Pentium D C1: errata AA21 */
194 case 0x65: /* Pentium D D0: errata AA21 */
196 * These CPU models hang when set to 12.5% or 25%.
197 * See Errata N44, P18l and AA21.
/* Lowest usable setting index once errata-disabled steps are removed. */
202 sc->lowest_val = TCC_NUM_SETTINGS - sc->set_count + 1;
205 * Before we finish attach, switch to 100%. It's possible the BIOS
206 * set us to a lower rate. The user can override this after boot.
209 p4tcc_set(dev, &set);
211 cpufreq_register(dev);
/*
 * device_detach method: unregister from the cpufreq framework and return
 * the TCC to Automatic mode on the way out.
 */
216 p4tcc_detach(device_t dev)
218 struct cf_setting set;
221 error = cpufreq_unregister(dev);
226 * Before we finish detach, switch to Automatic mode.
229 p4tcc_set(dev, &set);
/*
 * cpufreq_drv_settings method: fill *sets with this driver's supported
 * relative settings, highest duty cycle first, and return the number of
 * entries in *count.  The caller must provide room for sc->set_count.
 */
234 p4tcc_settings(device_t dev, struct cf_setting *sets, int *count)
236 struct p4tcc_softc *sc;
239 sc = device_get_softc(dev);
240 if (sets == NULL || count == NULL)
242 if (*count < sc->set_count)
245 /* Return a list of valid settings for this driver. */
246 memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * sc->set_count);
247 val = TCC_NUM_SETTINGS;
/* Walk from 100% downward, one TCC step per entry. */
248 for (i = 0; i < sc->set_count; i++, val--) {
249 sets[i].freq = TCC_SPEED_PERCENT(val);
252 *count = sc->set_count;
/*
 * cpufreq_drv_set method: program MSR_THERM_CONTROL for the requested
 * relative setting.  set->freq is a percentage in 1/100ths (e.g. 10000
 * == 100%); it must map to an integral TCC step within the range this
 * CPU supports.  Selecting 100% clears the On-Demand bit, returning the
 * TCC to Automatic mode.
 */
258 p4tcc_set(device_t dev, const struct cf_setting *set)
260 struct p4tcc_softc *sc;
266 sc = device_get_softc(dev);
269 * Validate requested state converts to a setting that is an integer
270 * from [sc->lowest_val .. TCC_NUM_SETTINGS].
272 val = set->freq * TCC_NUM_SETTINGS / 10000;
273 if (val * 10000 != set->freq * TCC_NUM_SETTINGS ||
274 val < sc->lowest_val || val > TCC_NUM_SETTINGS)
278 * Read the current register and mask off the old setting and
279 * On-Demand bit. If the new val is < 100%, set it and the On-Demand
280 * bit, otherwise just return to Automatic mode.
282 msr = rdmsr(MSR_THERM_CONTROL);
283 mask = (TCC_NUM_SETTINGS - 1) << TCC_REG_OFFSET;
284 msr &= ~(mask | TCC_ENABLE_ONDEMAND);
285 if (val < TCC_NUM_SETTINGS)
286 msr |= (val << TCC_REG_OFFSET) | TCC_ENABLE_ONDEMAND;
287 wrmsr(MSR_THERM_CONTROL, msr);
290 * Record whether we're now in Automatic or On-Demand mode. We have
291 * to cache this since there is no reliable way to check if TCC is in
292 * Automatic mode (i.e., at 100% or possibly 50%). Reading bit 4 of
293 * the ACPI Thermal Monitor Control Register produces 0 no matter
294 * what the current mode.
296 if (msr & TCC_ENABLE_ONDEMAND)
297 sc->auto_mode = FALSE;
299 sc->auto_mode = TRUE;
/*
 * cpufreq_drv_get method: report the current relative setting.  The MSR
 * contents are only trusted after we have programmed On-Demand mode
 * ourselves (tracked in sc->auto_mode); in Automatic mode we assume
 * 100%.
 */
305 p4tcc_get(device_t dev, struct cf_setting *set)
307 struct p4tcc_softc *sc;
313 sc = device_get_softc(dev);
316 * Read the current register and extract the current setting. If
317 * in automatic mode, assume we're at TCC_NUM_SETTINGS (100%).
319 * XXX This is not completely reliable since at high temperatures
320 * the CPU may be automatically throttling to 50% but it's the best
323 if (!sc->auto_mode) {
324 msr = rdmsr(MSR_THERM_CONTROL);
325 val = (msr >> TCC_REG_OFFSET) & (TCC_NUM_SETTINGS - 1);
327 val = TCC_NUM_SETTINGS;
329 memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
330 set->freq = TCC_SPEED_PERCENT(val);
337 p4tcc_type(device_t dev, int *type)
343 *type = CPUFREQ_TYPE_RELATIVE;