2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2010 Hudson River Trading LLC
5 * Written by: John H. Baldwin <jhb@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/kernel.h>
40 #include <sys/mutex.h>
42 #include <sys/vmmeter.h>
45 #include <vm/vm_param.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_phys.h>
49 #include <contrib/dev/acpica/include/acpi.h>
50 #include <contrib/dev/acpica/include/aclocal.h>
51 #include <contrib/dev/acpica/include/actables.h>
53 #include <machine/intr_machdep.h>
54 #include <machine/md_var.h>
55 #include <x86/apicvar.h>
57 #include <dev/acpica/acpivar.h>
/*
 * File-scope state built up while parsing the ACPI SRAT and SLIT tables.
 * NOTE(review): this extract is truncated -- the body of struct cpu_info
 * (apparently holding at least .domain, .enabled and .has_memory fields,
 * judging by uses below) is not visible here; confirm against the full file.
 */
60 static struct cpu_info {
/* Memory ranges discovered in the SRAT; one spare slot for a sentinel. */
66 struct mem_affinity mem_info[VM_PHYSSEG_MAX + 1];
/* Mapped SRAT table and the physical address it was found at (0 = none). */
69 static ACPI_TABLE_SRAT *srat;
70 static vm_paddr_t srat_physaddr;
/* Sorted list of proximity-domain (PXM) IDs, indexed by VM domain ID. */
72 static int domain_pxm[MAXMEMDOM];
/* Mapped SLIT table and its physical address (0 = none). */
75 static ACPI_TABLE_SLIT *slit;
76 static vm_paddr_t slit_physaddr;
/* Packed MAXMEMDOM x MAXMEMDOM domain-to-domain locality matrix. */
77 static int vm_locality_table[MAXMEMDOM * MAXMEMDOM];
79 static void srat_walk_table(acpi_subtable_handler *handler, void *arg);
/*
 * Translate the raw SLIT locality matrix (indexed by PXM proximity-domain
 * IDs) into vm_locality_table[], indexed by VM domain IDs via
 * acpi_map_pxm_to_vm_domainid().  NOTE(review): this extract is truncated;
 * the skip/continue handling for unmapped PXM entries and the computation
 * of 'offset' are not visible here -- verify against the full file.
 */
86 slit_parse_table(ACPI_TABLE_SLIT *s)
89 int i_domain, j_domain;
94 * This maps the SLIT data into the VM-domain centric view.
95 * There may be sparse entries in the PXM namespace, so
96 * remap them to a VM-domain ID and if it doesn't exist,
99 * It should result in a packed 2d array of VM-domain
100 * locality information entries.
104 printf("SLIT.Localities: %d\n", (int) s->LocalityCount);
105 for (i = 0; i < s->LocalityCount; i++) {
106 i_domain = acpi_map_pxm_to_vm_domainid(i);
112 for (j = 0; j < s->LocalityCount; j++) {
113 j_domain = acpi_map_pxm_to_vm_domainid(j);
/* Row-major entry for locality from domain i to domain j. */
116 e = s->Entry[i * s->LocalityCount + j];
118 printf("%d ", (int) e);
119 /* 255 == "no locality information" */
121 vm_locality_table[offset] = -1;
123 vm_locality_table[offset] = e;
/*
 * Locate, map, parse, and publish the SLIT.  On success the VM layer's
 * mem_locality pointer is aimed at our packed vm_locality_table[].
 * NOTE(review): the function signature (presumably "static void
 * parse_slit(void)") and the early-return bodies are not visible in this
 * extract -- confirm against the full file.
 */
132 * Look for an ACPI System Locality Distance Information Table ("SLIT")
/* Honor hint.slit.0.disabled so the table can be ignored at boot. */
138 if (resource_disabled("slit", 0)) {
142 slit_physaddr = acpi_find_table(ACPI_SIG_SLIT);
143 if (slit_physaddr == 0) {
148 * Make a pass over the table to populate the cpus[] and
/* Map the table only for the duration of the parse. */
151 slit = acpi_map_table(slit_physaddr, ACPI_SIG_SLIT);
152 slit_parse_table(slit);
153 acpi_unmap_table(slit);
157 /* Tell the VM about it! */
158 mem_locality = vm_locality_table;
/*
 * Test whether [start, end) intersects any usable range in the global
 * phys_avail[] array (pairs of start/end addresses, zero-terminated).
 * NOTE(review): the return statements are not visible in this extract;
 * the visible comparisons imply "overlap iff some entry ends after 'start'
 * and begins before 'end'" -- confirm against the full file.
 */
168 * Returns true if a memory range overlaps with at least one range in
172 overlaps_phys_avail(vm_paddr_t start, vm_paddr_t end)
/* phys_avail[] is terminated by a zero/zero pair. */
176 for (i = 0; phys_avail[i] != 0 && phys_avail[i + 1] != 0; i += 2) {
177 if (phys_avail[i + 1] <= start)
179 if (phys_avail[i] < end)
/*
 * acpi_subtable_handler callback: process one SRAT subtable entry.
 * Records enabled local-APIC / x2APIC CPU affinities into cpus[] and
 * enabled memory affinities into the sorted mem_info[] array.
 * NOTE(review): this extract is truncated -- 'break' statements, early
 * 'continue'/'break' bodies, the error-flag store through 'arg', and
 * several closing braces are not visible; verify against the full file.
 */
188 srat_parse_entry(ACPI_SUBTABLE_HEADER *entry, void *arg)
190 ACPI_SRAT_CPU_AFFINITY *cpu;
191 ACPI_SRAT_X2APIC_CPU_AFFINITY *x2apic;
192 ACPI_SRAT_MEM_AFFINITY *mem;
195 switch (entry->Type) {
196 case ACPI_SRAT_TYPE_CPU_AFFINITY:
197 cpu = (ACPI_SRAT_CPU_AFFINITY *)entry;
/*
 * The 32-bit proximity domain is split across ProximityDomainLo
 * (bits 0-7) and the 3-byte ProximityDomainHi array (bits 8-31).
 */
198 domain = cpu->ProximityDomainLo |
199 cpu->ProximityDomainHi[0] << 8 |
200 cpu->ProximityDomainHi[1] << 16 |
201 cpu->ProximityDomainHi[2] << 24;
203 printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
205 (cpu->Flags & ACPI_SRAT_CPU_ENABLED) ?
206 "enabled" : "disabled");
/* Disabled entries are reported above but otherwise ignored. */
207 if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED))
/* APIC IDs beyond max_apic_id cannot be indexed into cpus[]. */
209 if (cpu->ApicId > max_apic_id) {
210 printf("SRAT: Ignoring local APIC ID %u (too high)\n",
/* A duplicate APIC ID indicates a firmware bug. */
215 if (cpus[cpu->ApicId].enabled) {
216 printf("SRAT: Duplicate local APIC ID %u\n",
221 cpus[cpu->ApicId].domain = domain;
222 cpus[cpu->ApicId].enabled = 1;
224 case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
225 x2apic = (ACPI_SRAT_X2APIC_CPU_AFFINITY *)entry;
227 printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
228 x2apic->ApicId, x2apic->ProximityDomain,
229 (x2apic->Flags & ACPI_SRAT_CPU_ENABLED) ?
230 "enabled" : "disabled");
231 if (!(x2apic->Flags & ACPI_SRAT_CPU_ENABLED))
233 if (x2apic->ApicId > max_apic_id) {
234 printf("SRAT: Ignoring local APIC ID %u (too high)\n",
/*
 * Unlike the xAPIC case above, a duplicate x2APIC ID is treated
 * as a fatal kernel bug (KASSERT) rather than a skipped entry.
 */
239 KASSERT(!cpus[x2apic->ApicId].enabled,
240 ("Duplicate local APIC ID %u", x2apic->ApicId));
241 cpus[x2apic->ApicId].domain = x2apic->ProximityDomain;
242 cpus[x2apic->ApicId].enabled = 1;
244 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
245 mem = (ACPI_SRAT_MEM_AFFINITY *)entry;
248 "SRAT: Found memory domain %d addr 0x%jx len 0x%jx: %s\n",
249 mem->ProximityDomain, (uintmax_t)mem->BaseAddress,
250 (uintmax_t)mem->Length,
251 (mem->Flags & ACPI_SRAT_MEM_ENABLED) ?
252 "enabled" : "disabled");
253 if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED))
/*
 * Skip ranges the CPU cannot address or that do not intersect
 * any usable physical memory in phys_avail[].
 */
255 if (mem->BaseAddress >= cpu_getmaxphyaddr() ||
256 !overlaps_phys_avail(mem->BaseAddress,
257 mem->BaseAddress + mem->Length)) {
258 printf("SRAT: Ignoring memory at addr 0x%jx\n",
259 (uintmax_t)mem->BaseAddress);
/* mem_info[] has exactly VM_PHYSSEG_MAX usable slots. */
262 if (num_mem == VM_PHYSSEG_MAX) {
263 printf("SRAT: Too many memory regions\n");
/*
 * Find the sorted insertion slot; overlapping entries are a
 * firmware error.  NOTE(review): the assignment of 'slot' is
 * not visible in this extract.
 */
268 for (i = 0; i < num_mem; i++) {
269 if (mem_info[i].end <= mem->BaseAddress)
271 if (mem_info[i].start <
272 (mem->BaseAddress + mem->Length)) {
273 printf("SRAT: Overlapping memory entries\n");
/* Shift later entries up to keep mem_info[] sorted by address. */
279 for (i = num_mem; i > slot; i--)
280 mem_info[i] = mem_info[i - 1];
281 mem_info[slot].start = mem->BaseAddress;
282 mem_info[slot].end = mem->BaseAddress + mem->Length;
283 mem_info[slot].domain = mem->ProximityDomain;
/*
 * Sanity-check the parsed SRAT: every memory domain must have at least one
 * CPU, and every CPU's domain must have memory.  CPUs in a memory-less
 * domain get an empty (start == end == 0) mem_info[] placeholder so the
 * domain still exists.  NOTE(review): the function signature (presumably
 * "static int check_domains(void)") and the return paths are not visible
 * in this truncated extract -- confirm against the full file.
 */
290 * Ensure each memory domain has at least one CPU and that each CPU
291 * has at least one memory domain.
/* Pass 1: every memory domain needs at least one enabled CPU. */
298 for (i = 0; i < num_mem; i++) {
300 for (j = 0; j <= max_apic_id; j++)
301 if (cpus[j].enabled &&
302 cpus[j].domain == mem_info[i].domain) {
303 cpus[j].has_memory = 1;
307 printf("SRAT: No CPU found for memory domain %d\n",
/* Pass 2: every enabled CPU needs a domain with memory. */
312 for (i = 0; i <= max_apic_id; i++)
313 if (cpus[i].enabled && !cpus[i].has_memory) {
315 for (j = 0; j < num_mem && !found; j++) {
316 if (mem_info[j].domain == cpus[i].domain)
321 printf("SRAT: mem dom %d is empty\n",
/* Synthesize an empty region so the domain is still enumerated. */
323 mem_info[num_mem].start = 0;
324 mem_info[num_mem].end = 0;
325 mem_info[num_mem].domain = cpus[i].domain;
/*
 * Verify that the sorted mem_info[] regions cover every usable range in
 * phys_avail[]; walking both arrays in address order.  Fails (with a
 * diagnostic) if any phys_avail[] span falls outside all SRAT regions.
 * NOTE(review): this extract is truncated -- initialization of 'j', the
 * success/failure return statements, and some loop-advance code are not
 * visible; confirm against the full file.
 */
333 * Check that the SRAT memory regions cover all of the regions in
337 check_phys_avail(void)
342 /* j is the current offset into phys_avail[]. */
343 address = phys_avail[0];
345 for (i = 0; i < num_mem; i++) {
347 * Consume as many phys_avail[] entries as fit in this
/* 'address' is the next physical address still needing coverage. */
350 while (address >= mem_info[i].start &&
351 address <= mem_info[i].end) {
353 * If we cover the rest of this phys_avail[] entry,
354 * advance to the next entry.
356 if (phys_avail[j + 1] <= mem_info[i].end) {
/* Zero/zero pair terminates phys_avail[]: everything covered. */
358 if (phys_avail[j] == 0 &&
359 phys_avail[j + 1] == 0) {
362 address = phys_avail[j];
/* Entry extends past this region; resume past the region's end. */
364 address = mem_info[i].end + 1;
367 printf("SRAT: No memory region found for 0x%jx - 0x%jx\n",
368 (uintmax_t)phys_avail[j], (uintmax_t)phys_avail[j + 1]);
/*
 * Build the sorted domain_pxm[] table of distinct proximity-domain IDs and
 * rewrite every mem_info[] and cpus[] domain field to its compact,
 * zero-based index.  NOTE(review): this extract is truncated -- the
 * function's return statements, the 'slot' assignment, the ndomain
 * increment, and the final cpus[] renumber store are not visible; confirm
 * against the full file.
 */
373 * Renumber the memory domains to be compact and zero-based if not
374 * already. Returns an error if there are too many domains.
377 renumber_domains(void)
381 /* Enumerate all the domains. */
383 for (i = 0; i < num_mem; i++) {
384 /* See if this domain is already known. */
/* domain_pxm[] is kept sorted; stop at the first >= entry. */
385 for (j = 0; j < ndomain; j++) {
386 if (domain_pxm[j] >= mem_info[i].domain)
389 if (j < ndomain && domain_pxm[j] == mem_info[i].domain)
392 if (ndomain >= MAXMEMDOM) {
394 printf("SRAT: Too many memory domains\n");
398 /* Insert the new domain at slot 'j'. */
/* Shift existing entries up to preserve sort order. */
400 for (j = ndomain; j > slot; j--)
401 domain_pxm[j] = domain_pxm[j - 1];
402 domain_pxm[slot] = mem_info[i].domain;
406 /* Renumber each domain to its index in the sorted 'domain_pxm' list. */
407 for (i = 0; i < ndomain; i++) {
409 * If the domain is already the right value, no need
412 if (domain_pxm[i] == i)
415 /* Walk the cpu[] and mem_info[] arrays to renumber. */
416 for (j = 0; j < num_mem; j++)
417 if (mem_info[j].domain == domain_pxm[i])
418 mem_info[j].domain = i;
419 for (j = 0; j <= max_apic_id; j++)
420 if (cpus[j].enabled && cpus[j].domain == domain_pxm[i])
/*
 * Locate and parse the SRAT, then publish the results to the VM layer
 * (vm_ndomains, all_domains, mem_affinity).  Boot-time memory for cpus[]
 * is carved off the end of the last phys_avail[] range because malloc()
 * is not yet available.  NOTE(review): the function signature (presumably
 * "static int parse_srat(void)"), return statements, and the error-cleanup
 * path are not visible in this truncated extract -- confirm against the
 * full file.
 */
428 * Look for an ACPI System Resource Affinity Table ("SRAT")
433 unsigned int idx, size;
/* Honor hint.srat.0.disabled so the table can be ignored at boot. */
437 if (resource_disabled("srat", 0))
440 srat_physaddr = acpi_find_table(ACPI_SIG_SRAT);
441 if (srat_physaddr == 0)
445 * Allocate data structure:
447 * Find the last physical memory region and steal some memory from
448 * it. This is done because at this point in the boot process
449 * malloc is still not usable.
/* Advance idx to the last start/end pair in phys_avail[]. */
451 for (idx = 0; phys_avail[idx + 1] != 0; idx += 2);
452 KASSERT(idx != 0, ("phys_avail is empty!"));
/* One cpu_info slot per possible local APIC ID, page-aligned. */
455 size = sizeof(*cpus) * (max_apic_id + 1);
456 addr = trunc_page(phys_avail[idx + 1] - size);
457 KASSERT(addr >= phys_avail[idx],
458 ("Not enough memory for SRAT table items"));
/* Shrink the last range so the VM never hands this memory out. */
459 phys_avail[idx + 1] = addr - 1;
462 * We cannot rely on PHYS_TO_DMAP because this code is also used in
463 * i386, so use pmap_mapbios to map the memory, this will end up using
464 * the default memory attribute (WB), and the DMAP when available.
466 cpus = (struct cpu_info *)pmap_mapbios(addr, size);
470 * Make a pass over the table to populate the cpus[] and
473 srat = acpi_map_table(srat_physaddr, ACPI_SIG_SRAT);
/* 'error' is set by srat_parse_entry() via its arg pointer. */
475 srat_walk_table(srat_parse_entry, &error);
476 acpi_unmap_table(srat);
478 if (error || check_domains() != 0 || check_phys_avail() != 0 ||
479 renumber_domains() != 0) {
/* Success: hand the domain count and affinity table to the VM. */
485 vm_ndomains = ndomain;
486 for (int i = 0; i < vm_ndomains; i++)
487 DOMAINSET_SET(i, &all_domains);
488 mem_affinity = mem_info;
/*
 * Initialize the locality matrix to -1 ("no locality information") so
 * domains absent from the SLIT -- or systems without a SLIT -- report
 * unknown distance everywhere.
 */
495 init_mem_locality(void)
500 * For now, assume -1 == "no locality information for
503 for (i = 0; i < MAXMEMDOM * MAXMEMDOM; i++)
504 vm_locality_table[i] = -1;
/*
 * SYSINIT hook, run just before the VM subsystem initializes: parse the
 * SRAT and (visible further up) SLIT so NUMA data is ready for the VM.
 * NOTE(review): the early-return body after parse_srat() failure is not
 * visible in this truncated extract.
 */
508 parse_acpi_tables(void *dummy)
511 if (parse_srat() < 0)
516 SYSINIT(parse_acpi_tables, SI_SUB_VM - 1, SI_ORDER_FIRST, parse_acpi_tables,
/*
 * Invoke 'handler' on every subtable of the currently mapped SRAT.
 * The subtables start immediately after the fixed table header (srat + 1)
 * and end at Header.Length bytes from the table start.
 */
520 srat_walk_table(acpi_subtable_handler *handler, void *arg)
523 acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
/*
 * SYSINIT hook, run after CPUs are enumerated: copy each CPU's SRAT
 * domain into its pcpu data and the per-domain CPU sets, then release
 * the boot-time cpus[] mapping.  A CPU whose APIC ID never appeared in
 * the SRAT is a fatal inconsistency (panic).  NOTE(review): this extract
 * is truncated -- the pcpu_find() call assigning 'pc', loop-skip bodies,
 * and closing braces are not visible; confirm against the full file.
 */
528 * Setup per-CPU domain IDs.
531 srat_set_cpus(void *dummy)
533 struct cpu_info *cpu;
/* Nothing to do if no SRAT was found at boot. */
537 if (srat_physaddr == 0)
539 for (i = 0; i < MAXCPU; i++) {
543 KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
544 cpu = &cpus[pc->pc_apic_id];
546 panic("SRAT: CPU with APIC ID %u is not known",
549 pc->pc_domain = cpu->domain;
553 CPU_SET(i, &cpuset_domain[pc->pc_domain]);
555 printf("SRAT: CPU %u has memory domain %d\n", i,
559 /* Last usage of the cpus array, unmap it. */
560 pmap_unmapbios((vm_offset_t)cpus, sizeof(*cpus) * (max_apic_id + 1));
563 SYSINIT(srat_set_cpus, SI_SUB_CPU, SI_ORDER_ANY, srat_set_cpus, NULL);
/*
 * Translate an ACPI proximity-domain (_PXM) value into the compact VM
 * domain ID assigned by renumber_domains(): a linear search of the sorted
 * domain_pxm[] table.  NOTE(review): the "return (i);" / "return (-1);"
 * lines are not visible in this truncated extract.
 */
566 * Map a _PXM value to a VM domain ID.
568 * Returns the domain ID, or -1 if no domain ID was found.
571 acpi_map_pxm_to_vm_domainid(int pxm)
575 for (i = 0; i < ndomain; i++) {
576 if (domain_pxm[i] == pxm)
/*
 * Single-domain (MAXMEMDOM == 1) stub: with no NUMA support compiled in,
 * every _PXM lookup fails.  NOTE(review): the function body (presumably
 * "return (-1);") is not visible in this truncated extract.
 */
583 #else /* MAXMEMDOM == 1 */
586 acpi_map_pxm_to_vm_domainid(int pxm)
592 #endif /* MAXMEMDOM > 1 */