/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <machine/cpu.h>
#define	WARMBOOT_TARGET		0
#define	WARMBOOT_OFF		(KERNBASE + 0x0467)
#define	WARMBOOT_SEG		(KERNBASE + 0x0469)

#define	CMOS_REG		(0x70)
#define	CMOS_DATA		(0x71)
#define	BIOS_RESET		(0x0f)
#define	BIOS_WARM		(0x0a)
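/*
 * Warm-boot hook: writing BIOS_WARM into the CMOS shutdown status byte
 * (offset BIOS_RESET) makes the BIOS skip POST on the next CPU startup and
 * jump through the real-mode vector stored at 0x40:0x67, which the kernel
 * accesses at WARMBOOT_OFF/WARMBOOT_SEG above KERNBASE.
 * native_start_all_aps() points that vector at the AP trampoline before
 * sending the startup IPIs and restores it afterwards.
 */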
#define	GiB(v)			(v ## ULL << 30)
extern struct pcpu __pcpu[];

/* Temporary variables for init_secondary()  */
char *doublefault_stack;
char *mce_stack;
char *nmi_stack;
char *dbg_stack;
void *dpcpu;

/*
 * Local data and functions.
 */
static int	start_ap(int apic_id);
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
void
mp_bootaddress(vm_paddr_t *physmap, unsigned int *physmap_idx)
{
	unsigned int i;

	alloc_ap_trampoline(physmap, physmap_idx);

	/*
	 * Find a memory region big enough below the 4GB
	 * boundary to store the initial page tables.  Region
	 * must be mapped by the direct map.
	 *
	 * Note that it needs to be aligned to a page boundary.
	 */
	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
		if (physmap[i] >= GiB(4) || physmap[i + 1] -
		    round_page(physmap[i]) < PAGE_SIZE * 3 ||
		    physmap[i + 1] > Maxmem)
			continue;

		mptramp_pagetables = round_page(physmap[i]);
		physmap[i] = round_page(physmap[i]) + (PAGE_SIZE * 3);
		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
			memmove(&physmap[i], &physmap[i + 2],
			    sizeof(*physmap) * (*physmap_idx - i + 2));
			*physmap_idx -= 2;
		}
		return;
	}

	mptramp_pagetables = trunc_page(boot_address) - (PAGE_SIZE * 3);
	if (bootverbose)
		printf(
"Cannot find enough space for the initial AP page tables, placing them at %#x",
		    mptramp_pagetables);
}
/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			setidt(IPI_INVLTLB, pti ?
			    IDTVEC(invltlb_invpcid_pti_pti) :
			    IDTVEC(invltlb_invpcid_nopti), SDT_SYSIGT,
			    SEL_KPL, 0);
			setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_invpcid_pti) :
			    IDTVEC(invlpg_invpcid), SDT_SYSIGT, SEL_KPL, 0);
			setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_invpcid_pti) :
			    IDTVEC(invlrng_invpcid), SDT_SYSIGT, SEL_KPL, 0);
		} else {
			setidt(IPI_INVLTLB, pti ? IDTVEC(invltlb_pcid_pti) :
			    IDTVEC(invltlb_pcid), SDT_SYSIGT, SEL_KPL, 0);
			setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_pcid_pti) :
			    IDTVEC(invlpg_pcid), SDT_SYSIGT, SEL_KPL, 0);
			setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_pcid_pti) :
			    IDTVEC(invlrng_pcid), SDT_SYSIGT, SEL_KPL, 0);
		}
	} else {
		setidt(IPI_INVLTLB, pti ? IDTVEC(invltlb_pti) : IDTVEC(invltlb),
		    SDT_SYSIGT, SEL_KPL, 0);
		setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_pti) : IDTVEC(invlpg),
		    SDT_SYSIGT, SEL_KPL, 0);
		setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_pti) : IDTVEC(invlrng),
		    SDT_SYSIGT, SEL_KPL, 0);
	}
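	/*
	 * At this point IPI_INVLTLB/IPI_INVLPG/IPI_INVLRNG point at one of
	 * three handler families: INVPCID-based when the CPU has INVPCID,
	 * PCID-based when only PCID is available, and the plain handlers
	 * otherwise.  Each family has a PTI variant used when page-table
	 * isolation (pti) is enabled.
	 */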
	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, pti ? IDTVEC(invlcache_pti) : IDTVEC(invlcache),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	init_ops.start_all_aps();

	set_interrupt_apic_ids();
}
/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t cr0;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist3 = (long) np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist4 = (long) np;
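	/*
	 * IST1 through IST4 give the double fault, NMI, MC# and DB# handlers
	 * dedicated stacks that are valid even when the thread's kernel
	 * stack cannot be trusted.
	 */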
	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_commontssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
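	/*
	 * pc_pti_rsp0 is the top of the small per-cpu PTI trampoline stack,
	 * aligned down to 16 bytes; the PTI entry code runs on it until the
	 * kernel page tables and the thread's kernel stack are switched in.
	 */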
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GPROC0_SEL];
	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GUSERLDT_SEL];
	pc->pc_curpmap = kernel_pmap;
	pc->pc_pcid_gen = 1;
	pc->pc_pcid_next = PMAP_PCID_KERN + 1;
	common_tss[cpu].tss_rsp0 = 0;

	/* Save the per-cpu pointer for use by the NMI handler. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;

	/* Save the per-cpu pointer for use by the MC# handler. */
	np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;

	/* Save the per-cpu pointer for use by the DB# handler. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;
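	/*
	 * The NMI, MC# and DB# entry points recover the PCPU pointer from
	 * np_pcpu at the top of their IST stacks, since these events can
	 * arrive while GS base still holds the user-mode value.
	 */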
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s:		CR0_PG, CR0_PE
	 * Set by cpu_setregs:	CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}
/*******************************************************************
 * local functions and data
 */

/*
 * start each AP in our list
 */
int
native_start_all_aps(void)
{
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
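	/*
	 * The result is a compact map of the low 1GB: one page each of PML4,
	 * PDP and PD entries, with every PML4 and PDP slot pointing at the
	 * same lower-level page and the PD filled with 2MB mappings.  It
	 * keeps the trampoline's physical-address %rip mapped until the AP
	 * switches to the real kernel page tables.
	 */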
	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
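	/*
	 * Both the vector and the CMOS shutdown status byte are restored once
	 * all APs have been started (see the end of this function), so later
	 * resets go through the normal BIOS path again.
	 */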
	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
		    kstack_pages * PAGE_SIZE, M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(kernel_arena,
		    PAGE_SIZE, M_WAITOK | M_ZERO);
		mce_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}
	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}
/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
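	/*
	 * The startup IPI vector is the 4KB page number of the trampoline;
	 * the AP starts executing in real mode at vector << 12, which is why
	 * boot_address must be page aligned and below 1MB.
	 */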
	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}
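/*
 * Each TLB shootdown handler below acknowledges completion by storing the
 * generation number read from smp_tlb_generation into the per-cpu
 * smp_tlb_done field; the CPU that initiated the shootdown spins until all
 * targeted CPUs have published the current generation.
 */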
void
invltlb_invpcid_handler(void)
{
	struct invpcid_descr d;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;
	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
	PCPU_SET(smp_tlb_done, generation);
}
void
invltlb_invpcid_pti_handler(void)
{
	struct invpcid_descr d;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;
	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		d.pcid |= PMAP_PCID_USER_PT;
		invpcid(&d, INVPCID_CTX);
	}
	PCPU_SET(smp_tlb_done, generation);
}
void
invltlb_pcid_handler(void)
{
	uint64_t kcr3, ucr3;
	uint32_t generation, pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (PCPU_GET(curpmap) == smp_tlb_pmap) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			kcr3 = smp_tlb_pmap->pm_cr3 | pcid;
			ucr3 = smp_tlb_pmap->pm_ucr3;
			if (ucr3 != PMAP_NO_CR3) {
				ucr3 |= PMAP_PCID_USER_PT | pcid;
				pmap_pti_pcid_invalidate(ucr3, kcr3);
			} else
				load_cr3(kcr3);
		}
	}
	PCPU_SET(smp_tlb_done, generation);
}
void
invlpg_invpcid_handler(void)
{
	struct invpcid_descr d;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
	PCPU_SET(smp_tlb_done, generation);
}
void
invlpg_pcid_handler(void)
{
	uint64_t kcr3, ucr3;
	uint32_t generation;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
	PCPU_SET(smp_tlb_done, generation);
}
void
invlrng_invpcid_handler(void)
{
	struct invpcid_descr d;
	vm_offset_t addr, addr2;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	generation = smp_tlb_generation;	/* Overlap with serialization */
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
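	/*
	 * The loop above flushed the range page by page in the current
	 * (kernel-visible) address space; when the pmap has a PTI user
	 * page-table copy, the same range is flushed again below under the
	 * user PCID.
	 */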
	if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < addr2);
	}
	PCPU_SET(smp_tlb_done, generation);
}
void
invlrng_pcid_handler(void)
{
	vm_offset_t addr, addr2;
	uint64_t kcr3, ucr3;
	uint32_t generation;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	generation = smp_tlb_generation;	/* Overlap with serialization */
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
	}
	PCPU_SET(smp_tlb_done, generation);
}