XXXX pat_sync is clever enough to check for X86FSET_PAT
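The diff below removes the caller-side X86FSET_PAT guard around pat_sync() in mp_startup_common(). A minimal sketch of why that guard is redundant, assuming pat_sync() itself opens with the same feature test (which is what the synopsis above asserts; pat_sync()'s actual body lives elsewhere in the tree and is not part of this diff):

	void
	pat_sync(void)
	{
		/* assumed: pat_sync() performs the feature check itself */
		if (!is_x86_feature(x86_featureset, X86FSET_PAT))
			return;		/* no PAT on this CPU: nothing to program */

		/* ... program this CPU's PAT MSR ... */
	}

With the test inside pat_sync(), the unconditional call in mp_startup_common() is safe on hardware without PAT support.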
--- old/usr/src/uts/i86pc/os/mp_startup.c
+++ new/usr/src/uts/i86pc/os/mp_startup.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 26 * Copyright (c) 2010, Intel Corporation.
27 27 * All rights reserved.
28 28 */
29 29 /*
30 30 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
31 31 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
32 32 */
33 33
34 34 #include <sys/types.h>
35 35 #include <sys/thread.h>
36 36 #include <sys/cpuvar.h>
37 37 #include <sys/cpu.h>
38 38 #include <sys/t_lock.h>
39 39 #include <sys/param.h>
40 40 #include <sys/proc.h>
41 41 #include <sys/disp.h>
42 42 #include <sys/class.h>
43 43 #include <sys/cmn_err.h>
44 44 #include <sys/debug.h>
45 45 #include <sys/note.h>
46 46 #include <sys/asm_linkage.h>
47 47 #include <sys/x_call.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/var.h>
50 50 #include <sys/vtrace.h>
51 51 #include <vm/hat.h>
52 52 #include <vm/as.h>
53 53 #include <vm/seg_kmem.h>
54 54 #include <vm/seg_kp.h>
55 55 #include <sys/segments.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/stack.h>
58 58 #include <sys/smp_impldefs.h>
59 59 #include <sys/x86_archext.h>
60 60 #include <sys/machsystm.h>
61 61 #include <sys/traptrace.h>
62 62 #include <sys/clock.h>
63 63 #include <sys/cpc_impl.h>
64 64 #include <sys/pg.h>
65 65 #include <sys/cmt.h>
66 66 #include <sys/dtrace.h>
67 67 #include <sys/archsystm.h>
68 68 #include <sys/fp.h>
69 69 #include <sys/reboot.h>
70 70 #include <sys/kdi_machimpl.h>
71 71 #include <vm/hat_i86.h>
72 72 #include <vm/vm_dep.h>
73 73 #include <sys/memnode.h>
74 74 #include <sys/pci_cfgspace.h>
75 75 #include <sys/mach_mmu.h>
76 76 #include <sys/sysmacros.h>
77 77 #if defined(__xpv)
78 78 #include <sys/hypervisor.h>
79 79 #endif
80 80 #include <sys/cpu_module.h>
81 81 #include <sys/ontrap.h>
82 82
83 83 struct cpu cpus[1]; /* CPU data */
84 84 struct cpu *cpu[NCPU] = {&cpus[0]}; /* pointers to all CPUs */
85 85 struct cpu *cpu_free_list; /* list for released CPUs */
86 86 cpu_core_t cpu_core[NCPU]; /* cpu_core structures */
87 87
88 88 #define cpu_next_free cpu_prev
89 89
90 90 /*
91 91 * Useful for disabling MP bring-up on a MP capable system.
92 92  * Useful for disabling MP bring-up on an MP capable system.
93 93 int use_mp = 1;
94 94
95 95 /*
96 96 * to be set by a PSM to indicate what cpus
97 97 * are sitting around on the system.
98 98 */
99 99 cpuset_t mp_cpus;
100 100
101 101 /*
102 102 * This variable is used by the hat layer to decide whether or not
103 103 * critical sections are needed to prevent race conditions. For sun4m,
104 104 * this variable is set once enough MP initialization has been done in
105 105 * order to allow cross calls.
106 106 */
107 107 int flushes_require_xcalls;
108 108
109 109 cpuset_t cpu_ready_set; /* initialized in startup() */
110 110
111 111 static void mp_startup_boot(void);
112 112 static void mp_startup_hotplug(void);
113 113
114 114 static void cpu_sep_enable(void);
115 115 static void cpu_sep_disable(void);
116 116 static void cpu_asysc_enable(void);
117 117 static void cpu_asysc_disable(void);
118 118
119 119 /*
120 120 * Init CPU info - get CPU type info for processor_info system call.
121 121 */
122 122 void
123 123 init_cpu_info(struct cpu *cp)
124 124 {
125 125 processor_info_t *pi = &cp->cpu_type_info;
126 126
127 127 /*
128 128 * Get clock-frequency property for the CPU.
129 129 */
130 130 pi->pi_clock = cpu_freq;
131 131
132 132 /*
133 133 * Current frequency in Hz.
134 134 */
135 135 cp->cpu_curr_clock = cpu_freq_hz;
136 136
137 137 /*
138 138 * Supported frequencies.
139 139 */
140 140 if (cp->cpu_supp_freqs == NULL) {
141 141 cpu_set_supp_freqs(cp, NULL);
142 142 }
143 143
144 144 (void) strcpy(pi->pi_processor_type, "i386");
145 145 if (fpu_exists)
146 146 (void) strcpy(pi->pi_fputypes, "i387 compatible");
147 147
148 148 cp->cpu_idstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);
149 149 cp->cpu_brandstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);
150 150
151 151 /*
152 152 * If called for the BSP, cp is equal to current CPU.
153 153 * For non-BSPs, cpuid info of cp is not ready yet, so use cpuid info
154 154 * of current CPU as default values for cpu_idstr and cpu_brandstr.
155 155 * They will be corrected in mp_startup_common() after cpuid_pass1()
156 156 * has been invoked on target CPU.
157 157 */
158 158 (void) cpuid_getidstr(CPU, cp->cpu_idstr, CPU_IDSTRLEN);
159 159 (void) cpuid_getbrandstr(CPU, cp->cpu_brandstr, CPU_IDSTRLEN);
160 160 }
161 161
162 162 /*
163 163 * Configure syscall support on this CPU.
164 164 */
165 165 /*ARGSUSED*/
166 166 void
167 167 init_cpu_syscall(struct cpu *cp)
168 168 {
169 169 kpreempt_disable();
170 170
171 171 #if defined(__amd64)
172 172 if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
173 173 is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
174 174
175 175 #if !defined(__lint)
176 176 /*
177 177 * The syscall instruction imposes a certain ordering on
178 178 * segment selectors, so we double-check that ordering
179 179 * here.
180 180 */
181 181 ASSERT(KDS_SEL == KCS_SEL + 8);
182 182 ASSERT(UDS_SEL == U32CS_SEL + 8);
183 183 ASSERT(UCS_SEL == U32CS_SEL + 16);
184 184 #endif
185 185 /*
186 186 * Turn syscall/sysret extensions on.
187 187 */
188 188 cpu_asysc_enable();
189 189
190 190 /*
191 191 * Program the magic registers ..
192 192 */
193 193 wrmsr(MSR_AMD_STAR,
194 194 ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32);
195 195 wrmsr(MSR_AMD_LSTAR, (uint64_t)(uintptr_t)sys_syscall);
196 196 wrmsr(MSR_AMD_CSTAR, (uint64_t)(uintptr_t)sys_syscall32);
197 197
198 198 /*
199 199 * This list of flags is masked off the incoming
200 200 * %rfl when we enter the kernel.
201 201 */
202 202 wrmsr(MSR_AMD_SFMASK, (uint64_t)(uintptr_t)(PS_IE | PS_T));
203 203 }
204 204 #endif
205 205
206 206 /*
207 207 * On 32-bit kernels, we use sysenter/sysexit because it's too
208 208 * hard to use syscall/sysret, and it is more portable anyway.
209 209 *
210 210 * On 64-bit kernels on Nocona machines, the 32-bit syscall
211 211 * variant isn't available to 32-bit applications, but sysenter is.
212 212 */
213 213 if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
214 214 is_x86_feature(x86_featureset, X86FSET_SEP)) {
215 215
216 216 #if !defined(__lint)
217 217 /*
218 218 * The sysenter instruction imposes a certain ordering on
219 219 * segment selectors, so we double-check that ordering
220 220 * here. See "sysenter" in Intel document 245471-012, "IA-32
221 221 * Intel Architecture Software Developer's Manual Volume 2:
222 222 * Instruction Set Reference"
223 223 */
224 224 ASSERT(KDS_SEL == KCS_SEL + 8);
225 225
226 226 ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
227 227 ASSERT32(UDS_SEL == UCS_SEL + 8);
228 228
229 229 ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
230 230 ASSERT64(UDS_SEL == U32CS_SEL + 8);
231 231 #endif
232 232
233 233 cpu_sep_enable();
234 234
235 235 /*
236 236 * resume() sets this value to the base of the threads stack
237 237 * via a context handler.
238 238 */
239 239 wrmsr(MSR_INTC_SEP_ESP, 0);
240 240 wrmsr(MSR_INTC_SEP_EIP, (uint64_t)(uintptr_t)sys_sysenter);
241 241 }
242 242
243 243 kpreempt_enable();
244 244 }
245 245
246 246 /*
247 247 * Multiprocessor initialization.
248 248 *
249 249 * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
250 250 * startup and idle threads for the specified CPU.
251 251 * Parameter boot is true for boot time operations and is false for CPU
252 252 * DR operations.
253 253 */
254 254 static struct cpu *
255 255 mp_cpu_configure_common(int cpun, boolean_t boot)
256 256 {
257 257 struct cpu *cp;
258 258 kthread_id_t tp;
259 259 caddr_t sp;
260 260 proc_t *procp;
261 261 #if !defined(__xpv)
262 262 extern int idle_cpu_prefer_mwait;
263 263 extern void cpu_idle_mwait();
264 264 #endif
265 265 extern void idle();
266 266 extern void cpu_idle();
267 267
268 268 #ifdef TRAPTRACE
269 269 trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
270 270 #endif
271 271
272 272 ASSERT(MUTEX_HELD(&cpu_lock));
273 273 ASSERT(cpun < NCPU && cpu[cpun] == NULL);
274 274
275 275 if (cpu_free_list == NULL) {
276 276 cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
277 277 } else {
278 278 cp = cpu_free_list;
279 279 cpu_free_list = cp->cpu_next_free;
280 280 }
281 281
282 282 cp->cpu_m.mcpu_istamp = cpun << 16;
283 283
284 284 /* Create per CPU specific threads in the process p0. */
285 285 procp = &p0;
286 286
287 287 /*
288 288 * Initialize the dispatcher first.
289 289 */
290 290 disp_cpu_init(cp);
291 291
292 292 cpu_vm_data_init(cp);
293 293
294 294 /*
295 295 * Allocate and initialize the startup thread for this CPU.
296 296 * Interrupt and process switch stacks get allocated later
297 297 * when the CPU starts running.
298 298 */
299 299 tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
300 300 TS_STOPPED, maxclsyspri);
301 301
302 302 /*
303 303 * Set state to TS_ONPROC since this thread will start running
304 304 * as soon as the CPU comes online.
305 305 *
306 306 * All the other fields of the thread structure are setup by
307 307 * thread_create().
308 308 */
309 309 THREAD_ONPROC(tp, cp);
310 310 tp->t_preempt = 1;
311 311 tp->t_bound_cpu = cp;
312 312 tp->t_affinitycnt = 1;
313 313 tp->t_cpu = cp;
314 314 tp->t_disp_queue = cp->cpu_disp;
315 315
316 316 /*
317 317 * Setup thread to start in mp_startup_common.
318 318 */
319 319 sp = tp->t_stk;
320 320 tp->t_sp = (uintptr_t)(sp - MINFRAME);
321 321 #if defined(__amd64)
322 322 tp->t_sp -= STACK_ENTRY_ALIGN; /* fake a call */
323 323 #endif
324 324 /*
325 325 * Setup thread start entry point for boot or hotplug.
326 326 */
327 327 if (boot) {
328 328 tp->t_pc = (uintptr_t)mp_startup_boot;
329 329 } else {
330 330 tp->t_pc = (uintptr_t)mp_startup_hotplug;
331 331 }
332 332
333 333 cp->cpu_id = cpun;
334 334 cp->cpu_self = cp;
335 335 cp->cpu_thread = tp;
336 336 cp->cpu_lwp = NULL;
337 337 cp->cpu_dispthread = tp;
338 338 cp->cpu_dispatch_pri = DISP_PRIO(tp);
339 339
340 340 /*
341 341 * cpu_base_spl must be set explicitly here to prevent any blocking
342 342 * operations in mp_startup_common from causing the spl of the cpu
343 343 * to drop to 0 (allowing device interrupts before we're ready) in
344 344 * resume().
345 345 * cpu_base_spl MUST remain at LOCK_LEVEL until the cpu is CPU_READY.
346 346 * As an extra bit of security on DEBUG kernels, this is enforced with
347 347 * an assertion in mp_startup_common() -- before cpu_base_spl is set
348 348 * to its proper value.
349 349 */
350 350 cp->cpu_base_spl = ipltospl(LOCK_LEVEL);
351 351
352 352 /*
353 353 * Now, initialize per-CPU idle thread for this CPU.
354 354 */
355 355 tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);
356 356
357 357 cp->cpu_idle_thread = tp;
358 358
359 359 tp->t_preempt = 1;
360 360 tp->t_bound_cpu = cp;
361 361 tp->t_affinitycnt = 1;
362 362 tp->t_cpu = cp;
363 363 tp->t_disp_queue = cp->cpu_disp;
364 364
365 365 /*
366 366 * Bootstrap the CPU's PG data
367 367 */
368 368 pg_cpu_bootstrap(cp);
369 369
370 370 /*
371 371 * Perform CPC initialization on the new CPU.
372 372 */
373 373 kcpc_hw_init(cp);
374 374
375 375 /*
376 376 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
377 377 * for each CPU.
378 378 */
379 379 setup_vaddr_for_ppcopy(cp);
380 380
381 381 /*
382 382 * Allocate page for new GDT and initialize from current GDT.
383 383 */
384 384 #if !defined(__lint)
385 385 ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
386 386 #endif
387 387 cp->cpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
388 388 bcopy(CPU->cpu_gdt, cp->cpu_gdt, (sizeof (*cp->cpu_gdt) * NGDT));
389 389
390 390 #if defined(__i386)
391 391 /*
392 392 * setup kernel %gs.
393 393 */
394 394 set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
395 395 SEL_KPL, 0, 1);
396 396 #endif
397 397
398 398 /*
399 399 * If we have more than one node, each cpu gets a copy of IDT
400 400 * local to its node. If this is a Pentium box, we use cpu 0's
401 401 	 * IDT. cpu 0's IDT has been made read-only to work around the
402 402 * cmpxchgl register bug
403 403 */
404 404 if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) {
405 405 #if !defined(__lint)
406 406 ASSERT((sizeof (*CPU->cpu_idt) * NIDT) <= PAGESIZE);
407 407 #endif
408 408 cp->cpu_idt = kmem_zalloc(PAGESIZE, KM_SLEEP);
409 409 bcopy(CPU->cpu_idt, cp->cpu_idt, PAGESIZE);
410 410 } else {
411 411 cp->cpu_idt = CPU->cpu_idt;
412 412 }
413 413
414 414 /*
415 415 * alloc space for cpuid info
416 416 */
417 417 cpuid_alloc_space(cp);
418 418 #if !defined(__xpv)
419 419 if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
420 420 idle_cpu_prefer_mwait) {
421 421 cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
422 422 cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
423 423 } else
424 424 #endif
425 425 cp->cpu_m.mcpu_idle_cpu = cpu_idle;
426 426
427 427 init_cpu_info(cp);
428 428
429 429 /*
430 430 * alloc space for ucode_info
431 431 */
432 432 ucode_alloc_space(cp);
433 433 xc_init_cpu(cp);
434 434 hat_cpu_online(cp);
435 435
436 436 #ifdef TRAPTRACE
437 437 /*
438 438 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers
439 439 */
440 440 ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
441 441 ttc->ttc_next = ttc->ttc_first;
442 442 ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
443 443 #endif
444 444
445 445 /*
446 446 * Record that we have another CPU.
447 447 */
448 448 /*
449 449 * Initialize the interrupt threads for this CPU
450 450 */
451 451 cpu_intr_alloc(cp, NINTR_THREADS);
452 452
453 453 cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
454 454 cpu_set_state(cp);
455 455
456 456 /*
457 457 * Add CPU to list of available CPUs. It'll be on the active list
458 458 * after mp_startup_common().
459 459 */
460 460 cpu_add_unit(cp);
461 461
462 462 return (cp);
463 463 }
464 464
465 465 /*
466 466 * Undo what was done in mp_cpu_configure_common
467 467 */
468 468 static void
469 469 mp_cpu_unconfigure_common(struct cpu *cp, int error)
470 470 {
471 471 ASSERT(MUTEX_HELD(&cpu_lock));
472 472
473 473 /*
474 474 * Remove the CPU from the list of available CPUs.
475 475 */
476 476 cpu_del_unit(cp->cpu_id);
477 477
478 478 if (error == ETIMEDOUT) {
479 479 /*
480 480 * The cpu was started, but never *seemed* to run any
481 481 * code in the kernel; it's probably off spinning in its
482 482 * own private world, though with potential references to
483 483 * our kmem-allocated IDTs and GDTs (for example).
484 484 *
485 485 * Worse still, it may actually wake up some time later,
486 486 * so rather than guess what it might or might not do, we
487 487 * leave the fundamental data structures intact.
488 488 */
489 489 cp->cpu_flags = 0;
490 490 return;
491 491 }
492 492
493 493 /*
494 494 	 * At this point, the only threads bound to this CPU should be
495 495 	 * special per-cpu threads: its idle thread, its pause threads,
496 496 	 * and its interrupt threads. Clean these up.
497 497 */
498 498 cpu_destroy_bound_threads(cp);
499 499 cp->cpu_idle_thread = NULL;
500 500
501 501 /*
502 502 * Free the interrupt stack.
503 503 */
504 504 segkp_release(segkp,
505 505 cp->cpu_intr_stack - (INTR_STACK_SIZE - SA(MINFRAME)));
506 506 cp->cpu_intr_stack = NULL;
507 507
508 508 #ifdef TRAPTRACE
509 509 /*
510 510 * Discard the trap trace buffer
511 511 */
512 512 {
513 513 trap_trace_ctl_t *ttc = &trap_trace_ctl[cp->cpu_id];
514 514
515 515 kmem_free((void *)ttc->ttc_first, trap_trace_bufsize);
516 516 ttc->ttc_first = NULL;
517 517 }
518 518 #endif
519 519
520 520 hat_cpu_offline(cp);
521 521
522 522 ucode_free_space(cp);
523 523
524 524 /* Free CPU ID string and brand string. */
525 525 if (cp->cpu_idstr) {
526 526 kmem_free(cp->cpu_idstr, CPU_IDSTRLEN);
527 527 cp->cpu_idstr = NULL;
528 528 }
529 529 if (cp->cpu_brandstr) {
530 530 kmem_free(cp->cpu_brandstr, CPU_IDSTRLEN);
531 531 cp->cpu_brandstr = NULL;
532 532 }
533 533
534 534 #if !defined(__xpv)
535 535 if (cp->cpu_m.mcpu_mwait != NULL) {
536 536 cpuid_mwait_free(cp);
537 537 cp->cpu_m.mcpu_mwait = NULL;
538 538 }
539 539 #endif
540 540 cpuid_free_space(cp);
541 541
542 542 if (cp->cpu_idt != CPU->cpu_idt)
543 543 kmem_free(cp->cpu_idt, PAGESIZE);
544 544 cp->cpu_idt = NULL;
545 545
546 546 kmem_free(cp->cpu_gdt, PAGESIZE);
547 547 cp->cpu_gdt = NULL;
548 548
549 549 if (cp->cpu_supp_freqs != NULL) {
550 550 size_t len = strlen(cp->cpu_supp_freqs) + 1;
551 551 kmem_free(cp->cpu_supp_freqs, len);
552 552 cp->cpu_supp_freqs = NULL;
553 553 }
554 554
555 555 teardown_vaddr_for_ppcopy(cp);
556 556
557 557 kcpc_hw_fini(cp);
558 558
559 559 cp->cpu_dispthread = NULL;
560 560 cp->cpu_thread = NULL; /* discarded by cpu_destroy_bound_threads() */
561 561
562 562 cpu_vm_data_destroy(cp);
563 563
564 564 xc_fini_cpu(cp);
565 565 disp_cpu_fini(cp);
566 566
567 567 ASSERT(cp != CPU0);
568 568 bzero(cp, sizeof (*cp));
569 569 cp->cpu_next_free = cpu_free_list;
570 570 cpu_free_list = cp;
571 571 }
572 572
573 573 /*
574 574 * Apply workarounds for known errata, and warn about those that are absent.
575 575 *
576 576 * System vendors occasionally create configurations which contain different
577 577 * revisions of the CPUs that are almost but not exactly the same. At the
578 578 * time of writing, this meant that their clock rates were the same, their
579 579  * feature sets were the same, but the required workarounds were -not-
580 580 * necessarily the same. So, this routine is invoked on -every- CPU soon
581 581 * after starting to make sure that the resulting system contains the most
582 582 * pessimal set of workarounds needed to cope with *any* of the CPUs in the
583 583 * system.
584 584 *
585 585 * workaround_errata is invoked early in mlsetup() for CPU 0, and in
586 586 * mp_startup_common() for all slave CPUs. Slaves process workaround_errata
587 587 * prior to acknowledging their readiness to the master, so this routine will
588 588 * never be executed by multiple CPUs in parallel, thus making updates to
589 589 * global data safe.
590 590 *
591 591 * These workarounds are based on Rev 3.57 of the Revision Guide for
592 592 * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, August 2005.
593 593 */
594 594
595 595 #if defined(OPTERON_ERRATUM_88)
596 596 int opteron_erratum_88; /* if non-zero -> at least one cpu has it */
597 597 #endif
598 598
599 599 #if defined(OPTERON_ERRATUM_91)
600 600 int opteron_erratum_91; /* if non-zero -> at least one cpu has it */
601 601 #endif
602 602
603 603 #if defined(OPTERON_ERRATUM_93)
604 604 int opteron_erratum_93; /* if non-zero -> at least one cpu has it */
605 605 #endif
606 606
607 607 #if defined(OPTERON_ERRATUM_95)
608 608 int opteron_erratum_95; /* if non-zero -> at least one cpu has it */
609 609 #endif
610 610
611 611 #if defined(OPTERON_ERRATUM_100)
612 612 int opteron_erratum_100; /* if non-zero -> at least one cpu has it */
613 613 #endif
614 614
615 615 #if defined(OPTERON_ERRATUM_108)
616 616 int opteron_erratum_108; /* if non-zero -> at least one cpu has it */
617 617 #endif
618 618
619 619 #if defined(OPTERON_ERRATUM_109)
620 620 int opteron_erratum_109; /* if non-zero -> at least one cpu has it */
621 621 #endif
622 622
623 623 #if defined(OPTERON_ERRATUM_121)
624 624 int opteron_erratum_121; /* if non-zero -> at least one cpu has it */
625 625 #endif
626 626
627 627 #if defined(OPTERON_ERRATUM_122)
628 628 int opteron_erratum_122; /* if non-zero -> at least one cpu has it */
629 629 #endif
630 630
631 631 #if defined(OPTERON_ERRATUM_123)
632 632 int opteron_erratum_123; /* if non-zero -> at least one cpu has it */
633 633 #endif
634 634
635 635 #if defined(OPTERON_ERRATUM_131)
636 636 int opteron_erratum_131; /* if non-zero -> at least one cpu has it */
637 637 #endif
638 638
639 639 #if defined(OPTERON_WORKAROUND_6336786)
640 640 int opteron_workaround_6336786; /* non-zero -> WA relevant and applied */
641 641 int opteron_workaround_6336786_UP = 0; /* Not needed for UP */
642 642 #endif
643 643
644 644 #if defined(OPTERON_WORKAROUND_6323525)
645 645 int opteron_workaround_6323525; /* if non-zero -> at least one cpu has it */
646 646 #endif
647 647
648 648 #if defined(OPTERON_ERRATUM_298)
649 649 int opteron_erratum_298;
650 650 #endif
651 651
652 652 #if defined(OPTERON_ERRATUM_721)
653 653 int opteron_erratum_721;
654 654 #endif
655 655
656 656 static void
657 657 workaround_warning(cpu_t *cp, uint_t erratum)
658 658 {
659 659 cmn_err(CE_WARN, "cpu%d: no workaround for erratum %u",
660 660 cp->cpu_id, erratum);
661 661 }
662 662
663 663 static void
664 664 workaround_applied(uint_t erratum)
665 665 {
666 666 if (erratum > 1000000)
667 667 cmn_err(CE_CONT, "?workaround applied for cpu issue #%d\n",
668 668 erratum);
669 669 else
670 670 cmn_err(CE_CONT, "?workaround applied for cpu erratum #%d\n",
671 671 erratum);
672 672 }
673 673
674 674 static void
675 675 msr_warning(cpu_t *cp, const char *rw, uint_t msr, int error)
676 676 {
677 677 cmn_err(CE_WARN, "cpu%d: couldn't %smsr 0x%x, error %d",
678 678 cp->cpu_id, rw, msr, error);
679 679 }
680 680
681 681 /*
682 682 * Determine the number of nodes in a Hammer / Greyhound / Griffin family
683 683 * system.
684 684 */
685 685 static uint_t
686 686 opteron_get_nnodes(void)
687 687 {
688 688 static uint_t nnodes = 0;
689 689
690 690 if (nnodes == 0) {
691 691 #ifdef DEBUG
692 692 uint_t family;
693 693
694 694 /*
695 695 * This routine uses a PCI config space based mechanism
696 696 * for retrieving the number of nodes in the system.
697 697 * Device 24, function 0, offset 0x60 as used here is not
698 698 * AMD processor architectural, and may not work on processor
699 699 * families other than those listed below.
700 700 *
701 701 * Callers of this routine must ensure that we're running on
702 702 * a processor which supports this mechanism.
703 703 * The assertion below is meant to catch calls on unsupported
704 704 * processors.
705 705 */
706 706 family = cpuid_getfamily(CPU);
707 707 ASSERT(family == 0xf || family == 0x10 || family == 0x11);
708 708 #endif /* DEBUG */
709 709
710 710 /*
711 711 * Obtain the number of nodes in the system from
712 712 * bits [6:4] of the Node ID register on node 0.
713 713 *
714 714 * The actual node count is NodeID[6:4] + 1
715 715 *
716 716 * The Node ID register is accessed via function 0,
717 717 * offset 0x60. Node 0 is device 24.
718 718 */
719 719 nnodes = ((pci_getl_func(0, 24, 0, 0x60) & 0x70) >> 4) + 1;
720 720 }
721 721 return (nnodes);
722 722 }
723 723
724 724 uint_t
725 725 do_erratum_298(struct cpu *cpu)
726 726 {
727 727 static int osvwrc = -3;
728 728 extern int osvw_opteron_erratum(cpu_t *, uint_t);
729 729
730 730 /*
731 731 * L2 Eviction May Occur During Processor Operation To Set
732 732 * Accessed or Dirty Bit.
733 733 */
734 734 if (osvwrc == -3) {
735 735 osvwrc = osvw_opteron_erratum(cpu, 298);
736 736 } else {
737 737 /* osvw return codes should be consistent for all cpus */
738 738 ASSERT(osvwrc == osvw_opteron_erratum(cpu, 298));
739 739 }
740 740
741 741 switch (osvwrc) {
742 742 case 0: /* erratum is not present: do nothing */
743 743 break;
744 744 case 1: /* erratum is present: BIOS workaround applied */
745 745 /*
746 746 * check if workaround is actually in place and issue warning
747 747 * if not.
748 748 */
749 749 if (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
750 750 ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0)) {
751 751 #if defined(OPTERON_ERRATUM_298)
752 752 opteron_erratum_298++;
753 753 #else
754 754 workaround_warning(cpu, 298);
755 755 return (1);
756 756 #endif
757 757 }
758 758 break;
759 759 case -1: /* cannot determine via osvw: check cpuid */
760 760 if ((cpuid_opteron_erratum(cpu, 298) > 0) &&
761 761 (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
762 762 ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0))) {
763 763 #if defined(OPTERON_ERRATUM_298)
764 764 opteron_erratum_298++;
765 765 #else
766 766 workaround_warning(cpu, 298);
767 767 return (1);
768 768 #endif
769 769 }
770 770 break;
771 771 }
772 772 return (0);
773 773 }
774 774
775 775 uint_t
776 776 workaround_errata(struct cpu *cpu)
777 777 {
778 778 uint_t missing = 0;
779 779
780 780 ASSERT(cpu == CPU);
781 781
782 782 /*LINTED*/
783 783 if (cpuid_opteron_erratum(cpu, 88) > 0) {
784 784 /*
785 785 * SWAPGS May Fail To Read Correct GS Base
786 786 */
787 787 #if defined(OPTERON_ERRATUM_88)
788 788 /*
789 789 * The workaround is an mfence in the relevant assembler code
790 790 */
791 791 opteron_erratum_88++;
792 792 #else
793 793 workaround_warning(cpu, 88);
794 794 missing++;
795 795 #endif
796 796 }
797 797
798 798 if (cpuid_opteron_erratum(cpu, 91) > 0) {
799 799 /*
800 800 * Software Prefetches May Report A Page Fault
801 801 */
802 802 #if defined(OPTERON_ERRATUM_91)
803 803 /*
804 804 * fix is in trap.c
805 805 */
806 806 opteron_erratum_91++;
807 807 #else
808 808 workaround_warning(cpu, 91);
809 809 missing++;
810 810 #endif
811 811 }
812 812
813 813 if (cpuid_opteron_erratum(cpu, 93) > 0) {
814 814 /*
815 815 * RSM Auto-Halt Restart Returns to Incorrect RIP
816 816 */
817 817 #if defined(OPTERON_ERRATUM_93)
818 818 /*
819 819 * fix is in trap.c
820 820 */
821 821 opteron_erratum_93++;
822 822 #else
823 823 workaround_warning(cpu, 93);
824 824 missing++;
825 825 #endif
826 826 }
827 827
828 828 /*LINTED*/
829 829 if (cpuid_opteron_erratum(cpu, 95) > 0) {
830 830 /*
831 831 * RET Instruction May Return to Incorrect EIP
832 832 */
833 833 #if defined(OPTERON_ERRATUM_95)
834 834 #if defined(_LP64)
835 835 /*
836 836 * Workaround this by ensuring that 32-bit user code and
837 837 * 64-bit kernel code never occupy the same address
838 838 * range mod 4G.
839 839 */
840 840 if (_userlimit32 > 0xc0000000ul)
841 841 *(uintptr_t *)&_userlimit32 = 0xc0000000ul;
842 842
843 843 /*LINTED*/
844 844 ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
845 845 opteron_erratum_95++;
846 846 #endif /* _LP64 */
847 847 #else
848 848 workaround_warning(cpu, 95);
849 849 missing++;
850 850 #endif
851 851 }
852 852
853 853 if (cpuid_opteron_erratum(cpu, 100) > 0) {
854 854 /*
855 855 * Compatibility Mode Branches Transfer to Illegal Address
856 856 */
857 857 #if defined(OPTERON_ERRATUM_100)
858 858 /*
859 859 * fix is in trap.c
860 860 */
861 861 opteron_erratum_100++;
862 862 #else
863 863 workaround_warning(cpu, 100);
864 864 missing++;
865 865 #endif
866 866 }
867 867
868 868 /*LINTED*/
869 869 if (cpuid_opteron_erratum(cpu, 108) > 0) {
870 870 /*
871 871 * CPUID Instruction May Return Incorrect Model Number In
872 872 * Some Processors
873 873 */
874 874 #if defined(OPTERON_ERRATUM_108)
875 875 /*
876 876 * (Our cpuid-handling code corrects the model number on
877 877 * those processors)
878 878 */
879 879 #else
880 880 workaround_warning(cpu, 108);
881 881 missing++;
882 882 #endif
883 883 }
884 884
885 885 /*LINTED*/
886 886 if (cpuid_opteron_erratum(cpu, 109) > 0) do {
887 887 /*
888 888 * Certain Reverse REP MOVS May Produce Unpredictable Behavior
889 889 */
890 890 #if defined(OPTERON_ERRATUM_109)
891 891 /*
892 892 * The "workaround" is to print a warning to upgrade the BIOS
893 893 */
894 894 uint64_t value;
895 895 const uint_t msr = MSR_AMD_PATCHLEVEL;
896 896 int err;
897 897
898 898 if ((err = checked_rdmsr(msr, &value)) != 0) {
899 899 msr_warning(cpu, "rd", msr, err);
900 900 workaround_warning(cpu, 109);
901 901 missing++;
902 902 }
903 903 if (value == 0)
904 904 opteron_erratum_109++;
905 905 #else
906 906 workaround_warning(cpu, 109);
907 907 missing++;
908 908 #endif
909 909 /*CONSTANTCONDITION*/
910 910 } while (0);
911 911
912 912 /*LINTED*/
913 913 if (cpuid_opteron_erratum(cpu, 121) > 0) {
914 914 /*
915 915 	 * Sequential Execution Across Non-Canonical Boundary Causes
916 916 * Processor Hang
917 917 */
918 918 #if defined(OPTERON_ERRATUM_121)
919 919 #if defined(_LP64)
920 920 /*
921 921 * Erratum 121 is only present in long (64 bit) mode.
922 922 * Workaround is to include the page immediately before the
923 923 * va hole to eliminate the possibility of system hangs due to
924 924 * sequential execution across the va hole boundary.
925 925 */
926 926 if (opteron_erratum_121)
927 927 opteron_erratum_121++;
928 928 else {
929 929 if (hole_start) {
930 930 hole_start -= PAGESIZE;
931 931 } else {
932 932 /*
933 933 * hole_start not yet initialized by
934 934 * mmu_init. Initialize hole_start
935 935 * with value to be subtracted.
936 936 */
937 937 hole_start = PAGESIZE;
938 938 }
939 939 opteron_erratum_121++;
940 940 }
941 941 #endif /* _LP64 */
942 942 #else
943 943 workaround_warning(cpu, 121);
944 944 missing++;
945 945 #endif
946 946 }
947 947
948 948 /*LINTED*/
949 949 if (cpuid_opteron_erratum(cpu, 122) > 0) do {
950 950 /*
951 951 * TLB Flush Filter May Cause Coherency Problem in
952 952 * Multiprocessor Systems
953 953 */
954 954 #if defined(OPTERON_ERRATUM_122)
955 955 uint64_t value;
956 956 const uint_t msr = MSR_AMD_HWCR;
957 957 int error;
958 958
959 959 /*
960 960 * Erratum 122 is only present in MP configurations (multi-core
961 961 * or multi-processor).
962 962 */
963 963 #if defined(__xpv)
964 964 if (!DOMAIN_IS_INITDOMAIN(xen_info))
965 965 break;
966 966 if (!opteron_erratum_122 && xpv_nr_phys_cpus() == 1)
967 967 break;
968 968 #else
969 969 if (!opteron_erratum_122 && opteron_get_nnodes() == 1 &&
970 970 cpuid_get_ncpu_per_chip(cpu) == 1)
971 971 break;
972 972 #endif
973 973 /* disable TLB Flush Filter */
974 974
975 975 if ((error = checked_rdmsr(msr, &value)) != 0) {
976 976 msr_warning(cpu, "rd", msr, error);
977 977 workaround_warning(cpu, 122);
978 978 missing++;
979 979 } else {
980 980 value |= (uint64_t)AMD_HWCR_FFDIS;
981 981 if ((error = checked_wrmsr(msr, value)) != 0) {
982 982 msr_warning(cpu, "wr", msr, error);
983 983 workaround_warning(cpu, 122);
984 984 missing++;
985 985 }
986 986 }
987 987 opteron_erratum_122++;
988 988 #else
989 989 workaround_warning(cpu, 122);
990 990 missing++;
991 991 #endif
992 992 /*CONSTANTCONDITION*/
993 993 } while (0);
994 994
995 995 /*LINTED*/
996 996 if (cpuid_opteron_erratum(cpu, 123) > 0) do {
997 997 /*
998 998 	 * Bypassed Reads May Cause Data Corruption or System Hang in
999 999 * Dual Core Processors
1000 1000 */
1001 1001 #if defined(OPTERON_ERRATUM_123)
1002 1002 uint64_t value;
1003 1003 const uint_t msr = MSR_AMD_PATCHLEVEL;
1004 1004 int err;
1005 1005
1006 1006 /*
1007 1007 * Erratum 123 applies only to multi-core cpus.
1008 1008 */
1009 1009 if (cpuid_get_ncpu_per_chip(cpu) < 2)
1010 1010 break;
1011 1011 #if defined(__xpv)
1012 1012 if (!DOMAIN_IS_INITDOMAIN(xen_info))
1013 1013 break;
1014 1014 #endif
1015 1015 /*
1016 1016 * The "workaround" is to print a warning to upgrade the BIOS
1017 1017 */
1018 1018 if ((err = checked_rdmsr(msr, &value)) != 0) {
1019 1019 msr_warning(cpu, "rd", msr, err);
1020 1020 workaround_warning(cpu, 123);
1021 1021 missing++;
1022 1022 }
1023 1023 if (value == 0)
1024 1024 opteron_erratum_123++;
1025 1025 #else
1026 1026 workaround_warning(cpu, 123);
1027 1027 missing++;
1028 1028
1029 1029 #endif
1030 1030 /*CONSTANTCONDITION*/
1031 1031 } while (0);
1032 1032
1033 1033 /*LINTED*/
1034 1034 if (cpuid_opteron_erratum(cpu, 131) > 0) do {
1035 1035 /*
1036 1036 * Multiprocessor Systems with Four or More Cores May Deadlock
1037 1037 * Waiting for a Probe Response
1038 1038 */
1039 1039 #if defined(OPTERON_ERRATUM_131)
1040 1040 uint64_t nbcfg;
1041 1041 const uint_t msr = MSR_AMD_NB_CFG;
1042 1042 const uint64_t wabits =
1043 1043 AMD_NB_CFG_SRQ_HEARTBEAT | AMD_NB_CFG_SRQ_SPR;
1044 1044 int error;
1045 1045
1046 1046 /*
1047 1047 * Erratum 131 applies to any system with four or more cores.
1048 1048 */
1049 1049 if (opteron_erratum_131)
1050 1050 break;
1051 1051 #if defined(__xpv)
1052 1052 if (!DOMAIN_IS_INITDOMAIN(xen_info))
1053 1053 break;
1054 1054 if (xpv_nr_phys_cpus() < 4)
1055 1055 break;
1056 1056 #else
1057 1057 if (opteron_get_nnodes() * cpuid_get_ncpu_per_chip(cpu) < 4)
1058 1058 break;
1059 1059 #endif
1060 1060 /*
1061 1061 * Print a warning if neither of the workarounds for
1062 1062 * erratum 131 is present.
1063 1063 */
1064 1064 if ((error = checked_rdmsr(msr, &nbcfg)) != 0) {
1065 1065 msr_warning(cpu, "rd", msr, error);
1066 1066 workaround_warning(cpu, 131);
1067 1067 missing++;
1068 1068 } else if ((nbcfg & wabits) == 0) {
1069 1069 opteron_erratum_131++;
1070 1070 } else {
1071 1071 /* cannot have both workarounds set */
1072 1072 ASSERT((nbcfg & wabits) != wabits);
1073 1073 }
1074 1074 #else
1075 1075 workaround_warning(cpu, 131);
1076 1076 missing++;
1077 1077 #endif
1078 1078 /*CONSTANTCONDITION*/
1079 1079 } while (0);
1080 1080
1081 1081 /*
1082 1082 * This isn't really an erratum, but for convenience the
1083 1083 * detection/workaround code lives here and in cpuid_opteron_erratum.
1084 1084 */
1085 1085 if (cpuid_opteron_erratum(cpu, 6336786) > 0) {
1086 1086 #if defined(OPTERON_WORKAROUND_6336786)
1087 1087 /*
1088 1088 * Disable C1-Clock ramping on multi-core/multi-processor
1089 1089 * K8 platforms to guard against TSC drift.
1090 1090 */
1091 1091 if (opteron_workaround_6336786) {
1092 1092 opteron_workaround_6336786++;
1093 1093 #if defined(__xpv)
1094 1094 } else if ((DOMAIN_IS_INITDOMAIN(xen_info) &&
1095 1095 xpv_nr_phys_cpus() > 1) ||
1096 1096 opteron_workaround_6336786_UP) {
1097 1097 /*
1098 1098 * XXPV Hmm. We can't walk the Northbridges on
1099 1099 * the hypervisor; so just complain and drive
1100 1100 * on. This probably needs to be fixed in
1101 1101 * the hypervisor itself.
1102 1102 */
1103 1103 opteron_workaround_6336786++;
1104 1104 workaround_warning(cpu, 6336786);
1105 1105 #else /* __xpv */
1106 1106 } else if ((opteron_get_nnodes() *
1107 1107 cpuid_get_ncpu_per_chip(cpu) > 1) ||
1108 1108 opteron_workaround_6336786_UP) {
1109 1109
1110 1110 uint_t node, nnodes;
1111 1111 uint8_t data;
1112 1112
1113 1113 nnodes = opteron_get_nnodes();
1114 1114 for (node = 0; node < nnodes; node++) {
1115 1115 /*
1116 1116 * Clear PMM7[1:0] (function 3, offset 0x87)
1117 1117 * Northbridge device is the node id + 24.
1118 1118 */
1119 1119 data = pci_getb_func(0, node + 24, 3, 0x87);
1120 1120 data &= 0xFC;
1121 1121 pci_putb_func(0, node + 24, 3, 0x87, data);
1122 1122 }
1123 1123 opteron_workaround_6336786++;
1124 1124 #endif /* __xpv */
1125 1125 }
1126 1126 #else
1127 1127 workaround_warning(cpu, 6336786);
1128 1128 missing++;
1129 1129 #endif
1130 1130 }
1131 1131
1132 1132 /*LINTED*/
1133 1133 /*
1134 1134 * Mutex primitives don't work as expected.
1135 1135 */
1136 1136 if (cpuid_opteron_erratum(cpu, 6323525) > 0) {
1137 1137 #if defined(OPTERON_WORKAROUND_6323525)
1138 1138 /*
1139 1139 	 * This problem only occurs with 2 or more cores. If the bit in
1140 1140 	 * MSR_AMD_BU_CFG is set, then it is not applicable. The workaround
1141 1141 	 * is to patch the semaphore routines with the lfence
1142 1142 	 * instruction to provide the necessary load memory barrier with
1143 1143 	 * possible subsequent read-modify-write ops.
1144 1144 	 *
1145 1145 	 * It is too early in boot to call the patch routine, so
1146 1146 	 * set the erratum variable; the patching is done in startup_end().
1147 1147 */
1148 1148 if (opteron_workaround_6323525) {
1149 1149 opteron_workaround_6323525++;
1150 1150 #if defined(__xpv)
1151 1151 } else if (is_x86_feature(x86_featureset, X86FSET_SSE2)) {
1152 1152 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1153 1153 /*
1154 1154 * XXPV Use dom0_msr here when extended
1155 1155 * operations are supported?
1156 1156 */
1157 1157 if (xpv_nr_phys_cpus() > 1)
1158 1158 opteron_workaround_6323525++;
1159 1159 } else {
1160 1160 /*
1161 1161 * We have no way to tell how many physical
1162 1162 * cpus there are, or even if this processor
1163 1163 * has the problem, so enable the workaround
1164 1164 * unconditionally (at some performance cost).
1165 1165 */
1166 1166 opteron_workaround_6323525++;
1167 1167 }
1168 1168 #else /* __xpv */
1169 1169 } else if (is_x86_feature(x86_featureset, X86FSET_SSE2) &&
1170 1170 ((opteron_get_nnodes() *
1171 1171 cpuid_get_ncpu_per_chip(cpu)) > 1)) {
1172 1172 if ((xrdmsr(MSR_AMD_BU_CFG) & (UINT64_C(1) << 33)) == 0)
1173 1173 opteron_workaround_6323525++;
1174 1174 #endif /* __xpv */
1175 1175 }
1176 1176 #else
1177 1177 workaround_warning(cpu, 6323525);
1178 1178 missing++;
1179 1179 #endif
1180 1180 }
1181 1181
1182 1182 missing += do_erratum_298(cpu);
1183 1183
1184 1184 if (cpuid_opteron_erratum(cpu, 721) > 0) {
1185 1185 #if defined(OPTERON_ERRATUM_721)
1186 1186 on_trap_data_t otd;
1187 1187
1188 1188 if (!on_trap(&otd, OT_DATA_ACCESS))
1189 1189 wrmsr(MSR_AMD_DE_CFG,
1190 1190 rdmsr(MSR_AMD_DE_CFG) | AMD_DE_CFG_E721);
1191 1191 no_trap();
1192 1192
1193 1193 opteron_erratum_721++;
1194 1194 #else
1195 1195 workaround_warning(cpu, 721);
1196 1196 missing++;
1197 1197 #endif
1198 1198 }
1199 1199
1200 1200 #ifdef __xpv
1201 1201 return (0);
1202 1202 #else
1203 1203 return (missing);
1204 1204 #endif
1205 1205 }
1206 1206
1207 1207 void
1208 1208 workaround_errata_end()
1209 1209 {
1210 1210 #if defined(OPTERON_ERRATUM_88)
1211 1211 if (opteron_erratum_88)
1212 1212 workaround_applied(88);
1213 1213 #endif
1214 1214 #if defined(OPTERON_ERRATUM_91)
1215 1215 if (opteron_erratum_91)
1216 1216 workaround_applied(91);
1217 1217 #endif
1218 1218 #if defined(OPTERON_ERRATUM_93)
1219 1219 if (opteron_erratum_93)
1220 1220 workaround_applied(93);
1221 1221 #endif
1222 1222 #if defined(OPTERON_ERRATUM_95)
1223 1223 if (opteron_erratum_95)
1224 1224 workaround_applied(95);
1225 1225 #endif
1226 1226 #if defined(OPTERON_ERRATUM_100)
1227 1227 if (opteron_erratum_100)
1228 1228 workaround_applied(100);
1229 1229 #endif
1230 1230 #if defined(OPTERON_ERRATUM_108)
1231 1231 if (opteron_erratum_108)
1232 1232 workaround_applied(108);
1233 1233 #endif
1234 1234 #if defined(OPTERON_ERRATUM_109)
1235 1235 if (opteron_erratum_109) {
1236 1236 cmn_err(CE_WARN,
1237 1237 "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
1238 1238 " processor\nerratum 109 was not detected; updating your"
1239 1239 " system's BIOS to a version\ncontaining this"
1240 1240 " microcode patch is HIGHLY recommended or erroneous"
1241 1241 " system\noperation may occur.\n");
1242 1242 }
1243 1243 #endif
1244 1244 #if defined(OPTERON_ERRATUM_121)
1245 1245 if (opteron_erratum_121)
1246 1246 workaround_applied(121);
1247 1247 #endif
1248 1248 #if defined(OPTERON_ERRATUM_122)
1249 1249 if (opteron_erratum_122)
1250 1250 workaround_applied(122);
1251 1251 #endif
1252 1252 #if defined(OPTERON_ERRATUM_123)
1253 1253 if (opteron_erratum_123) {
1254 1254 cmn_err(CE_WARN,
1255 1255 "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
1256 1256 " processor\nerratum 123 was not detected; updating your"
1257 1257 " system's BIOS to a version\ncontaining this"
1258 1258 " microcode patch is HIGHLY recommended or erroneous"
1259 1259 " system\noperation may occur.\n");
1260 1260 }
1261 1261 #endif
1262 1262 #if defined(OPTERON_ERRATUM_131)
1263 1263 if (opteron_erratum_131) {
1264 1264 cmn_err(CE_WARN,
1265 1265 "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
1266 1266 " processor\nerratum 131 was not detected; updating your"
1267 1267 " system's BIOS to a version\ncontaining this"
1268 1268 " microcode patch is HIGHLY recommended or erroneous"
1269 1269 " system\noperation may occur.\n");
1270 1270 }
1271 1271 #endif
1272 1272 #if defined(OPTERON_WORKAROUND_6336786)
1273 1273 if (opteron_workaround_6336786)
1274 1274 workaround_applied(6336786);
1275 1275 #endif
1276 1276 #if defined(OPTERON_WORKAROUND_6323525)
1277 1277 if (opteron_workaround_6323525)
1278 1278 workaround_applied(6323525);
1279 1279 #endif
1280 1280 #if defined(OPTERON_ERRATUM_298)
1281 1281 if (opteron_erratum_298) {
1282 1282 cmn_err(CE_WARN,
1283 1283 "BIOS microcode patch for AMD 64/Opteron(tm)"
1284 1284 " processor\nerratum 298 was not detected; updating your"
1285 1285 " system's BIOS to a version\ncontaining this"
1286 1286 " microcode patch is HIGHLY recommended or erroneous"
1287 1287 " system\noperation may occur.\n");
1288 1288 }
1289 1289 #endif
1290 1290 #if defined(OPTERON_ERRATUM_721)
1291 1291 if (opteron_erratum_721)
1292 1292 workaround_applied(721);
1293 1293 #endif
1294 1294 }
1295 1295
1296 1296 /*
1297 1297 * The procset_slave and procset_master are used to synchronize
1298 1298 * between the control CPU and the target CPU when starting CPUs.
1299 1299 */
1300 1300 static cpuset_t procset_slave, procset_master;
1301 1301
1302 1302 static void
1303 1303 mp_startup_wait(cpuset_t *sp, processorid_t cpuid)
1304 1304 {
1305 1305 cpuset_t tempset;
1306 1306
1307 1307 for (tempset = *sp; !CPU_IN_SET(tempset, cpuid);
1308 1308 tempset = *(volatile cpuset_t *)sp) {
1309 1309 SMT_PAUSE();
1310 1310 }
1311 1311 CPUSET_ATOMIC_DEL(*(cpuset_t *)sp, cpuid);
1312 1312 }
1313 1313
1314 1314 static void
1315 1315 mp_startup_signal(cpuset_t *sp, processorid_t cpuid)
1316 1316 {
1317 1317 cpuset_t tempset;
1318 1318
1319 1319 CPUSET_ATOMIC_ADD(*(cpuset_t *)sp, cpuid);
1320 1320 for (tempset = *sp; CPU_IN_SET(tempset, cpuid);
1321 1321 tempset = *(volatile cpuset_t *)sp) {
1322 1322 SMT_PAUSE();
1323 1323 }
1324 1324 }
1325 1325
1326 1326 int
1327 1327 mp_start_cpu_common(cpu_t *cp, boolean_t boot)
1328 1328 {
1329 1329 _NOTE(ARGUNUSED(boot));
1330 1330
1331 1331 void *ctx;
1332 1332 int delays;
1333 1333 int error = 0;
1334 1334 cpuset_t tempset;
1335 1335 processorid_t cpuid;
1336 1336 #ifndef __xpv
1337 1337 extern void cpupm_init(cpu_t *);
1338 1338 #endif
1339 1339
1340 1340 ASSERT(cp != NULL);
1341 1341 cpuid = cp->cpu_id;
1342 1342 ctx = mach_cpucontext_alloc(cp);
1343 1343 if (ctx == NULL) {
1344 1344 cmn_err(CE_WARN,
1345 1345 "cpu%d: failed to allocate context", cp->cpu_id);
1346 1346 return (EAGAIN);
1347 1347 }
1348 1348 error = mach_cpu_start(cp, ctx);
1349 1349 if (error != 0) {
1350 1350 cmn_err(CE_WARN,
1351 1351 "cpu%d: failed to start, error %d", cp->cpu_id, error);
1352 1352 mach_cpucontext_free(cp, ctx, error);
1353 1353 return (error);
1354 1354 }
1355 1355
1356 1356 for (delays = 0, tempset = procset_slave; !CPU_IN_SET(tempset, cpuid);
1357 1357 delays++) {
1358 1358 if (delays == 500) {
1359 1359 /*
1360 1360 * After five seconds, things are probably looking
1361 1361 * a bit bleak - explain the hang.
1362 1362 */
1363 1363 cmn_err(CE_NOTE, "cpu%d: started, "
1364 1364 "but not running in the kernel yet", cpuid);
1365 1365 } else if (delays > 2000) {
1366 1366 /*
1367 1367 * We waited at least 20 seconds, bail ..
1368 1368 */
1369 1369 error = ETIMEDOUT;
1370 1370 cmn_err(CE_WARN, "cpu%d: timed out", cpuid);
1371 1371 mach_cpucontext_free(cp, ctx, error);
1372 1372 return (error);
1373 1373 }
1374 1374
1375 1375 /*
1376 1376 * wait at least 10ms, then check again..
1377 1377 */
1378 1378 delay(USEC_TO_TICK_ROUNDUP(10000));
1379 1379 tempset = *((volatile cpuset_t *)&procset_slave);
1380 1380 }
1381 1381 CPUSET_ATOMIC_DEL(procset_slave, cpuid);
1382 1382
1383 1383 mach_cpucontext_free(cp, ctx, 0);
1384 1384
1385 1385 #ifndef __xpv
1386 1386 if (tsc_gethrtime_enable)
1387 1387 tsc_sync_master(cpuid);
1388 1388 #endif
1389 1389
1390 1390 if (dtrace_cpu_init != NULL) {
1391 1391 (*dtrace_cpu_init)(cpuid);
1392 1392 }
1393 1393
1394 1394 /*
1395 1395 * During CPU DR operations, the cpu_lock is held by current
1396 1396 * (the control) thread. We can't release the cpu_lock here
1397 1397 * because that will break the CPU DR logic.
1398 1398 * On the other hand, CPUPM and processor group initialization
1399 1399 * routines need to access the cpu_lock. So we invoke those
1400 1400 * routines here on behalf of mp_startup_common().
1401 1401 *
1402 1402 * CPUPM and processor group initialization routines depend
1403 1403 * on the cpuid probing results. Wait for mp_startup_common()
1404 1404 * to signal that cpuid probing is done.
1405 1405 */
1406 1406 mp_startup_wait(&procset_slave, cpuid);
1407 1407 #ifndef __xpv
1408 1408 cpupm_init(cp);
1409 1409 #endif
1410 1410 (void) pg_cpu_init(cp, B_FALSE);
1411 1411 cpu_set_state(cp);
1412 1412 mp_startup_signal(&procset_master, cpuid);
1413 1413
1414 1414 return (0);
1415 1415 }
1416 1416
1417 1417 /*
1418 1418 * Start a single cpu, assuming that the kernel context is available
1419 1419 * to successfully start another cpu.
1420 1420 *
1421 1421 * (For example, real mode code is mapped into the right place
1422 1422 * in memory and is ready to be run.)
1423 1423 */
1424 1424 int
1425 1425 start_cpu(processorid_t who)
1426 1426 {
1427 1427 cpu_t *cp;
1428 1428 int error = 0;
1429 1429 cpuset_t tempset;
1430 1430
1431 1431 ASSERT(who != 0);
1432 1432
1433 1433 /*
1434 1434 * Check if there's at least a Mbyte of kmem available
1435 1435 * before attempting to start the cpu.
1436 1436 */
1437 1437 if (kmem_avail() < 1024 * 1024) {
1438 1438 /*
1439 1439 * Kick off a reap in case that helps us with
1440 1440 * later attempts ..
1441 1441 */
1442 1442 kmem_reap();
1443 1443 return (ENOMEM);
1444 1444 }
1445 1445
1446 1446 /*
1447 1447 * First configure cpu.
1448 1448 */
1449 1449 cp = mp_cpu_configure_common(who, B_TRUE);
1450 1450 ASSERT(cp != NULL);
1451 1451
1452 1452 /*
1453 1453 * Then start cpu.
1454 1454 */
1455 1455 error = mp_start_cpu_common(cp, B_TRUE);
1456 1456 if (error != 0) {
1457 1457 mp_cpu_unconfigure_common(cp, error);
1458 1458 return (error);
1459 1459 }
1460 1460
1461 1461 mutex_exit(&cpu_lock);
1462 1462 tempset = cpu_ready_set;
1463 1463 while (!CPU_IN_SET(tempset, who)) {
1464 1464 drv_usecwait(1);
1465 1465 tempset = *((volatile cpuset_t *)&cpu_ready_set);
1466 1466 }
1467 1467 mutex_enter(&cpu_lock);
1468 1468
1469 1469 return (0);
1470 1470 }
1471 1471
1472 1472 void
1473 1473 start_other_cpus(int cprboot)
1474 1474 {
1475 1475 _NOTE(ARGUNUSED(cprboot));
1476 1476
1477 1477 uint_t who;
1478 1478 uint_t bootcpuid = 0;
1479 1479
1480 1480 /*
1481 1481 * Initialize our own cpu_info.
1482 1482 */
1483 1483 init_cpu_info(CPU);
1484 1484
1485 1485 cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_idstr);
1486 1486 cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_brandstr);
1487 1487
1488 1488 /*
1489 1489 * Initialize our syscall handlers
1490 1490 */
1491 1491 init_cpu_syscall(CPU);
1492 1492
1493 1493 /*
1494 1494 * Take the boot cpu out of the mp_cpus set because we know
1495 1495 * it's already running. Add it to the cpu_ready_set for
1496 1496 * precisely the same reason.
1497 1497 */
1498 1498 CPUSET_DEL(mp_cpus, bootcpuid);
1499 1499 CPUSET_ADD(cpu_ready_set, bootcpuid);
1500 1500
1501 1501 /*
1502 1502 * skip the rest of this if
1503 1503 	 * . only 1 cpu detected and system isn't hotplug-capable
1504 1504 * . not using MP
1505 1505 */
1506 1506 if ((CPUSET_ISNULL(mp_cpus) && plat_dr_support_cpu() == 0) ||
1507 1507 use_mp == 0) {
1508 1508 if (use_mp == 0)
1509 1509 cmn_err(CE_CONT, "?***** Not in MP mode\n");
1510 1510 goto done;
1511 1511 }
1512 1512
1513 1513 /*
1514 1514 * perform such initialization as is needed
1515 1515 * to be able to take CPUs on- and off-line.
1516 1516 */
1517 1517 cpu_pause_init();
1518 1518
1519 1519 xc_init_cpu(CPU); /* initialize processor crosscalls */
1520 1520
1521 1521 if (mach_cpucontext_init() != 0)
1522 1522 goto done;
1523 1523
1524 1524 flushes_require_xcalls = 1;
1525 1525
1526 1526 /*
1527 1527 * We lock our affinity to the master CPU to ensure that all slave CPUs
1528 1528 * do their TSC syncs with the same CPU.
1529 1529 */
1530 1530 affinity_set(CPU_CURRENT);
1531 1531
1532 1532 for (who = 0; who < NCPU; who++) {
1533 1533 if (!CPU_IN_SET(mp_cpus, who))
1534 1534 continue;
1535 1535 ASSERT(who != bootcpuid);
1536 1536
1537 1537 mutex_enter(&cpu_lock);
1538 1538 if (start_cpu(who) != 0)
1539 1539 CPUSET_DEL(mp_cpus, who);
1540 1540 cpu_state_change_notify(who, CPU_SETUP);
1541 1541 mutex_exit(&cpu_lock);
1542 1542 }
1543 1543
1544 1544 /* Free the space allocated to hold the microcode file */
1545 1545 ucode_cleanup();
1546 1546
1547 1547 affinity_clear();
1548 1548
1549 1549 mach_cpucontext_fini();
1550 1550
1551 1551 done:
1552 1552 if (get_hwenv() == HW_NATIVE)
1553 1553 workaround_errata_end();
1554 1554 cmi_post_mpstartup();
1555 1555
1556 1556 if (use_mp && ncpus != boot_max_ncpus) {
1557 1557 cmn_err(CE_NOTE,
1558 1558 "System detected %d cpus, but "
1559 1559 "only %d cpu(s) were enabled during boot.",
1560 1560 boot_max_ncpus, ncpus);
1561 1561 cmn_err(CE_NOTE,
1562 1562 "Use \"boot-ncpus\" parameter to enable more CPU(s). "
1563 1563 "See eeprom(1M).");
1564 1564 }
1565 1565 }
1566 1566
1567 1567 int
1568 1568 mp_cpu_configure(int cpuid)
1569 1569 {
1570 1570 cpu_t *cp;
1571 1571
1572 1572 if (use_mp == 0 || plat_dr_support_cpu() == 0) {
1573 1573 return (ENOTSUP);
1574 1574 }
1575 1575
1576 1576 cp = cpu_get(cpuid);
1577 1577 if (cp != NULL) {
1578 1578 return (EALREADY);
1579 1579 }
1580 1580
1581 1581 /*
1582 1582 * Check if there's at least a Mbyte of kmem available
1583 1583 * before attempting to start the cpu.
1584 1584 */
1585 1585 if (kmem_avail() < 1024 * 1024) {
1586 1586 /*
1587 1587 * Kick off a reap in case that helps us with
1588 1588 * later attempts ..
1589 1589 */
1590 1590 kmem_reap();
1591 1591 return (ENOMEM);
1592 1592 }
1593 1593
1594 1594 cp = mp_cpu_configure_common(cpuid, B_FALSE);
1595 1595 ASSERT(cp != NULL && cpu_get(cpuid) == cp);
1596 1596
1597 1597 return (cp != NULL ? 0 : EAGAIN);
1598 1598 }
1599 1599
1600 1600 int
1601 1601 mp_cpu_unconfigure(int cpuid)
1602 1602 {
1603 1603 cpu_t *cp;
1604 1604
1605 1605 if (use_mp == 0 || plat_dr_support_cpu() == 0) {
1606 1606 return (ENOTSUP);
1607 1607 } else if (cpuid < 0 || cpuid >= max_ncpus) {
1608 1608 return (EINVAL);
1609 1609 }
1610 1610
1611 1611 cp = cpu_get(cpuid);
1612 1612 if (cp == NULL) {
1613 1613 return (ENODEV);
1614 1614 }
1615 1615 mp_cpu_unconfigure_common(cp, 0);
1616 1616
1617 1617 return (0);
1618 1618 }
1619 1619
1620 1620 /*
1621 1621 * Startup function for 'other' CPUs (besides boot cpu).
1622 1622 * Called from real_mode_start.
1623 1623 *
1624 1624 * WARNING: until CPU_READY is set, mp_startup_common and routines called by
1625 1625 * mp_startup_common should not call routines (e.g. kmem_free) that could call
1626 1626 * hat_unload which requires CPU_READY to be set.
1627 1627 */
1628 1628 static void
1629 1629 mp_startup_common(boolean_t boot)
1630 1630 {
1631 1631 cpu_t *cp = CPU;
1632 1632 uchar_t new_x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
1633 1633 extern void cpu_event_init_cpu(cpu_t *);
1634 1634
1635 1635 /*
1636 1636 * We need to get TSC on this proc synced (i.e., any delta
1637 1637 * from cpu0 accounted for) as soon as we can, because many
1638 1638 * many things use gethrtime/pc_gethrestime, including
1639 1639 * interrupts, cmn_err, etc. Before we can do that, we want to
1640 1640 * clear TSC if we're on a buggy Sandy/Ivy Bridge CPU, so do that
1641 1641 * right away.
1642 1642 */
1643 1643 bzero(new_x86_featureset, BT_SIZEOFMAP(NUM_X86_FEATURES));
1644 1644 cpuid_pass1(cp, new_x86_featureset);
1645 1645
1646 1646 if (boot && get_hwenv() == HW_NATIVE &&
1647 1647 cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
1648 1648 cpuid_getfamily(CPU) == 6 &&
1649 1649 (cpuid_getmodel(CPU) == 0x2d || cpuid_getmodel(CPU) == 0x3e) &&
1650 1650 is_x86_feature(new_x86_featureset, X86FSET_TSC)) {
1651 1651 (void) wrmsr(REG_TSC, 0UL);
1652 1652 }
1653 1653
1654 1654 /* Let the control CPU continue into tsc_sync_master() */
1655 1655 mp_startup_signal(&procset_slave, cp->cpu_id);
1656 1656
1657 1657 #ifndef __xpv
1658 1658 if (tsc_gethrtime_enable)
1659 1659 tsc_sync_slave();
1660 1660 #endif
1661 1661
1662 1662 /*
1663 1663 * Once this was done from assembly, but it's safer here; if
1664 1664 * it blocks, we need to be able to swtch() to and from, and
1665 1665 * since we get here by calling t_pc, we need to do that call
1666 1666 * before swtch() overwrites it.
1667 1667 */
1668 1668 (void) (*ap_mlsetup)();
1669 1669
1670 1670 #ifndef __xpv
1671 1671 /*
1672 1672 * Program this cpu's PAT
1673 1673 */
1674 - if (is_x86_feature(x86_featureset, X86FSET_PAT))
1675 - pat_sync();
1674 + pat_sync();
1676 1675 #endif
1677 1676
1678 1677 /*
1679 1678 * Set up TSC_AUX to contain the cpuid for this processor
1680 1679 * for the rdtscp instruction.
1681 1680 */
1682 1681 if (is_x86_feature(x86_featureset, X86FSET_TSCP))
1683 1682 (void) wrmsr(MSR_AMD_TSCAUX, cp->cpu_id);
1684 1683
1685 1684 /*
1686 1685 * Initialize this CPU's syscall handlers
1687 1686 */
1688 1687 init_cpu_syscall(cp);
1689 1688
1690 1689 /*
1691 1690 * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the
1692 1691 * highest level at which a routine is permitted to block on
1693 1692 * an adaptive mutex (allows for cpu poke interrupt in case
1694 1693 * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks
1695 1694 * device interrupts that may end up in the hat layer issuing cross
1696 1695 * calls before CPU_READY is set.
1697 1696 */
1698 1697 splx(ipltospl(LOCK_LEVEL));
1699 1698 sti();
1700 1699
1701 1700 /*
1702 1701 * Do a sanity check to make sure this new CPU is a sane thing
1703 1702 * to add to the collection of processors running this system.
1704 1703 *
1705 1704 * XXX Clearly this needs to get more sophisticated, if x86
1706 1705 	 * systems start to get built out of heterogeneous CPUs, as is
1707 1706 * likely to happen once the number of processors in a configuration
1708 1707 * gets large enough.
1709 1708 */
1710 1709 if (compare_x86_featureset(x86_featureset, new_x86_featureset) ==
1711 1710 B_FALSE) {
1712 1711 cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
1713 1712 print_x86_featureset(new_x86_featureset);
1714 1713 cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
1715 1714 }
1716 1715
1717 1716 /*
1718 1717 * We do not support cpus with mixed monitor/mwait support if the
1719 1718 * boot cpu supports monitor/mwait.
1720 1719 */
1721 1720 if (is_x86_feature(x86_featureset, X86FSET_MWAIT) !=
1722 1721 is_x86_feature(new_x86_featureset, X86FSET_MWAIT))
1723 1722 panic("unsupported mixed cpu monitor/mwait support detected");
1724 1723
1725 1724 /*
1726 1725 * We could be more sophisticated here, and just mark the CPU
1727 1726 * as "faulted" but at this point we'll opt for the easier
1728 1727 * answer of dying horribly. Provided the boot cpu is ok,
1729 1728 * the system can be recovered by booting with use_mp set to zero.
1730 1729 */
1731 1730 if (workaround_errata(cp) != 0)
1732 1731 panic("critical workaround(s) missing for cpu%d", cp->cpu_id);
1733 1732
1734 1733 /*
1735 1734 	 * We can touch cpu_flags here without acquiring the cpu_lock
1736 1735 	 * because the cpu_lock is held by the control CPU which is running
1737 1736 	 * mp_start_cpu_common().
1738 1737 	 * Need to clear the CPU_QUIESCED flag before calling any function
1739 1738 	 * which may cause thread context switching, such as kmem_alloc() etc.
1740 1739 	 * The idle thread checks for the CPU_QUIESCED flag and loops forever
1741 1740 	 * if it's set. So the startup thread may have no chance to switch
1742 1741 	 * back again if it's switched away with CPU_QUIESCED set.
1743 1742 */
1744 1743 cp->cpu_flags &= ~(CPU_POWEROFF | CPU_QUIESCED);
1745 1744
1746 1745 /*
1747 1746 * Setup this processor for XSAVE.
1748 1747 */
1749 1748 if (fp_save_mech == FP_XSAVE) {
1750 1749 xsave_setup_msr(cp);
1751 1750 }
1752 1751
1753 1752 cpuid_pass2(cp);
1754 1753 cpuid_pass3(cp);
1755 1754 cpuid_pass4(cp, NULL);
1756 1755
1757 1756 /*
1758 1757 * Correct cpu_idstr and cpu_brandstr on target CPU after
1759 1758 * cpuid_pass1() is done.
1760 1759 */
1761 1760 (void) cpuid_getidstr(cp, cp->cpu_idstr, CPU_IDSTRLEN);
1762 1761 (void) cpuid_getbrandstr(cp, cp->cpu_brandstr, CPU_IDSTRLEN);
1763 1762
1764 1763 cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS;
1765 1764
1766 1765 post_startup_cpu_fixups();
1767 1766
1768 1767 cpu_event_init_cpu(cp);
1769 1768
1770 1769 /*
1771 1770 	 * Enable preemption here so that this thread can be preempted while
1772 1771 	 * it contends for locks acquired later in mp_startup_common, in case
1773 1772 	 * the threads owning those locks are continuously executing on other
1774 1773 	 * CPUs (for example, this CPU must be preemptible to allow other CPUs
1775 1774 	 * to pause it during their startup phases). It's safe to enable
1776 1775 	 * preemption here because the CPU state is pretty much fully constructed.
1777 1776 */
1778 1777 curthread->t_preempt = 0;
1779 1778
1780 1779 	/* The base spl should still be at LOCK_LEVEL here */
1781 1780 ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
1782 1781 set_base_spl(); /* Restore the spl to its proper value */
1783 1782
1784 1783 pghw_physid_create(cp);
1785 1784 /*
1786 1785 	 * Delegate initialization tasks that need to access the cpu_lock
1787 1786 * to mp_start_cpu_common() because we can't acquire the cpu_lock here
1788 1787 * during CPU DR operations.
1789 1788 */
1790 1789 mp_startup_signal(&procset_slave, cp->cpu_id);
1791 1790 mp_startup_wait(&procset_master, cp->cpu_id);
1792 1791 pg_cmt_cpu_startup(cp);
1793 1792
1794 1793 if (boot) {
1795 1794 mutex_enter(&cpu_lock);
1796 1795 cp->cpu_flags &= ~CPU_OFFLINE;
1797 1796 cpu_enable_intr(cp);
1798 1797 cpu_add_active(cp);
1799 1798 mutex_exit(&cpu_lock);
1800 1799 }
1801 1800
1802 1801 /* Enable interrupts */
1803 1802 (void) spl0();
1804 1803
1805 1804 /*
1806 1805 * Fill out cpu_ucode_info. Update microcode if necessary.
1807 1806 */
1808 1807 ucode_check(cp);
1809 1808
1810 1809 #ifndef __xpv
1811 1810 {
1812 1811 /*
1813 1812 * Set up the CPU module for this CPU. This can't be done
1814 1813 * before this CPU is made CPU_READY, because we may (in
1815 1814 * heterogeneous systems) need to go load another CPU module.
1816 1815 * The act of attempting to load a module may trigger a
1817 1816 * cross-call, which will ASSERT unless this cpu is CPU_READY.
1818 1817 */
1819 1818 cmi_hdl_t hdl;
1820 1819
1821 1820 if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU),
1822 1821 cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) != NULL) {
1823 1822 if (is_x86_feature(x86_featureset, X86FSET_MCA))
1824 1823 cmi_mca_init(hdl);
1825 1824 cp->cpu_m.mcpu_cmi_hdl = hdl;
1826 1825 }
1827 1826 }
1828 1827 #endif /* __xpv */
1829 1828
1830 1829 if (boothowto & RB_DEBUG)
1831 1830 kdi_cpu_init();
1832 1831
1833 1832 /*
1834 1833 * Setting the bit in cpu_ready_set must be the last operation in
1835 1834 * processor initialization; the boot CPU will continue to boot once
1836 1835 * it sees this bit set for all active CPUs.
1837 1836 */
1838 1837 CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);
1839 1838
1840 1839 (void) mach_cpu_create_device_node(cp, NULL);
1841 1840
1842 1841 cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);
1843 1842 cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
1844 1843 cmn_err(CE_CONT, "?cpu%d initialization complete - online\n",
1845 1844 cp->cpu_id);
1846 1845
1847 1846 /*
1848 1847 * Now we are done with the startup thread, so free it up.
1849 1848 */
1850 1849 thread_exit();
1851 1850 panic("mp_startup: cannot return");
1852 1851 /*NOTREACHED*/
1853 1852 }
1854 1853
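A minimal sketch of the other half of the cpu_ready_set handshake that ends
mp_startup_common() above. This is not the verbatim boot-CPU code (which lives
elsewhere in this file); the function name wait_cpu_ready is hypothetical,
while CPU_IN_SET and DELAY are the usual primitives.

	static void
	wait_cpu_ready(processorid_t who)
	{
		/*
		 * Spin until the new CPU runs CPUSET_ATOMIC_ADD() above;
		 * that store is the final step of its initialization.
		 */
		while (!CPU_IN_SET(cpu_ready_set, who))
			DELAY(1);
	}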
1855 1854 /*
1856 1855 * Startup function for 'other' CPUs at boot time (besides boot cpu).
1857 1856 */
1858 1857 static void
1859 1858 mp_startup_boot(void)
1860 1859 {
1861 1860 mp_startup_common(B_TRUE);
1862 1861 }
1863 1862
1864 1863 /*
1865 1864 * Startup function for hotplug CPUs at runtime.
1866 1865 */
1867 1866 void
1868 1867 mp_startup_hotplug(void)
1869 1868 {
1870 1869 mp_startup_common(B_FALSE);
1871 1870 }
1872 1871
1873 1872 /*
1874 1873 * Start CPU on user request.
1875 1874 */
1876 1875 /* ARGSUSED */
1877 1876 int
1878 1877 mp_cpu_start(struct cpu *cp)
1879 1878 {
1880 1879 ASSERT(MUTEX_HELD(&cpu_lock));
1881 1880 return (0);
1882 1881 }
1883 1882
1884 1883 /*
1885 1884 * Stop CPU on user request.
1886 1885 */
1887 1886 int
1888 1887 mp_cpu_stop(struct cpu *cp)
1889 1888 {
1890 1889 extern int cbe_psm_timer_mode;
1891 1890 ASSERT(MUTEX_HELD(&cpu_lock));
1892 1891
1893 1892 #ifdef __xpv
1894 1893 /*
1895 1894 * We can't offline vcpu0.
1896 1895 */
1897 1896 if (cp->cpu_id == 0)
1898 1897 return (EBUSY);
1899 1898 #endif
1900 1899
1901 1900 /*
1902 1901 	 * If TIMER_PERIODIC mode is in use, CPU0 is the CPU driving it, so
1903 1902 	 * it can't be stopped. (This is true only for machines with no TSC.)
1904 1903 */
1905 1904
1906 1905 if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
1907 1906 return (EBUSY);
1908 1907
1909 1908 return (0);
1910 1909 }
1911 1910
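A hedged caller sketch: mp_cpu_stop() is a veto hook (note the ASSERT that
cpu_lock is held), so a caller validates the request under cpu_lock before
doing any actual work. try_stop_cpu and its body are illustrative only, not
the real p_online() path.

	static int
	try_stop_cpu(struct cpu *cp)
	{
		int error;

		mutex_enter(&cpu_lock);
		error = mp_cpu_stop(cp);	/* EBUSY: CPU must stay up */
		if (error == 0) {
			/* ... proceed with the platform stop sequence ... */
		}
		mutex_exit(&cpu_lock);
		return (error);
	}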
1912 1911 /*
1913 1912 * Take the specified CPU out of participation in interrupts.
1914 1913 */
1915 1914 int
1916 1915 cpu_disable_intr(struct cpu *cp)
1917 1916 {
1918 1917 if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
1919 1918 return (EBUSY);
1920 1919
1921 1920 cp->cpu_flags &= ~CPU_ENABLE;
1922 1921 return (0);
1923 1922 }
1924 1923
1925 1924 /*
1926 1925 * Allow the specified CPU to participate in interrupts.
1927 1926 */
1928 1927 void
1929 1928 cpu_enable_intr(struct cpu *cp)
1930 1929 {
1931 1930 ASSERT(MUTEX_HELD(&cpu_lock));
1932 1931 cp->cpu_flags |= CPU_ENABLE;
1933 1932 psm_enable_intr(cp->cpu_id);
1934 1933 }
1935 1934
1936 1935 void
1937 1936 mp_cpu_faulted_enter(struct cpu *cp)
1938 1937 {
1939 1938 #ifdef __xpv
1940 1939 _NOTE(ARGUNUSED(cp));
1941 1940 #else
1942 1941 cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;
1943 1942
1944 1943 if (hdl != NULL) {
1945 1944 cmi_hdl_hold(hdl);
1946 1945 } else {
1947 1946 hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
1948 1947 cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
1949 1948 }
1950 1949 if (hdl != NULL) {
1951 1950 cmi_faulted_enter(hdl);
1952 1951 cmi_hdl_rele(hdl);
1953 1952 }
1954 1953 #endif
1955 1954 }
1956 1955
1957 1956 void
1958 1957 mp_cpu_faulted_exit(struct cpu *cp)
1959 1958 {
1960 1959 #ifdef __xpv
1961 1960 _NOTE(ARGUNUSED(cp));
1962 1961 #else
1963 1962 cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;
1964 1963
1965 1964 if (hdl != NULL) {
1966 1965 cmi_hdl_hold(hdl);
1967 1966 } else {
1968 1967 hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
1969 1968 cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
1970 1969 }
1971 1970 if (hdl != NULL) {
1972 1971 cmi_faulted_exit(hdl);
1973 1972 cmi_hdl_rele(hdl);
1974 1973 }
1975 1974 #endif
1976 1975 }
1977 1976
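The two routines above share one handle-acquisition idiom: prefer the handle
registered at startup (adding a hold), otherwise look one up (cmi_hdl_lookup()
returns a held handle), and balance with cmi_hdl_rele() when done. A sketch of
a hypothetical helper that factors the common part out:

	static cmi_hdl_t
	mcpu_cmi_hold(struct cpu *cp)
	{
		cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;

		if (hdl != NULL)
			cmi_hdl_hold(hdl);	/* registered at startup */
		else
			hdl = cmi_hdl_lookup(CMI_HDL_NATIVE,
			    cmi_ntv_hwchipid(cp), cmi_ntv_hwcoreid(cp),
			    cmi_ntv_hwstrandid(cp));
		return (hdl);	/* NULL if none; otherwise held */
	}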
1978 1977 /*
1979 1978 * The following two routines are used as context operators on threads belonging
1980 1979 * to processes with a private LDT (see sysi86). Due to the rarity of such
1981 1980 * processes, these routines are currently written for best code readability and
1982 1981 * organization rather than speed. We could avoid checking x86_featureset at
1983 1982 * every context switch by installing different context ops, depending on
1984 1983 * x86_featureset, at LDT creation time -- one for each combination of fast
1985 1984 	 * syscall features; a sketch of that alternative follows these routines.
1986 1985 */
1987 1986
1988 1987 /*ARGSUSED*/
1989 1988 void
1990 1989 cpu_fast_syscall_disable(void *arg)
1991 1990 {
1992 1991 if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
1993 1992 is_x86_feature(x86_featureset, X86FSET_SEP))
1994 1993 cpu_sep_disable();
1995 1994 if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
1996 1995 is_x86_feature(x86_featureset, X86FSET_ASYSC))
1997 1996 cpu_asysc_disable();
1998 1997 }
1999 1998
2000 1999 /*ARGSUSED*/
2001 2000 void
2002 2001 cpu_fast_syscall_enable(void *arg)
2003 2002 {
2004 2003 if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
2005 2004 is_x86_feature(x86_featureset, X86FSET_SEP))
2006 2005 cpu_sep_enable();
2007 2006 if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
2008 2007 is_x86_feature(x86_featureset, X86FSET_ASYSC))
2009 2008 cpu_asysc_enable();
2010 2009 }
2011 2010
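A minimal sketch of the alternative described in the comment above: resolve
the x86_featureset tests once, at LDT creation time, and install an operator
pair that performs only the MSR writes at switch time. Every name below
(ldt_ctx_sep_disable/enable, fss_ctx_select, and the fss_ctx_* pointers) is
hypothetical; only the is_x86_feature() tests and the existing disable/enable
routines come from this file.

	/* SEP-only operator pair; the feature test has already been done. */
	static void
	ldt_ctx_sep_disable(void *arg)
	{
		cpu_sep_disable();
	}

	static void
	ldt_ctx_sep_enable(void *arg)
	{
		cpu_sep_enable();
	}

	/* Chosen once when the private LDT is created. */
	static void (*fss_ctx_disable)(void *);
	static void (*fss_ctx_enable)(void *);

	static void
	fss_ctx_select(void)
	{
		boolean_t msr = is_x86_feature(x86_featureset, X86FSET_MSR);
		boolean_t sep = msr &&
		    is_x86_feature(x86_featureset, X86FSET_SEP);
		boolean_t asysc = msr &&
		    is_x86_feature(x86_featureset, X86FSET_ASYSC);

		if (sep && asysc) {
			/* Both present: the existing routines handle both. */
			fss_ctx_disable = cpu_fast_syscall_disable;
			fss_ctx_enable = cpu_fast_syscall_enable;
		} else if (sep) {
			fss_ctx_disable = ldt_ctx_sep_disable;
			fss_ctx_enable = ldt_ctx_sep_enable;
		}
		/* ... an ASYSC-only pair would be analogous ... */
	}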
2012 2011 static void
2013 2012 cpu_sep_enable(void)
2014 2013 {
2015 2014 ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
2016 2015 ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
2017 2016
2018 2017 wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
2019 2018 }
2020 2019
2021 2020 static void
2022 2021 cpu_sep_disable(void)
2023 2022 {
2024 2023 ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
2025 2024 ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
2026 2025
2027 2026 /*
2028 2027 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
2029 2028 * the sysenter or sysexit instruction to trigger a #gp fault.
2030 2029 */
2031 2030 wrmsr(MSR_INTC_SEP_CS, 0);
2032 2031 }
2033 2032
2034 2033 static void
2035 2034 cpu_asysc_enable(void)
2036 2035 {
2037 2036 ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
2038 2037 ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
2039 2038
2040 2039 wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
2041 2040 (uint64_t)(uintptr_t)AMD_EFER_SCE);
2042 2041 }
2043 2042
2044 2043 static void
2045 2044 cpu_asysc_disable(void)
2046 2045 {
2047 2046 ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
2048 2047 ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
2049 2048
2050 2049 /*
2051 2050 * Turn off the SCE (syscall enable) bit in the EFER register. Software
2052 2051 * executing syscall or sysret with this bit off will incur a #ud trap.
2053 2052 */
2054 2053 wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) &
2055 2054 ~((uint64_t)(uintptr_t)AMD_EFER_SCE));
2056 2055 }
371 lines elided