/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2012 Gary Mills
 *
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/disp.h>
#include <sys/promif.h>
#include <sys/clock.h>
#include <sys/cpuvar.h>
#include <sys/stack.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <sys/reboot.h>
#include <sys/avintr.h>
#include <sys/vtrace.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/copyops.h>
#include <sys/pg.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/sunddi.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/bootconf.h>
#include <sys/boot_console.h>
#include <sys/kdi_machimpl.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/pci_cfgspace.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#else
#include <sys/xpv_support.h>
#endif

/*
 * some globals for patching the result of cpuid
 * to solve problems w/ creative cpu vendors
 */

extern uint32_t cpuid_feature_ecx_include;
extern uint32_t cpuid_feature_ecx_exclude;
extern uint32_t cpuid_feature_edx_include;
extern uint32_t cpuid_feature_edx_exclude;

/*
 * Dummy spl priority masks
 */
static unsigned char dummy_cpu_pri[MAXIPL + 1] = {
        0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf,
        0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf
};

/*
 * Set console mode
 */
static void
set_console_mode(uint8_t val)
{
        struct bop_regs rp = {0};

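        /* BIOS INT 10h, function AH=0: set the video mode given in AL. */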
        rp.eax.byte.ah = 0x0;
        rp.eax.byte.al = val;
        rp.ebx.word.bx = 0x0;

        BOP_DOINT(bootops, 0x10, &rp);
}


/*
 * Setup routine called right before main(). Interposing this function
 * before main() allows us to call it in a machine-independent fashion.
 */
void
mlsetup(struct regs *rp)
{
        u_longlong_t prop_value;
        extern struct classfuncs sys_classfuncs;
        extern disp_t cpu0_disp;
        extern char t0stack[];
        extern int post_fastreboot;
        extern uint64_t plat_dr_options;

        ASSERT_STACK_ALIGNED();

        /*
         * initialize cpu_self
         */
        cpu[0]->cpu_self = cpu[0];

#if defined(__xpv)
        /*
         * Point at the hypervisor's virtual cpu structure
         */
        cpu[0]->cpu_m.mcpu_vcpu_info = &HYPERVISOR_shared_info->vcpu_info[0];
#endif

        /*
         * Set up dummy cpu_pri_data values till psm spl code is
         * installed.  This allows splx() to work on amd64.
         */

        cpu[0]->cpu_pri_data = dummy_cpu_pri;

        /*
         * check if we've got special bits to clear or set
         * when checking cpu features
         */
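        /*
         * These are normally set as boot properties from the boot loader,
         * e.g. "-B cpuid_feature_ecx_exclude=<mask>".
         */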

        if (bootprop_getval("cpuid_feature_ecx_include", &prop_value) != 0)
                cpuid_feature_ecx_include = 0;
        else
                cpuid_feature_ecx_include = (uint32_t)prop_value;

        if (bootprop_getval("cpuid_feature_ecx_exclude", &prop_value) != 0)
                cpuid_feature_ecx_exclude = 0;
        else
                cpuid_feature_ecx_exclude = (uint32_t)prop_value;

        if (bootprop_getval("cpuid_feature_edx_include", &prop_value) != 0)
                cpuid_feature_edx_include = 0;
        else
                cpuid_feature_edx_include = (uint32_t)prop_value;

        if (bootprop_getval("cpuid_feature_edx_exclude", &prop_value) != 0)
                cpuid_feature_edx_exclude = 0;
        else
                cpuid_feature_edx_exclude = (uint32_t)prop_value;

        /*
         * Initialize idt0, gdt0, ldt0_default, ktss0 and dftss.
         */
        init_desctbls();

        /*
         * lgrp_init() and possibly cpuid_pass1() need PCI config
         * space access
         */
#if defined(__xpv)
        if (DOMAIN_IS_INITDOMAIN(xen_info))
                pci_cfgspace_init();
#else
        pci_cfgspace_init();
        /*
         * Initialize the platform type from CPU 0 to ensure that
         * determine_platform() is only ever called once.
         */
        determine_platform();
#endif

        /*
         * The first lightweight pass (pass0) through the cpuid data
         * was done in locore before mlsetup was called.  Do the next
         * pass in C code.
         *
         * The x86_featureset is initialized here based on the capabilities
         * of the boot CPU.  Note that if we choose to support CPUs that have
         * different feature sets (at which point we would almost certainly
         * want to set the feature bits to correspond to the feature
         * minimum) this value may be altered.
         */
        cpuid_pass1(cpu[0], x86_featureset);

#if !defined(__xpv)
        if ((get_hwenv() & HW_XEN_HVM) != 0)
                xen_hvm_init();

        /*
         * Before we do anything with the TSCs, we need to work around
         * Intel erratum BT81.  On some CPUs, warm reset does not
         * clear the TSC.  If we are on such a CPU, we will clear TSC ourselves
         * here.  Other CPUs will clear it when we boot them later, and the
         * resulting skew will be handled by tsc_sync_master()/_slave();
         * note that such skew already exists and has to be handled anyway.
         *
         * We do this only on metal.  This same problem can occur with a
         * hypervisor that does not happen to virtualise a TSC that starts from
         * zero, regardless of CPU type; however, we do not expect hypervisors
         * that do not virtualise TSC that way to handle writes to TSC
         * correctly, either.
         */
        if (get_hwenv() == HW_NATIVE &&
            cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
            cpuid_getfamily(CPU) == 6 &&
            (cpuid_getmodel(CPU) == 0x2d || cpuid_getmodel(CPU) == 0x3e) &&
            is_x86_feature(x86_featureset, X86FSET_TSC)) {
                (void) wrmsr(REG_TSC, 0UL);
        }

        /*
         * Patch the tsc_read routine with an appropriate set of instructions,
         * depending on the processor family and architecture, to read the
         * time-stamp counter while ensuring no out-of-order execution.
         * Patch it while the kernel text is still writable.
         *
         * Note: tsc_read is not patched for Intel processors whose family
         * is > 6 or for AMD processors whose family is > 0xf (in case they
         * do not support the rdtscp instruction, which is unlikely).  By
         * default tsc_read will use cpuid for serialization in such cases.
         * The following code needs to be revisited if Intel processors of
         * family >= 0xf retain the instruction-serializing nature of the
         * mfence instruction.
         * Note: tsc_read is also not patched for x86 processors which do
         * not support "mfence".  By default tsc_read will use cpuid for
         * serialization in such cases.
         *
         * The Xen hypervisor does not correctly report whether rdtscp is
         * supported or not, so we must assume that it is not.
         */
        if ((get_hwenv() & HW_XEN_HVM) == 0 &&
            is_x86_feature(x86_featureset, X86FSET_TSCP))
                patch_tsc_read(X86_HAVE_TSCP);
        else if (cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
            cpuid_getfamily(CPU) <= 0xf &&
            is_x86_feature(x86_featureset, X86FSET_SSE2))
                patch_tsc_read(X86_TSC_MFENCE);
        else if (cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
            cpuid_getfamily(CPU) <= 6 &&
            is_x86_feature(x86_featureset, X86FSET_SSE2))
                patch_tsc_read(X86_TSC_LFENCE);

#endif  /* !__xpv */

#if defined(__i386) && !defined(__xpv)
        /*
         * Some i386 processors do not implement the rdtsc instruction,
         * or at least they do not implement it correctly. Patch them to
         * return 0.
         */
        if (!is_x86_feature(x86_featureset, X86FSET_TSC))
                patch_tsc_read(X86_NO_TSC);
#endif  /* __i386 && !__xpv */

#if defined(__amd64) && !defined(__xpv)
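        /* Patch the kernel memory operation routines for this CPU vendor. */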
        patch_memops(cpuid_getvendor(CPU));
#endif  /* __amd64 && !__xpv */

#if !defined(__xpv)
        /* XXPV what, if anything, should be dorked with here under xen? */

        /*
         * While we're thinking about the TSC, let's set up %cr4 so that
         * userland can issue rdtsc, and initialize the TSC_AUX value
         * (the cpuid) for the rdtscp instruction on appropriately
         * capable hardware.
         */
        if (is_x86_feature(x86_featureset, X86FSET_TSC))
                setcr4(getcr4() & ~CR4_TSD);

        if (is_x86_feature(x86_featureset, X86FSET_TSCP))
                (void) wrmsr(MSR_AMD_TSCAUX, 0);

        if (is_x86_feature(x86_featureset, X86FSET_DE))
                setcr4(getcr4() | CR4_DE);
#endif /* !__xpv */

        /*
         * initialize t0
         */
        t0.t_stk = (caddr_t)rp - MINFRAME;
        t0.t_stkbase = t0stack;
        t0.t_pri = maxclsyspri - 3;
        t0.t_schedflag = TS_LOAD | TS_DONT_SWAP;
        t0.t_procp = &p0;
        t0.t_plockp = &p0lock.pl_lock;
        t0.t_lwp = &lwp0;
        t0.t_forw = &t0;
        t0.t_back = &t0;
        t0.t_next = &t0;
        t0.t_prev = &t0;
        t0.t_cpu = cpu[0];
        t0.t_disp_queue = &cpu0_disp;
        t0.t_bind_cpu = PBIND_NONE;
        t0.t_bind_pset = PS_NONE;
        t0.t_bindflag = (uchar_t)default_binding_mode;
        t0.t_cpupart = &cp_default;
        t0.t_clfuncs = &sys_classfuncs.thread;
        t0.t_copyops = NULL;
        THREAD_ONPROC(&t0, CPU);

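        /* Wire t0, lwp0, and p0 (the kernel's process 0) together. */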
        lwp0.lwp_thread = &t0;
        lwp0.lwp_regs = (void *)rp;
        lwp0.lwp_procp = &p0;
        t0.t_tid = p0.p_lwpcnt = p0.p_lwprcnt = p0.p_lwpid = 1;

        p0.p_exec = NULL;
        p0.p_stat = SRUN;
        p0.p_flag = SSYS;
        p0.p_tlist = &t0;
        p0.p_stksize = 2*PAGESIZE;
        p0.p_stkpageszc = 0;
        p0.p_as = &kas;
        p0.p_lockp = &p0lock;
        p0.p_brkpageszc = 0;
        p0.p_t1_lgrpid = LGRP_NONE;
        p0.p_tr_lgrpid = LGRP_NONE;
        sigorset(&p0.p_ignore, &ignoredefault);

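        /* Make t0 the current, dispatched, and idle thread on the boot CPU. */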
        CPU->cpu_thread = &t0;
        bzero(&cpu0_disp, sizeof (disp_t));
        CPU->cpu_disp = &cpu0_disp;
        CPU->cpu_disp->disp_cpu = CPU;
        CPU->cpu_dispthread = &t0;
        CPU->cpu_idle_thread = &t0;
        CPU->cpu_flags = CPU_READY | CPU_RUNNING | CPU_EXISTS | CPU_ENABLE;
        CPU->cpu_dispatch_pri = t0.t_pri;

        CPU->cpu_id = 0;

        CPU->cpu_pri = 12;           /* initial PIL for the boot CPU */

        /*
         * The kernel doesn't use LDTs unless a process explicitly requests one.
         */
        p0.p_ldt_desc = null_sdesc;

        /*
         * Initialize thread/cpu microstate accounting
         */
        init_mstate(&t0, LMS_SYSTEM);
        init_cpu_mstate(CPU, CMS_SYSTEM);

        /*
         * Initialize lists of available and active CPUs.
         */
        cpu_list_init(CPU);

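        /* Bootstrap processor group (PG) support for the boot CPU. */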
        pg_cpu_bootstrap(CPU);

        /*
         * Now that we have taken over the GDT and IDT and have initialized
         * the active CPU list, it's time to inform kmdb, if present.
         */
        if (boothowto & RB_DEBUG)
                kdi_idt_sync();

        /*
         * Explicitly set the console to text mode (0x3) if this boot follows
         * a Fast Reboot and the console is set to CONS_SCREEN_TEXT.
         */
        if (post_fastreboot && boot_console_type(NULL) == CONS_SCREEN_TEXT)
                set_console_mode(0x3);

        /*
         * If requested (boot -d), drop into kmdb.
         *
         * This must be done after cpu_list_init() on the 64-bit kernel
         * since taking a trap requires that we re-compute gsbase based
         * on the cpu list.
         */
        if (boothowto & RB_DEBUGENTER)
                kmdb_enter();

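        /* Allocate and initialize per-CPU VM data for the boot CPU. */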
        cpu_vm_data_init(CPU);

        rp->r_fp = 0;        /* terminate kernel stack traces! */

        prom_init("kernel", (void *)NULL);

        /* User-set option overrides firmware value. */
        if (bootprop_getval(PLAT_DR_OPTIONS_NAME, &prop_value) == 0) {
                plat_dr_options = (uint64_t)prop_value;
        }
#if defined(__xpv)
        /* DR operations are not supported on xpv. */
        plat_dr_options = 0;
#else   /* __xpv */
        /* The PLAT_DR_FEATURE_ENABLED flag should only be set by the DR driver. */
        plat_dr_options &= ~PLAT_DR_FEATURE_ENABLED;
#ifndef __amd64
        /* Only enable CPU/memory DR on the 64-bit kernel. */
        plat_dr_options &= ~PLAT_DR_FEATURE_MEMORY;
        plat_dr_options &= ~PLAT_DR_FEATURE_CPU;
#endif  /* !__amd64 */
#endif  /* __xpv */

        /*
         * Get the value of the "plat_dr_physmax" boot option.  It overrides
         * the values calculated from the MSCT or SRAT tables.  The property
         * is specified in bytes and converted to pages here.
         */
        if (bootprop_getval(PLAT_DR_PHYSMAX_NAME, &prop_value) == 0) {
                plat_dr_physmax = ((uint64_t)prop_value) >> PAGESHIFT;
        }

        /* Get value of boot_ncpus. */
        if (bootprop_getval(BOOT_NCPUS_NAME, &prop_value) != 0) {
                boot_ncpus = NCPU;
        } else {
                boot_ncpus = (int)prop_value;
                if (boot_ncpus <= 0 || boot_ncpus > NCPU)
                        boot_ncpus = NCPU;
        }

        /*
         * Set max_ncpus and boot_max_ncpus to boot_ncpus if the platform
         * doesn't support CPU DR operations.
         */
        if (plat_dr_support_cpu() == 0) {
                max_ncpus = boot_max_ncpus = boot_ncpus;
        } else {
                if (bootprop_getval(PLAT_MAX_NCPUS_NAME, &prop_value) != 0) {
                        max_ncpus = NCPU;
                } else {
                        max_ncpus = (int)prop_value;
                        if (max_ncpus <= 0 || max_ncpus > NCPU) {
                                max_ncpus = NCPU;
                        }
                        if (boot_ncpus > max_ncpus) {
                                boot_ncpus = max_ncpus;
                        }
                }

                if (bootprop_getval(BOOT_MAX_NCPUS_NAME, &prop_value) != 0) {
                        boot_max_ncpus = boot_ncpus;
                } else {
                        boot_max_ncpus = (int)prop_value;
                        if (boot_max_ncpus <= 0 || boot_max_ncpus > NCPU) {
                                boot_max_ncpus = boot_ncpus;
                        } else if (boot_max_ncpus > max_ncpus) {
                                boot_max_ncpus = max_ncpus;
                        }
                }
        }

        /*
         * Initialize the lgrp framework
         */
        lgrp_init(LGRP_INIT_STAGE1);

        if (boothowto & RB_HALT) {
                prom_printf("unix: kernel halted by -h flag\n");
                prom_enter_mon();
        }

        ASSERT_STACK_ALIGNED();

        /*
         * Fill out cpu_ucode_info.  Update microcode if necessary.
         */
        ucode_check(CPU);

        if (workaround_errata(CPU) != 0)
                panic("critical workaround(s) missing for boot cpu");
}


void
mach_modpath(char *path, const char *filename)
{
        /*
         * Construct the directory path from the filename.
         */

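        /*
         * For example, given a filename of "/platform/i86pc/kernel/amd64/unix"
         * this yields "/platform/i86pc/kernel " followed by MOD_DEFPATH.
         */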
        int len;
        char *p;
        const char isastr[] = "/amd64";
        size_t isalen = strlen(isastr);

        if ((p = strrchr(filename, '/')) == NULL)
                return;

        while (p > filename && *(p - 1) == '/')
                p--;    /* remove trailing '/' characters */
        if (p == filename)
                p++;    /* so "/" -is- the modpath in this case */

        /*
         * Remove optional isa-dependent directory name - the module
         * subsystem will put this back again (!)
         */
        len = p - filename;
        if (len > isalen &&
            strncmp(&filename[len - isalen], isastr, isalen) == 0)
                p -= isalen;

        /*
         * "/platform/mumblefrotz" + " " + MOD_DEFPATH
         */
        len += (p - filename) + 1 + strlen(MOD_DEFPATH) + 1;
        (void) strncpy(path, filename, p - filename);
        (void) strcpy(&path[p - filename], " " MOD_DEFPATH);
}