patch x2apic-x86fset
patch remove-unused-vars
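
Taken together, the two patches do two things: x2apic-x86fset adds an "x2apic" entry to the feature-name table and sets a new X86FSET_X2APIC bit whenever the x2APIC bit is present in the CPUID leaf 1 %ecx word, and remove-unused-vars drops the pentiumpro_bug4064495 and enable486 variables. The first listing below shows the affected processor-identification code before the patches; the second shows it after. As a hypothetical illustration of what the new bit is for (cpu_supports_x2apic() is made up here; the real consumers are not part of this webrev, and the declarations are assumed to be visible via sys/x86_archext.h):

    #include <sys/x86_archext.h>

    /*
     * Illustrative sketch only -- not part of either patch.  Assumes the
     * cpuid pass code has already filled in x86_featureset for this CPU.
     */
    static boolean_t
    cpu_supports_x2apic(void)
    {
            return (is_x86_feature(x86_featureset, X86FSET_X2APIC));
    }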


   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2011 by Delphix. All rights reserved.
   24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  25  */
  26 /*
  27  * Copyright (c) 2010, Intel Corporation.
  28  * All rights reserved.
  29  */
  30 /*
  31  * Portions Copyright 2009 Advanced Micro Devices, Inc.
  32  */
  33 /*
  34  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  35  */
  36 /*
  37  * Various routines to handle identification
  38  * and classification of x86 processors.
  39  */
  40 
  41 #include <sys/types.h>
  42 #include <sys/archsystm.h>
  43 #include <sys/x86_archext.h>
  44 #include <sys/kmem.h>


 103  * the support infrastructure for various hardware features has been
 104  * initialized. It determines which processor features will be reported
 105  * to userland via the aux vector.
 106  *
 107  * All passes are executed on all CPUs, but only the boot CPU determines what
 108  * features the kernel will use.
 109  *
 110  * Much of the worst junk in this file is for the support of processors
 111  * that didn't really implement the cpuid instruction properly.
 112  *
 113  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 114  * the pass numbers.  Accordingly, changes to the pass code may require changes
 115  * to the accessor code.
 116  */
 117 
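
Since the block comment above notes that the chosen features are reported to userland through the aux vector, a hypothetical userland counterpart may help orient the reader; it is separate from this webrev. getisax(2) is the interface for reading the hardware-capability words the kernel exports, and AV_386_AES is assumed here to be the flag name for the AES bit on x86.

    #include <sys/auxv.h>
    #include <stdio.h>

    /*
     * Illustration only: check one of the capability bits that the cpuid
     * passes selected and exported via the aux vector.
     */
    int
    main(void)
    {
            uint32_t hwcap = 0;

            (void) getisax(&hwcap, 1);
            if (hwcap & AV_386_AES)
                    (void) printf("aes reported to userland\n");
            return (0);
    }
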
 118 uint_t x86_vendor = X86_VENDOR_IntelClone;
 119 uint_t x86_type = X86_TYPE_OTHER;
 120 uint_t x86_clflush_size = 0;
 121 
 122 uint_t pentiumpro_bug4046376;
 123 uint_t pentiumpro_bug4064495;
 124 
 125 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
 126 
 127 static char *x86_feature_names[NUM_X86_FEATURES] = {
 128         "lgpg",
 129         "tsc",
 130         "msr",
 131         "mtrr",
 132         "pge",
 133         "de",
 134         "cmov",
 135         "mmx",
 136         "mca",
 137         "pae",
 138         "cv8",
 139         "pat",
 140         "sep",
 141         "sse",
 142         "sse2",
 143         "htt",


 147         "cx16",
 148         "cmp",
 149         "tscp",
 150         "mwait",
 151         "sse4a",
 152         "cpuid",
 153         "ssse3",
 154         "sse4_1",
 155         "sse4_2",
 156         "1gpg",
 157         "clfsh",
 158         "64",
 159         "aes",
 160         "pclmulqdq",
 161         "xsave",
 162         "avx",
 163         "vmx",
 164         "svm",
 165         "topoext",
 166         "f16c",
  167         "rdrand"
 168 };
 169 
 170 boolean_t
 171 is_x86_feature(void *featureset, uint_t feature)
 172 {
 173         ASSERT(feature < NUM_X86_FEATURES);
 174         return (BT_TEST((ulong_t *)featureset, feature));
 175 }
 176 
 177 void
 178 add_x86_feature(void *featureset, uint_t feature)
 179 {
 180         ASSERT(feature < NUM_X86_FEATURES);
 181         BT_SET((ulong_t *)featureset, feature);
 182 }
 183 
 184 void
 185 remove_x86_feature(void *featureset, uint_t feature)
 186 {
 187         ASSERT(feature < NUM_X86_FEATURES);


 197         if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
 198                 return (B_TRUE);
 199         } else {
 200                 return (B_FALSE);
 201         }
 202 }
 203 
 204 void
 205 print_x86_featureset(void *featureset)
 206 {
 207         uint_t i;
 208 
 209         for (i = 0; i < NUM_X86_FEATURES; i++) {
 210                 if (is_x86_feature(featureset, i)) {
 211                         cmn_err(CE_CONT, "?x86_feature: %s\n",
 212                             x86_feature_names[i]);
 213                 }
 214         }
 215 }
 216 
 217 uint_t enable486;
 218 
 219 static size_t xsave_state_size = 0;
 220 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
 221 boolean_t xsave_force_disable = B_FALSE;
 222 
 223 /*
  224  * This is set to the platform type we are running on.
 225  */
 226 static int platform_type = -1;
 227 
 228 #if !defined(__xpv)
 229 /*
 230  * Variable to patch if hypervisor platform detection needs to be
  231  * disabled (i.e., platform_type will always be HW_NATIVE if this is 0).
 232  */
 233 int enable_platform_detection = 1;
 234 #endif
 235 
 236 /*
 237  * monitor/mwait info.
 238  *


1016         cpi->cpi_brandid = CPI_BRANDID(cpi);
1017 
1018         /*
1019          * *default* assumptions:
1020          * - believe %edx feature word
1021          * - ignore %ecx feature word
1022          * - 32-bit virtual and physical addressing
1023          */
1024         mask_edx = 0xffffffff;
1025         mask_ecx = 0;
1026 
1027         cpi->cpi_pabits = cpi->cpi_vabits = 32;
1028 
1029         switch (cpi->cpi_vendor) {
1030         case X86_VENDOR_Intel:
1031                 if (cpi->cpi_family == 5)
1032                         x86_type = X86_TYPE_P5;
1033                 else if (IS_LEGACY_P6(cpi)) {
1034                         x86_type = X86_TYPE_P6;
1035                         pentiumpro_bug4046376 = 1;
1036                         pentiumpro_bug4064495 = 1;
1037                         /*
1038                          * Clear the SEP bit when it was set erroneously
1039                          */
1040                         if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
1041                                 cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
1042                 } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
1043                         x86_type = X86_TYPE_P4;
1044                         /*
1045                          * We don't currently depend on any of the %ecx
1046                          * features until Prescott, so we'll only check
1047                          * this from P4 onwards.  We might want to revisit
1048                          * that idea later.
1049                          */
1050                         mask_ecx = 0xffffffff;
1051                 } else if (cpi->cpi_family > 0xf)
1052                         mask_ecx = 0xffffffff;
1053                 /*
1054                  * We don't support MONITOR/MWAIT if leaf 5 is not available
1055                  * to obtain the monitor linesize.
1056                  */


1298                         add_x86_feature(featureset, X86FSET_AES);
1299                 }
1300                 if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
1301                         add_x86_feature(featureset, X86FSET_PCLMULQDQ);
1302                 }
1303 
1304                 if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1305                         add_x86_feature(featureset, X86FSET_XSAVE);
1306 
1307                         /* We only test AVX when there is XSAVE */
1308                         if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1309                                 add_x86_feature(featureset,
1310                                     X86FSET_AVX);
1311 
1312                                 if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
1313                                         add_x86_feature(featureset,
1314                                             X86FSET_F16C);
1315                         }
1316                 }
 1317         }
1318         if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1319                 add_x86_feature(featureset, X86FSET_DE);
1320         }
1321 #if !defined(__xpv)
1322         if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1323 
1324                 /*
 1325                  * We require the CLFLUSH instruction for the erratum
 1326                  * workaround needed when using MONITOR/MWAIT.
1327                  */
1328                 if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1329                         cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1330                         add_x86_feature(featureset, X86FSET_MWAIT);
1331                 } else {
1332                         extern int idle_cpu_assert_cflush_monitor;
1333 
1334                         /*
1335                          * All processors we are aware of which have
1336                          * MONITOR/MWAIT also have CLFLUSH.
1337                          */




   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2011 by Delphix. All rights reserved.
  24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  25  * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
  26  */
  27 /*
  28  * Copyright (c) 2010, Intel Corporation.
  29  * All rights reserved.
  30  */
  31 /*
  32  * Portions Copyright 2009 Advanced Micro Devices, Inc.
  33  */
  34 /*
  35  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  36  */
  37 /*
  38  * Various routines to handle identification
  39  * and classification of x86 processors.
  40  */
  41 
  42 #include <sys/types.h>
  43 #include <sys/archsystm.h>
  44 #include <sys/x86_archext.h>
  45 #include <sys/kmem.h>


 104  * the support infrastructure for various hardware features has been
 105  * initialized. It determines which processor features will be reported
 106  * to userland via the aux vector.
 107  *
 108  * All passes are executed on all CPUs, but only the boot CPU determines what
 109  * features the kernel will use.
 110  *
 111  * Much of the worst junk in this file is for the support of processors
 112  * that didn't really implement the cpuid instruction properly.
 113  *
 114  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 115  * the pass numbers.  Accordingly, changes to the pass code may require changes
 116  * to the accessor code.
 117  */
 118 
 119 uint_t x86_vendor = X86_VENDOR_IntelClone;
 120 uint_t x86_type = X86_TYPE_OTHER;
 121 uint_t x86_clflush_size = 0;
 122 
  123 uint_t pentiumpro_bug4046376;
 124 
 125 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
 126 
 127 static char *x86_feature_names[NUM_X86_FEATURES] = {
 128         "lgpg",
 129         "tsc",
 130         "msr",
 131         "mtrr",
 132         "pge",
 133         "de",
 134         "cmov",
 135         "mmx",
 136         "mca",
 137         "pae",
 138         "cv8",
 139         "pat",
 140         "sep",
 141         "sse",
 142         "sse2",
 143         "htt",


 147         "cx16",
 148         "cmp",
 149         "tscp",
 150         "mwait",
 151         "sse4a",
 152         "cpuid",
 153         "ssse3",
 154         "sse4_1",
 155         "sse4_2",
 156         "1gpg",
 157         "clfsh",
 158         "64",
 159         "aes",
 160         "pclmulqdq",
 161         "xsave",
 162         "avx",
 163         "vmx",
 164         "svm",
 165         "topoext",
 166         "f16c",
 167         "rdrand",
 168         "x2apic",
 169 };
 170 
 171 boolean_t
 172 is_x86_feature(void *featureset, uint_t feature)
 173 {
 174         ASSERT(feature < NUM_X86_FEATURES);
 175         return (BT_TEST((ulong_t *)featureset, feature));
 176 }
 177 
 178 void
 179 add_x86_feature(void *featureset, uint_t feature)
 180 {
 181         ASSERT(feature < NUM_X86_FEATURES);
 182         BT_SET((ulong_t *)featureset, feature);
 183 }
 184 
 185 void
 186 remove_x86_feature(void *featureset, uint_t feature)
 187 {
 188         ASSERT(feature < NUM_X86_FEATURES);
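
As the functions above show, the accessors are thin wrappers over the generic bit-map macros: add_x86_feature() is BT_SET() and is_x86_feature() is BT_TEST() on the same ulong_t-addressed map, each behind a range ASSERT; remove_x86_feature(), whose body is cut off here, presumably uses BT_CLEAR(). A small hypothetical sketch of how they compose on a map sized like x86_featureset (featureset_example() and fs are made-up names):

    /* Illustration only -- not part of the webrev. */
    static void
    featureset_example(void)
    {
            uchar_t fs[BT_SIZEOFMAP(NUM_X86_FEATURES)];

            bzero(fs, sizeof (fs));
            add_x86_feature(fs, X86FSET_AES);               /* BT_SET() on the AES bit */
            ASSERT(is_x86_feature(fs, X86FSET_AES));        /* BT_TEST() on the same bit */
            remove_x86_feature(fs, X86FSET_AES);            /* and clear it again */
    }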


 198         if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
 199                 return (B_TRUE);
 200         } else {
 201                 return (B_FALSE);
 202         }
 203 }
 204 
 205 void
 206 print_x86_featureset(void *featureset)
 207 {
 208         uint_t i;
 209 
 210         for (i = 0; i < NUM_X86_FEATURES; i++) {
 211                 if (is_x86_feature(featureset, i)) {
 212                         cmn_err(CE_CONT, "?x86_feature: %s\n",
 213                             x86_feature_names[i]);
 214                 }
 215         }
 216 }
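
One note on print_x86_featureset() above: because the cmn_err() format string begins with '?', each "x86_feature: ..." line goes to the system log and reaches the console only on a verbose boot. A hypothetical call against the global map declared earlier:

    /* Illustration only: logs one line per set bit, e.g. "x86_feature: xsave". */
    print_x86_featureset(x86_featureset);
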
  217
 218 static size_t xsave_state_size = 0;
 219 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
 220 boolean_t xsave_force_disable = B_FALSE;
 221 
 222 /*
  223  * This is set to the platform type we are running on.
 224  */
 225 static int platform_type = -1;
 226 
 227 #if !defined(__xpv)
 228 /*
 229  * Variable to patch if hypervisor platform detection needs to be
  230  * disabled (i.e., platform_type will always be HW_NATIVE if this is 0).
 231  */
 232 int enable_platform_detection = 1;
 233 #endif
 234 
 235 /*
 236  * monitor/mwait info.
 237  *


1015         cpi->cpi_brandid = CPI_BRANDID(cpi);
1016 
1017         /*
1018          * *default* assumptions:
1019          * - believe %edx feature word
1020          * - ignore %ecx feature word
1021          * - 32-bit virtual and physical addressing
1022          */
1023         mask_edx = 0xffffffff;
1024         mask_ecx = 0;
1025 
1026         cpi->cpi_pabits = cpi->cpi_vabits = 32;
1027 
1028         switch (cpi->cpi_vendor) {
1029         case X86_VENDOR_Intel:
1030                 if (cpi->cpi_family == 5)
1031                         x86_type = X86_TYPE_P5;
1032                 else if (IS_LEGACY_P6(cpi)) {
1033                         x86_type = X86_TYPE_P6;
 1034                         pentiumpro_bug4046376 = 1;
1035                         /*
1036                          * Clear the SEP bit when it was set erroneously
1037                          */
1038                         if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
1039                                 cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
1040                 } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
1041                         x86_type = X86_TYPE_P4;
1042                         /*
1043                          * We don't currently depend on any of the %ecx
1044                          * features until Prescott, so we'll only check
1045                          * this from P4 onwards.  We might want to revisit
1046                          * that idea later.
1047                          */
1048                         mask_ecx = 0xffffffff;
1049                 } else if (cpi->cpi_family > 0xf)
1050                         mask_ecx = 0xffffffff;
1051                 /*
1052                  * We don't support MONITOR/MWAIT if leaf 5 is not available
1053                  * to obtain the monitor linesize.
1054                  */
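
The hunk stops here, before the masks are consumed. For orientation, the mask_edx and mask_ecx values computed in the switch above are presumably applied to the raw leaf 1 feature words a little further down, along these lines (the real code sits below the cut and may differ in detail):

            cp->cp_edx &= mask_edx;
            cp->cp_ecx &= mask_ecx;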


1296                         add_x86_feature(featureset, X86FSET_AES);
1297                 }
1298                 if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
1299                         add_x86_feature(featureset, X86FSET_PCLMULQDQ);
1300                 }
1301 
1302                 if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1303                         add_x86_feature(featureset, X86FSET_XSAVE);
1304 
1305                         /* We only test AVX when there is XSAVE */
1306                         if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1307                                 add_x86_feature(featureset,
1308                                     X86FSET_AVX);
1309 
1310                                 if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
1311                                         add_x86_feature(featureset,
1312                                             X86FSET_F16C);
1313                         }
1314                 }
1315         }
1316         if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
1317                 add_x86_feature(featureset, X86FSET_X2APIC);
1318         }
1319         if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1320                 add_x86_feature(featureset, X86FSET_DE);
1321         }
1322 #if !defined(__xpv)
1323         if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1324 
1325                 /*
 1326                  * We require the CLFLUSH instruction for the erratum
 1327                  * workaround needed when using MONITOR/MWAIT.
1328                  */
1329                 if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1330                         cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1331                         add_x86_feature(featureset, X86FSET_MWAIT);
1332                 } else {
1333                         extern int idle_cpu_assert_cflush_monitor;
1334 
1335                         /*
1336                          * All processors we are aware of which have
1337                          * MONITOR/MWAIT also have CLFLUSH.
1338                          */