/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Driver to retire/unretire L2/L3 cachelines on Panther
 */
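/*
 * Illustrative usage sketch (not part of the driver): a privileged
 * process opens an instance's minor node and issues the ioctls declared
 * in mem_cache_ioctl.h with a filled-in cache_info_t.  The device path
 * below is hypothetical; the real minor-node name is derived from
 * MEM_CACHE_DRIVER_NAME in mem_cache_attach().
 *
 *	int fd = open("/devices/pseudo/...:mem_cache0", O_RDWR);
 *	cache_info_t ci;
 *
 *	bzero(&ci, sizeof (ci));
 *	ci.cache = L2_CACHE_TAG;	(cache, index, way, cpu_id, bit and
 *	ci.cpu_id = 0;			 datap are the fields copied in by
 *	ci.index = 0x100;		 mem_cache_ioctl() below)
 *	ci.way = 1;
 *	if (ioctl(fd, MEM_CACHE_RETIRE, &ci) == -1)
 *		perror("MEM_CACHE_RETIRE");
 */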
#include <sys/types.h>
#include <sys/types32.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>

extern int      retire_l2(uint64_t, uint64_t);
extern int      retire_l2_alternate(uint64_t, uint64_t);
extern int      unretire_l2(uint64_t, uint64_t);
extern int      unretire_l2_alternate(uint64_t, uint64_t);
extern int      retire_l3(uint64_t, uint64_t);
extern int      retire_l3_alternate(uint64_t, uint64_t);
extern int      unretire_l3(uint64_t, uint64_t);
extern int      unretire_l3_alternate(uint64_t, uint64_t);

extern void     retire_l2_start(uint64_t, uint64_t);
extern void     retire_l2_end(uint64_t, uint64_t);
extern void     unretire_l2_start(uint64_t, uint64_t);
extern void     unretire_l2_end(uint64_t, uint64_t);
extern void     retire_l3_start(uint64_t, uint64_t);
extern void     retire_l3_end(uint64_t, uint64_t);
extern void     unretire_l3_start(uint64_t, uint64_t);
extern void     unretire_l3_end(uint64_t, uint64_t);

extern void     get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
extern void     get_l2_tag_tl1(uint64_t, uint64_t);
extern void     get_l3_tag_tl1(uint64_t, uint64_t);
extern const int _ncpu;

/* Macro for putting 64-bit onto stack as two 32-bit ints */
#define PRTF_64_TO_32(x)        (uint32_t)((x)>>32), (uint32_t)(x)
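
/*
 * For example, cmn_err(CE_CONT, "PA=0x%08x.%08x\n", PRTF_64_TO_32(pa))
 * prints the high 32 bits and the low 32 bits of pa separated by a dot,
 * which is how the tag/data dump routines below format 64-bit values.
 */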

uint_t l2_flush_retries_done = 0;
int mem_cache_debug = 0x0;
uint64_t pattern = 0;
uint32_t retire_failures = 0;
#ifdef DEBUG
int     inject_anonymous_tag_error = 0;
int32_t last_error_injected_way = 0;
uint8_t last_error_injected_bit = 0;
int32_t last_l3tag_error_injected_way;
uint8_t last_l3tag_error_injected_bit;
int32_t last_l2tag_error_injected_way;
uint8_t last_l2tag_error_injected_bit;
#endif

/* dev_ops and cb_ops entry point function declarations */
static int      mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
static int      mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
static int      mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
                                void **);
static int      mem_cache_open(dev_t *, int, int, cred_t *);
static int      mem_cache_close(dev_t, int, int, cred_t *);
static int      mem_cache_ioctl_ops(int, int, cache_info_t *);
static int      mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

struct cb_ops mem_cache_cb_ops = {
        mem_cache_open,
        mem_cache_close,
        nodev,
        nodev,
        nodev,                  /* dump */
        nodev,
        nodev,
        mem_cache_ioctl,
        nodev,                  /* devmap */
        nodev,
        ddi_segmap,             /* segmap */
        nochpoll,
        ddi_prop_op,
        NULL,                   /* for STREAMS drivers */
        D_NEW | D_MP            /* driver compatibility flag */
};

static struct dev_ops mem_cache_dev_ops = {
        DEVO_REV,               /* driver build version */
        0,                      /* device reference count */
        mem_cache_getinfo,
        nulldev,
        nulldev,                /* probe */
        mem_cache_attach,
        mem_cache_detach,
        nulldev,                /* reset */
        &mem_cache_cb_ops,
        (struct bus_ops *)NULL,
        nulldev,                        /* power */
        ddi_quiesce_not_needed,         /* quiesce */
};

/*
 * Soft state
 */
struct mem_cache_softc {
        dev_info_t      *dip;
        kmutex_t        mutex;
};
#define getsoftc(inst)  ((struct mem_cache_softc *)ddi_get_soft_state(statep,\
                        (inst)))

/* module configuration stuff */
static void *statep;
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
        &mod_driverops,
        "mem_cache_driver (08/01/30)",
        &mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
        MODREV_1,
        &modldrv,
        0
};
int
_init(void)
{
        int e;

        if ((e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
            MAX_MEM_CACHE_INSTANCES)) != 0) {
                return (e);
        }

        if ((e = mod_install(&modlinkage)) != 0)
                ddi_soft_state_fini(&statep);

        return (e);
}

int
_fini(void)
{
        int e;

        if ((e = mod_remove(&modlinkage)) != 0)
                return (e);

        ddi_soft_state_fini(&statep);

        return (DDI_SUCCESS);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
        int     inst;
        int     retval = DDI_SUCCESS;
        struct mem_cache_softc *softc;

        inst = getminor((dev_t)arg);

        switch (cmd) {
        case DDI_INFO_DEVT2DEVINFO:
                if ((softc = getsoftc(inst)) == NULL) {
                        *result = (void *)NULL;
                        retval = DDI_FAILURE;
                } else
                        *result = (void *)softc->dip;
                break;

        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)((uintptr_t)inst);
                break;

        default:
                retval = DDI_FAILURE;
        }

        return (retval);
}

static int
mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        int inst;
        struct mem_cache_softc *softc = NULL;
        char name[80];

        switch (cmd) {
        case DDI_ATTACH:
                inst = ddi_get_instance(dip);
                if (inst >= MAX_MEM_CACHE_INSTANCES) {
                        cmn_err(CE_WARN, "attach failed, too many instances\n");
                        return (DDI_FAILURE);
                }
                (void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
                if (ddi_create_priv_minor_node(dip, name,
                    S_IFCHR,
                    inst,
                    DDI_PSEUDO,
                    0, NULL, "all", 0640) ==
                    DDI_FAILURE) {
                        ddi_remove_minor_node(dip, NULL);
                        return (DDI_FAILURE);
                }

                /* Allocate a soft state structure for this instance */
                if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
                        cmn_err(CE_WARN, "ddi_soft_state_zalloc() failed "
                            "for inst %d\n", inst);
                        goto attach_failed;
                }

                /* Setup soft state */
                softc = getsoftc(inst);
                softc->dip = dip;
                mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);

                /* Create main environmental node */
                ddi_report_dev(dip);

                return (DDI_SUCCESS);

        case DDI_RESUME:
                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }

attach_failed:

        /* Free soft state if allocated; remove minor node if added earlier */
        if (softc)
                ddi_soft_state_free(statep, inst);

        ddi_remove_minor_node(dip, NULL);

        return (DDI_FAILURE);
}

static int
mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        int inst;
        struct mem_cache_softc *softc;

        switch (cmd) {
        case DDI_DETACH:
                inst = ddi_get_instance(dip);
                if ((softc = getsoftc(inst)) == NULL)
                        return (ENXIO);

                /* Free the soft state and remove minor node added earlier */
                mutex_destroy(&softc->mutex);
                ddi_soft_state_free(statep, inst);
                ddi_remove_minor_node(dip, NULL);
                return (DDI_SUCCESS);

        case DDI_SUSPEND:
                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }
}

/*ARGSUSED*/
static int
mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
        int     inst = getminor(*devp);

        return (getsoftc(inst) == NULL ? ENXIO : 0);
}

/*ARGSUSED*/
static int
mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
        int     inst = getminor(dev);

        return (getsoftc(inst) == NULL ? ENXIO : 0);
}

static char *tstate_to_desc[] = {
        "Invalid",                      /* 0 */
        "Shared",                       /* 1 */
        "Exclusive",                    /* 2 */
        "Owner",                        /* 3 */
        "Modified",                     /* 4 */
        "NA",                           /* 5 */
        "Owner/Shared",                 /* 6 */
        "Reserved(7)",                  /* 7 */
};

static char *
tag_state_to_desc(uint8_t tagstate)
{
        return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
}

void
print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
{
        uint64_t l2_subaddr;
        uint8_t l2_state;

        l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
        l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);

        l2_state = (l2_tag & CH_ECSTATE_MASK);
        cmn_err(CE_CONT,
            "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
            PRTF_64_TO_32(l2_subaddr),
            PRTF_64_TO_32(l2_tag),
            tag_state_to_desc(l2_state));
}

void
print_l2cache_line(ch_cpu_logout_t *clop)
{
        uint64_t l2_subaddr;
        int i, offset;
        uint8_t way, l2_state;
        ch_ec_data_t *ecp;

        for (way = 0; way < PN_CACHE_NWAYS; way++) {
                ecp = &clop->clo_data.chd_l2_data[way];
                l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
                l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

                l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
                cmn_err(CE_CONT,
                    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
                    "E$tag 0x%08x.%08x E$state %s",
                    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
                    PRTF_64_TO_32(ecp->ec_tag),
                    tag_state_to_desc(l2_state));
                /*
                 * Dump out Ecache subblock data captured.
                 * For Cheetah, we need to compute the ECC for each 16-byte
                 * chunk and compare it with the captured chunk ECC to figure
                 * out which chunk is bad.
                 */
                for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
                        ec_data_elm_t *ecdptr;
                        uint64_t d_low, d_high;
                        uint32_t ecc;
                        int l2_data_idx = (i/2);

                        offset = i * 16;
                        ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
                            [l2_data_idx];
                        if ((i & 1) == 0) {
                                ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
                                d_high = ecdptr->ec_d8[0];
                                d_low  = ecdptr->ec_d8[1];
                        } else {
                                ecc = ecdptr->ec_eccd & 0x1ff;
                                d_high = ecdptr->ec_d8[2];
                                d_low  = ecdptr->ec_d8[3];
                        }

                        cmn_err(CE_CONT,
                            "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
                            " ECC 0x%03x",
                            offset, PRTF_64_TO_32(d_high),
                            PRTF_64_TO_32(d_low), ecc);
                }
        }       /* end of for way loop */
}

void
print_ecache_line(ch_cpu_logout_t *clop)
{
        uint64_t ec_subaddr;
        int i, offset;
        uint8_t way, ec_state;
        ch_ec_data_t *ecp;

        for (way = 0; way < PN_CACHE_NWAYS; way++) {
                ecp = &clop->clo_data.chd_ec_data[way];
                ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
                ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

                ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
                cmn_err(CE_CONT,
                    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
                    "E$tag 0x%08x.%08x E$state %s",
                    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
                    PRTF_64_TO_32(ecp->ec_tag),
                    tag_state_to_desc(ec_state));
                /*
                 * Dump out Ecache subblock data captured.
                 * For Cheetah, we need to compute the ECC for each 16-byte
                 * chunk and compare it with the captured chunk ECC to figure
                 * out which chunk is bad.
                 */
                for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
                        ec_data_elm_t *ecdptr;
                        uint64_t d_low, d_high;
                        uint32_t ecc;
                        int ec_data_idx = (i/2);

                        offset = i * 16;
                        ecdptr =
                            &clop->clo_data.chd_ec_data[way].ec_data
                            [ec_data_idx];
                        if ((i & 1) == 0) {
                                ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
                                d_high = ecdptr->ec_d8[0];
                                d_low  = ecdptr->ec_d8[1];
                        } else {
                                ecc = ecdptr->ec_eccd & 0x1ff;
                                d_high = ecdptr->ec_d8[2];
                                d_low  = ecdptr->ec_d8[3];
                        }

                        cmn_err(CE_CONT,
                            "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
                            " ECC 0x%03x",
                            offset, PRTF_64_TO_32(d_high),
                            PRTF_64_TO_32(d_low), ecc);
                }
        }
}
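/*
 * Check whether tag_addr indexes into the same cache lines as the text
 * of the retire/unretire routine bounded by start_of_func/end_of_func
 * (plus a 0x100-byte guard band on either side).  Callers use this to
 * decide whether they must run the *_alternate copy of the routine
 * instead of the primary one.
 */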
static boolean_t
tag_addr_collides(uint64_t tag_addr, cache_id_t type,
    retire_func_t start_of_func, retire_func_t end_of_func)
{
        uint64_t start_paddr, end_paddr;
        char *type_str;

        start_paddr = va_to_pa((void *)start_of_func);
        end_paddr = va_to_pa((void *)end_of_func);
        switch (type) {
                case L2_CACHE_TAG:
                case L2_CACHE_DATA:
                        tag_addr &= PN_L2_INDEX_MASK;
                        start_paddr &= PN_L2_INDEX_MASK;
                        end_paddr &= PN_L2_INDEX_MASK;
                        type_str = "L2:";
                        break;
                case L3_CACHE_TAG:
                case L3_CACHE_DATA:
                        tag_addr &= PN_L3_TAG_RD_MASK;
                        start_paddr &= PN_L3_TAG_RD_MASK;
                        end_paddr &= PN_L3_TAG_RD_MASK;
                        type_str = "L3:";
                        break;
                default:
                        /*
                         * Should never reach here.
                         */
                        ASSERT(0);
                        return (B_FALSE);
        }
        if ((tag_addr > (start_paddr - 0x100)) &&
            (tag_addr < (end_paddr + 0x100))) {
                if (mem_cache_debug & 0x1)
                        cmn_err(CE_CONT,
                            "%s collision detected tag_addr = 0x%08x"
                            " start_paddr = 0x%08x end_paddr = 0x%08x\n",
                            type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
                            (uint32_t)end_paddr);
                return (B_TRUE);
        }
        return (B_FALSE);
}
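/*
 * Build the diagnostic tag address for the requested line: the cache
 * index shifted into place, the way in the L2/L3 way field, and the
 * hardware ECC enable bit set.
 */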
static uint64_t
get_tag_addr(cache_info_t *cache_info)
{
        uint64_t tag_addr, scratch;

        switch (cache_info->cache) {
                case L2_CACHE_TAG:
                case L2_CACHE_DATA:
                        tag_addr = (uint64_t)(cache_info->index <<
                            PN_CACHE_LINE_SHIFT);
                        scratch = (uint64_t)(cache_info->way <<
                            PN_L2_WAY_SHIFT);
                        tag_addr |= scratch;
                        tag_addr |= PN_L2_IDX_HW_ECC_EN;
                        break;
                case L3_CACHE_TAG:
                case L3_CACHE_DATA:
                        tag_addr = (uint64_t)(cache_info->index <<
                            PN_CACHE_LINE_SHIFT);
                        scratch = (uint64_t)(cache_info->way <<
                            PN_L3_WAY_SHIFT);
                        tag_addr |= scratch;
                        tag_addr |= PN_L3_IDX_HW_ECC_EN;
                        break;
                default:
                        /*
                         * Should never reach here.
                         */
                        ASSERT(0);
                        return (uint64_t)(0);
        }
        return (tag_addr);
}
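/*
 * Worker routine for mem_cache_ioctl(): validates the requested way and
 * index, checks that the target CPU is valid and online, and then
 * performs the retire, unretire, or tag-read operation while affined to
 * that CPU.
 */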
static int
mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
{
        int     ret_val = 0;
        uint64_t afar, tag_addr;
        ch_cpu_logout_t clop;
        uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
        int     i, retire_retry_count;
        cpu_t   *cpu;
        uint64_t tag_data;
        uint8_t state;

        if (cache_info->way >= PN_CACHE_NWAYS)
                return (EINVAL);
        switch (cache_info->cache) {
                case L2_CACHE_TAG:
                case L2_CACHE_DATA:
                        if (cache_info->index >=
                            (PN_L2_SET_SIZE/PN_L2_LINESIZE))
                                return (EINVAL);
                        break;
                case L3_CACHE_TAG:
                case L3_CACHE_DATA:
                        if (cache_info->index >=
                            (PN_L3_SET_SIZE/PN_L3_LINESIZE))
                                return (EINVAL);
                        break;
                default:
                        return (ENOTSUP);
        }
        /*
         * Check if we have a valid cpu ID and that
         * CPU is ONLINE.
         */
        mutex_enter(&cpu_lock);
        cpu = cpu_get(cache_info->cpu_id);
        if ((cpu == NULL) || (!cpu_is_online(cpu))) {
                mutex_exit(&cpu_lock);
                return (EINVAL);
        }
        mutex_exit(&cpu_lock);
        pattern = 0;    /* default value of TAG PA when cacheline is retired. */
        switch (cmd) {
                case MEM_CACHE_RETIRE:
                        tag_addr = get_tag_addr(cache_info);
                        pattern |= PN_ECSTATE_NA;
                        retire_retry_count = 0;
                        affinity_set(cache_info->cpu_id);
                        switch (cache_info->cache) {
                                case L2_CACHE_DATA:
                                case L2_CACHE_TAG:
                                        if ((cache_info->bit & MSB_BIT_MASK) ==
                                            MSB_BIT_MASK)
                                                pattern |= PN_L2TAG_PA_MASK;
retry_l2_retire:
                                        if (tag_addr_collides(tag_addr,
                                            cache_info->cache,
                                            retire_l2_start, retire_l2_end))
                                                ret_val =
                                                    retire_l2_alternate(
                                                    tag_addr, pattern);
                                        else
                                                ret_val = retire_l2(tag_addr,
                                                    pattern);
                                        if (ret_val == 1) {
                                                /*
                                                 * Cacheline was already in
                                                 * the retired state, so
                                                 * return success.
                                                 */
                                                ret_val = 0;
                                        }
                                        if (ret_val < 0) {
                                                cmn_err(CE_WARN,
                "retire_l2() failed. index = 0x%x way %d. Retrying...\n",
                                                    cache_info->index,
                                                    cache_info->way);
                                                if (retire_retry_count >= 2) {
                                                        retire_failures++;
                                                        affinity_clear();
                                                        return (EIO);
                                                }
                                                retire_retry_count++;
                                                goto retry_l2_retire;
                                        }
                                        if (ret_val == 2)
                                                l2_flush_retries_done++;
                        /*
                         * We bind ourselves to a CPU and send a cross trap to
                         * ourselves. On return from xt_one we can rely on the
                         * data in tag_data being filled in. Normally one
                         * would do an xt_sync to make sure that the CPU has
                         * completed the cross trap call xt_one.
                         */
                                        xt_one(cache_info->cpu_id,
                                            (xcfunc_t *)(get_l2_tag_tl1),
                                            tag_addr, (uint64_t)(&tag_data));
                                        state = tag_data & CH_ECSTATE_MASK;
                                        if (state != PN_ECSTATE_NA) {
                                                retire_failures++;
                                                print_l2_tag(tag_addr,
                                                    tag_data);
                                                cmn_err(CE_WARN,
                "L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
                                                    cache_info->index,
                                                    cache_info->way);
                                                if (retire_retry_count >= 2) {
                                                        retire_failures++;
                                                        affinity_clear();
                                                        return (EIO);
                                                }
                                                retire_retry_count++;
                                                goto retry_l2_retire;
                                        }
                                        break;
                                case L3_CACHE_TAG:
                                case L3_CACHE_DATA:
                                        if ((cache_info->bit & MSB_BIT_MASK) ==
                                            MSB_BIT_MASK)
                                                pattern |= PN_L3TAG_PA_MASK;
                                        if (tag_addr_collides(tag_addr,
                                            cache_info->cache,
                                            retire_l3_start, retire_l3_end))
                                                ret_val =
                                                    retire_l3_alternate(
                                                    tag_addr, pattern);
                                        else
                                                ret_val = retire_l3(tag_addr,
                                                    pattern);
                                        if (ret_val == 1) {
                                                /*
                                                 * Cacheline was already in
                                                 * the retired state, so
                                                 * return success.
                                                 */
                                                ret_val = 0;
                                        }
                                        if (ret_val < 0) {
                                                cmn_err(CE_WARN,
                        "retire_l3() failed. ret_val = %d index = 0x%x\n",
                                                    ret_val,
                                                    cache_info->index);
                                                retire_failures++;
                                                affinity_clear();
                                                return (EIO);
                                        }
                        /*
                         * We bind ourselves to a CPU and send a cross trap to
                         * ourselves. On return from xt_one we can rely on the
                         * data in tag_data being filled in. Normally one
                         * would do an xt_sync to make sure that the CPU has
                         * completed the cross trap call xt_one.
                         */
                                        xt_one(cache_info->cpu_id,
                                            (xcfunc_t *)(get_l3_tag_tl1),
                                            tag_addr, (uint64_t)(&tag_data));
                                        state = tag_data & CH_ECSTATE_MASK;
                                        if (state != PN_ECSTATE_NA) {
                                                cmn_err(CE_WARN,
                                        "L3 RETIRE failed for index 0x%x\n",
                                                    cache_info->index);
                                                retire_failures++;
                                                affinity_clear();
                                                return (EIO);
                                        }

                                        break;
                        }
                        affinity_clear();
                        break;
                case MEM_CACHE_UNRETIRE:
                        tag_addr = get_tag_addr(cache_info);
                        pattern = PN_ECSTATE_INV;
                        affinity_set(cache_info->cpu_id);
                        switch (cache_info->cache) {
                                case L2_CACHE_DATA:
                                case L2_CACHE_TAG:
                        /*
                         * We bind ourselves to a CPU and send a cross trap to
                         * ourselves. On return from xt_one we can rely on the
                         * data in tag_data being filled in. Normally one
                         * would do an xt_sync to make sure that the CPU has
                         * completed the cross trap call xt_one.
                         */
                                        xt_one(cache_info->cpu_id,
                                            (xcfunc_t *)(get_l2_tag_tl1),
                                            tag_addr, (uint64_t)(&tag_data));
                                        state = tag_data & CH_ECSTATE_MASK;
                                        if (state != PN_ECSTATE_NA) {
                                                affinity_clear();
                                                return (EINVAL);
                                        }
                                        if (tag_addr_collides(tag_addr,
                                            cache_info->cache,
                                            unretire_l2_start, unretire_l2_end))
                                                ret_val =
                                                    unretire_l2_alternate(
                                                    tag_addr, pattern);
                                        else
                                                ret_val =
                                                    unretire_l2(tag_addr,
                                                    pattern);
                                        if (ret_val != 0) {
                                                cmn_err(CE_WARN,
                        "unretire_l2() failed. ret_val = %d index = 0x%x\n",
                                                    ret_val,
                                                    cache_info->index);
                                                retire_failures++;
                                                affinity_clear();
                                                return (EIO);
                                        }
                                        break;
                                case L3_CACHE_TAG:
                                case L3_CACHE_DATA:
                        /*
                         * We bind ourselves to a CPU and send a cross trap to
                         * ourselves. On return from xt_one we can rely on the
                         * data in tag_data being filled in. Normally one
                         * would do an xt_sync to make sure that the CPU has
                         * completed the cross trap call xt_one.
                         */
                                        xt_one(cache_info->cpu_id,
                                            (xcfunc_t *)(get_l3_tag_tl1),
                                            tag_addr, (uint64_t)(&tag_data));
                                        state = tag_data & CH_ECSTATE_MASK;
                                        if (state != PN_ECSTATE_NA) {
                                                affinity_clear();
                                                return (EINVAL);
                                        }
                                        if (tag_addr_collides(tag_addr,
                                            cache_info->cache,
                                            unretire_l3_start, unretire_l3_end))
                                                ret_val =
                                                    unretire_l3_alternate(
                                                    tag_addr, pattern);
                                        else
                                                ret_val =
                                                    unretire_l3(tag_addr,
                                                    pattern);
                                        if (ret_val != 0) {
                                                cmn_err(CE_WARN,
                        "unretire_l3() failed. ret_val = %d index = 0x%x\n",
                                                    ret_val,
                                                    cache_info->index);
                                                affinity_clear();
                                                return (EIO);
                                        }
                                        break;
                        }
                        affinity_clear();
                        break;
                case MEM_CACHE_ISRETIRED:
                case MEM_CACHE_STATE:
                        return (ENOTSUP);
                case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
                case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
                        /*
                         * Read tag and data for all the ways at a given afar
                         */
                        afar = (uint64_t)(cache_info->index
                            << PN_CACHE_LINE_SHIFT);
                        mutex_enter(&cpu_lock);
                        affinity_set(cache_info->cpu_id);
                        (void) pause_cpus(NULL);
                        mutex_exit(&cpu_lock);
                        /*
                         * We bind ourselves to a CPU and send a cross trap to
                         * ourselves. On return from xt_one we can rely on the
                         * data in clop being filled in. Normally one would
                         * do an xt_sync to make sure that the CPU has
                         * completed the cross trap call xt_one.
                         */
                        xt_one(cache_info->cpu_id,
                            (xcfunc_t *)(get_ecache_dtags_tl1),
                            afar, (uint64_t)(&clop));
                        mutex_enter(&cpu_lock);
                        (void) start_cpus();
                        mutex_exit(&cpu_lock);
                        affinity_clear();
                        switch (cache_info->cache) {
                                case L2_CACHE_TAG:
                                        for (i = 0; i < PN_CACHE_NWAYS; i++) {
                                                Lxcache_tag_data[i] =
                                                    clop.clo_data.chd_l2_data
                                                    [i].ec_tag;
                                        }
#ifdef DEBUG
                                        last_error_injected_bit =
                                            last_l2tag_error_injected_bit;
                                        last_error_injected_way =
                                            last_l2tag_error_injected_way;
#endif
                                        break;
                                case L3_CACHE_TAG:
                                        for (i = 0; i < PN_CACHE_NWAYS; i++) {
                                                Lxcache_tag_data[i] =
                                                    clop.clo_data.chd_ec_data
                                                    [i].ec_tag;
                                        }
#ifdef DEBUG
                                        last_error_injected_bit =
                                            last_l3tag_error_injected_bit;
                                        last_error_injected_way =
                                            last_l3tag_error_injected_way;
#endif
                                        break;
                                default:
                                        return (ENOTSUP);
                        }       /* end of switch (cache) */
#ifdef DEBUG
                        if ((cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) &&
                            (inject_anonymous_tag_error == 0) &&
                            (last_error_injected_way >= 0) &&
                            (last_error_injected_way <= 3)) {
                                pattern = ((uint64_t)1 <<
                                    last_error_injected_bit);
                                /*
                                 * If the error bit is an ECC bit we need to
                                 * make sure the ECC on all ways is corrupted.
                                 */
                                if ((last_error_injected_bit >= 6) &&
                                    (last_error_injected_bit <= 14)) {
                                        for (i = 0; i < PN_CACHE_NWAYS; i++)
                                                Lxcache_tag_data[i] ^=
                                                    pattern;
                                } else
                                        Lxcache_tag_data
                                            [last_error_injected_way] ^=
                                            pattern;
                        }
#endif
                        if (ddi_copyout((caddr_t)Lxcache_tag_data,
                            (caddr_t)cache_info->datap,
                            sizeof (Lxcache_tag_data), mode)
                            != DDI_SUCCESS) {
                                return (EFAULT);
                        }
                        break;  /* end of READ_TAGS */
                default:
                        return (ENOTSUP);
        }       /* end of switch (cmd) */
        return (ret_val);
}

/*ARGSUSED*/
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
                int *rvalp)
{
        int     inst;
        struct mem_cache_softc *softc;
        cache_info_t    cache_info;
        cache_info32_t  cache_info32;
        int     ret_val;
        int     is_panther;

        inst = getminor(dev);
        if ((softc = getsoftc(inst)) == NULL)
                return (ENXIO);

        mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
        if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
                if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
                    sizeof (cache_info32), mode) != DDI_SUCCESS) {
                        mutex_exit(&softc->mutex);
                        return (EFAULT);
                }
                cache_info.cache = cache_info32.cache;
                cache_info.index = cache_info32.index;
                cache_info.way = cache_info32.way;
                cache_info.cpu_id = cache_info32.cpu_id;
                cache_info.bit = cache_info32.bit;
                cache_info.datap = (void *)((uint64_t)cache_info32.datap);
        } else
#endif
        if (ddi_copyin((cache_info_t *)arg, &cache_info,
            sizeof (cache_info), mode) != DDI_SUCCESS) {
                mutex_exit(&softc->mutex);
                return (EFAULT);
        }

        if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= _ncpu)) {
                mutex_exit(&softc->mutex);
                return (EINVAL);
        }
        is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
        if (!is_panther) {
                mutex_exit(&softc->mutex);
                return (ENOTSUP);
        }
        switch (cmd) {
                case MEM_CACHE_RETIRE:
                case MEM_CACHE_UNRETIRE:
                        if ((mode & FWRITE) == 0) {
                                ret_val = EBADF;
                                break;
                        }
                /*FALLTHROUGH*/
                case MEM_CACHE_ISRETIRED:
                case MEM_CACHE_STATE:
                case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
                case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
                        ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
                        break;
                default:
                        ret_val = ENOTSUP;
                        break;
        }
        mutex_exit(&softc->mutex);
        return (ret_val);
}