1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
  23 /*        All Rights Reserved   */
  24 
  25 /*
  26  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  27  * Use is subject to license terms.
  28  */
  29 
  30 #include <sys/param.h>
  31 #include <sys/types.h>
  32 #include <sys/vmparam.h>
  33 #include <sys/systm.h>
  34 #include <sys/stack.h>
  35 #include <sys/frame.h>
  36 #include <sys/proc.h>
  37 #include <sys/ucontext.h>
  38 #include <sys/cpuvar.h>
  39 #include <sys/asm_linkage.h>
  40 #include <sys/kmem.h>
  41 #include <sys/errno.h>
  42 #include <sys/bootconf.h>
  43 #include <sys/archsystm.h>
  44 #include <sys/fpu/fpusystm.h>
  45 #include <sys/debug.h>
  46 #include <sys/privregs.h>
  47 #include <sys/machpcb.h>
  48 #include <sys/psr_compat.h>
  49 #include <sys/cmn_err.h>
  50 #include <sys/asi.h>
  51 #include <sys/copyops.h>
  52 #include <sys/model.h>
  53 #include <sys/panic.h>
  54 #include <sys/exec.h>
  55 
  56 /*
  57  * By default, set the weakest model to TSO (Total Store Order)
  58  * which is the default memory model on SPARC.
  59  * If a platform does support a weaker model than TSO, this will be
  60  * updated at runtime to reflect that.
  61  */
  62 uint_t weakest_mem_model = TSTATE_MM_TSO;
  63 
  64 /*
  65  * modify the lower 32bits of a uint64_t
  66  */
  67 #define SET_LOWER_32(all, lower)        \
  68         (((uint64_t)(all) & 0xffffffff00000000) | (uint32_t)(lower))
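/*
 * Illustrative example:
 *   SET_LOWER_32(0x1122334455667788ULL, 0xdeadbeef) == 0x11223344deadbeefULL
 * i.e. the upper 32 bits of 'all' are kept and the lower 32 bits are
 * replaced by 'lower'.
 */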
  69 
  70 #define MEMCPY_FPU_EN           2       /* fprs on and fpu_en == 0 */
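/*
 * MEMCPY_FPU_EN is a sentinel that getfpregs() stores in fpu_en when the
 * lwp is using the FPU only via %fprs (e.g. the lib_psr memcpy routines
 * or the threads library), i.e. fpu_en == 0 but FPRS_FEF is set.  A later
 * setfpregs() recognizes the sentinel, arranges for the fp context to be
 * restored, and then clears fpu_en again.
 */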
  71 
  72 static uint_t mkpsr(uint64_t tstate, uint32_t fprs);
  73 
  74 #ifdef _SYSCALL32_IMPL
  75 static void fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
  76     const struct fq32 *sfq, struct _fq *dfq);
  77 #endif /* _SYSCALL32_IMPL */
  78 
  79 /*
  80  * Set floating-point registers.
  81  * NOTE:  'lwp' might not correspond to 'curthread' since this is
  82  * called from code in /proc to set the registers of another lwp.
  83  */
  84 void
  85 setfpregs(klwp_t *lwp, fpregset_t *fp)
  86 {
  87         struct machpcb *mpcb;
  88         kfpu_t *pfp;
  89         uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
  90         model_t model = lwp_getdatamodel(lwp);
  91 
  92         mpcb = lwptompcb(lwp);
  93         pfp = lwptofpu(lwp);
  94 
  95         /*
  96          * This test holds for both "real" fp programs and for memcpy/threads
  97          * fp users, because getfpregs forces fpu_en to MEMCPY_FPU_EN for the
  98          * memcpy and threads cases (where (fpu_en == 0) &&
  99          * (fpu_fprs & FPRS_FEF)), provided setfpregs is called after getfpregs.
 100          */
 101         if (fp->fpu_en) {
 102                 kpreempt_disable();
 103 
 104                 if (!(pfp->fpu_en) && (!(pfp->fpu_fprs & FPRS_FEF)) &&
 105                     fpu_exists) {
 106                         /*
 107                          * He's not currently using the FPU but wants to in his
 108                          * new context - arrange for this on return to userland.
 109                          */
 110                         pfp->fpu_fprs = (uint32_t)fprs;
 111                 }
 112                 /*
 113                  * Restore fpu_en to zero for the memcpy/threads case
 114                  * (where pfp->fpu_en == 0 &&
 115                  * (pfp->fpu_fprs & FPRS_FEF) == FPRS_FEF).
 116                  */
 117                 if (fp->fpu_en == MEMCPY_FPU_EN)
 118                         fp->fpu_en = 0;
 119 
 120                 /*
 121                  * Load up a user's floating point context.
 122                  */
 123                 if (fp->fpu_qcnt > MAXFPQ)        /* plug security holes */
 124                         fp->fpu_qcnt = MAXFPQ;
 125                 fp->fpu_q_entrysize = sizeof (struct _fq);
 126 
 127                 /*
 128                  * For v9 kernel, copy all of the fp regs.
 129                  * For v8 kernel, copy v8 fp regs (lower half of v9 fp regs).
 130                  * Restore entire fsr for v9, only lower half for v8.
 131                  */
 132                 (void) kcopy(fp, pfp, sizeof (fp->fpu_fr));
 133                 if (model == DATAMODEL_LP64)
 134                         pfp->fpu_fsr = fp->fpu_fsr;
 135                 else
 136                         pfp->fpu_fsr = SET_LOWER_32(pfp->fpu_fsr, fp->fpu_fsr);
 137                 pfp->fpu_qcnt = fp->fpu_qcnt;
 138                 pfp->fpu_q_entrysize = fp->fpu_q_entrysize;
 139                 pfp->fpu_en = fp->fpu_en;
 140                 pfp->fpu_q = mpcb->mpcb_fpu_q;
 141                 if (fp->fpu_qcnt)
 142                         (void) kcopy(fp->fpu_q, pfp->fpu_q,
 143                             fp->fpu_qcnt * fp->fpu_q_entrysize);
 144                 /* FSR ignores these bits on load, so they cannot be set */
 145                 pfp->fpu_fsr &= ~(FSR_QNE|FSR_FTT);
 146 
 147                 /*
 148                  * If not the current process then resume() will handle it.
 149                  */
 150                 if (lwp != ttolwp(curthread)) {
 151                         /* force resume to reload fp regs */
 152                         pfp->fpu_fprs |= FPRS_FEF;
 153                         kpreempt_enable();
 154                         return;
 155                 }
 156 
 157                 /*
 158                  * Load up FPU with new floating point context.
 159                  */
 160                 if (fpu_exists) {
 161                         pfp->fpu_fprs = _fp_read_fprs();
 162                         if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
 163                                 _fp_write_fprs(fprs);
 164                                 pfp->fpu_fprs = (uint32_t)fprs;
 165 #ifdef DEBUG
 166                                 if (fpdispr)
 167                                         cmn_err(CE_NOTE,
 168                                             "setfpregs with fp disabled!\n");
 169 #endif
 170                         }
 171                         /*
 172                          * Load all fp regs for v9 user programs, but only
 173                          * load the lower half for v8[plus] programs.
 174                          */
 175                         if (model == DATAMODEL_LP64)
 176                                 fp_restore(pfp);
 177                         else
 178                                 fp_v8_load(pfp);
 179                 }
 180 
 181                 kpreempt_enable();
 182         } else {
 183                 if ((pfp->fpu_en) || /* normal fp case */
 184                     (pfp->fpu_fprs & FPRS_FEF)) { /* memcpy/threads case */
 185                         /*
 186                          * Currently the lwp has floating point enabled.
 187                          * Turn off FPRS_FEF in user's fprs, saved and
 188                          * real copies thereof.
 189                          */
 190                         pfp->fpu_en = 0;
 191                         if (fpu_exists) {
 192                                 fprs = 0;
 193                                 if (lwp == ttolwp(curthread))
 194                                         _fp_write_fprs(fprs);
 195                                 pfp->fpu_fprs = (uint32_t)fprs;
 196                         }
 197                 }
 198         }
 199 }
 200 
 201 #ifdef  _SYSCALL32_IMPL
 202 void
 203 setfpregs32(klwp_t *lwp, fpregset32_t *fp)
 204 {
 205         fpregset_t fpregs;
 206 
 207         fpuregset_32ton(fp, &fpregs, NULL, NULL);
 208         setfpregs(lwp, &fpregs);
 209 }
 210 #endif  /* _SYSCALL32_IMPL */
 211 
 212 /*
 213  * NOTE:  'lwp' might not correspond to 'curthread' since this is
 214  * called from code in /proc to set the registers of another lwp.
 215  */
 216 void
 217 run_fpq(klwp_t *lwp, fpregset_t *fp)
 218 {
 219         /*
 220          * If the context being loaded up includes a floating queue,
 221          * we need to simulate those instructions (since we can't reload
 222          * the fpu) and pass the process any appropriate signals
 223          */
 224 
 225         if (lwp == ttolwp(curthread)) {
 226                 if (fpu_exists) {
 227                         if (fp->fpu_qcnt)
 228                                 fp_runq(lwp->lwp_regs);
 229                 }
 230         }
 231 }
 232 
 233 /*
 234  * Get floating-point registers.
 235  * NOTE:  'lwp' might not correspond to 'curthread' since this is
 236  * called from code in /proc to set the registers of another lwp.
 237  */
 238 void
 239 getfpregs(klwp_t *lwp, fpregset_t *fp)
 240 {
 241         kfpu_t *pfp;
 242         model_t model = lwp_getdatamodel(lwp);
 243 
 244         pfp = lwptofpu(lwp);
 245         kpreempt_disable();
 246         if (fpu_exists && ttolwp(curthread) == lwp)
 247                 pfp->fpu_fprs = _fp_read_fprs();
 248 
 249         /*
 250          * First check the fpu_en case, for normal fp programs.
 251          * Next check the fprs case, for fp use by memcpy/threads.
 252          */
 253         if (((fp->fpu_en = pfp->fpu_en) != 0) ||
 254             (pfp->fpu_fprs & FPRS_FEF)) {
 255                 /*
 256                  * Force a subsequent setfpregs to restore the fp context
 257                  * for the memcpy and threads cases (where
 258                  * pfp->fpu_en == 0 && (pfp->fpu_fprs & FPRS_FEF) == FPRS_FEF).
 259                  */
 260                 if (pfp->fpu_en == 0)
 261                         fp->fpu_en = MEMCPY_FPU_EN;
 262                 /*
 263                  * If we have an fpu and the current thread owns the fp
 264                  * context, flush the fp registers into the pcb. Save all
 265                  * the fp regs for v9 (xregs_getfpregs saves the upper half
 266                  * for v8plus); save the entire fsr for v9, only the lower half for v8.
 267                  */
 268                 if (fpu_exists && ttolwp(curthread) == lwp) {
 269                         if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
 270                                 uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
 271 
 272                                 _fp_write_fprs(fprs);
 273                                 pfp->fpu_fprs = fprs;
 274 #ifdef DEBUG
 275                                 if (fpdispr)
 276                                         cmn_err(CE_NOTE,
 277                                             "getfpregs with fp disabled!\n");
 278 #endif
 279                         }
 280                         if (model == DATAMODEL_LP64)
 281                                 fp_fksave(pfp);
 282                         else
 283                                 fp_v8_fksave(pfp);
 284                 }
 285                 (void) kcopy(pfp, fp, sizeof (fp->fpu_fr));
 286                 fp->fpu_q = pfp->fpu_q;
 287                 if (model == DATAMODEL_LP64)
 288                         fp->fpu_fsr = pfp->fpu_fsr;
 289                 else
 290                         fp->fpu_fsr = (uint32_t)pfp->fpu_fsr;
 291                 fp->fpu_qcnt = pfp->fpu_qcnt;
 292                 fp->fpu_q_entrysize = pfp->fpu_q_entrysize;
 293         } else {
 294                 int i;
 295                 for (i = 0; i < 32; i++)             /* NaN */
 296                         ((uint32_t *)fp->fpu_fr.fpu_regs)[i] = (uint32_t)-1;
 297                 if (model == DATAMODEL_LP64) {
 298                         for (i = 16; i < 32; i++)    /* NaN */
 299                                 ((uint64_t *)fp->fpu_fr.fpu_dregs)[i] =
 300                                     (uint64_t)-1;
 301                 }
 302                 fp->fpu_fsr = 0;
 303                 fp->fpu_qcnt = 0;
 304         }
 305         kpreempt_enable();
 306 }
 307 
 308 #ifdef  _SYSCALL32_IMPL
 309 void
 310 getfpregs32(klwp_t *lwp, fpregset32_t *fp)
 311 {
 312         fpregset_t fpregs;
 313 
 314         getfpregs(lwp, &fpregs);
 315         fpuregset_nto32(&fpregs, fp, NULL);
 316 }
 317 #endif  /* _SYSCALL32_IMPL */
 318 
 319 /*
 320  * Set general registers.
 321  * NOTE:  'lwp' might not correspond to 'curthread' since this is
 322  * called from code in /proc to set the registers of another lwp.
 323  */
 324 
 325 /* 64-bit gregset_t */
 326 void
 327 setgregs(klwp_t *lwp, gregset_t grp)
 328 {
 329         struct regs *rp = lwptoregs(lwp);
 330         kfpu_t *fp = lwptofpu(lwp);
 331         uint64_t tbits;
 332 
 333         int current = (lwp == curthread->t_lwp);
 334 
 335         if (current)
 336                 (void) save_syscall_args();     /* copy the args first */
 337 
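        /*
         * Fold the caller-supplied condition codes (%ccr) and default %asi
         * into the saved %tstate; all other tstate fields are preserved.
         */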
 338         tbits = (((grp[REG_CCR] & TSTATE_CCR_MASK) << TSTATE_CCR_SHIFT) |
 339             ((grp[REG_ASI] & TSTATE_ASI_MASK) << TSTATE_ASI_SHIFT));
 340         rp->r_tstate &= ~(((uint64_t)TSTATE_CCR_MASK << TSTATE_CCR_SHIFT) |
 341             ((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT));
 342         rp->r_tstate |= tbits;
 343         kpreempt_disable();
 344         fp->fpu_fprs = (uint32_t)grp[REG_FPRS];
 345         if (fpu_exists && (current) && (fp->fpu_fprs & FPRS_FEF))
 346                 _fp_write_fprs(fp->fpu_fprs);
 347         kpreempt_enable();
 348 
 349         /*
 350          * pc and npc must be 4-byte aligned on sparc.
 351          * We silently make it so to avoid a watchdog reset.
 352          */
 353         rp->r_pc = grp[REG_PC] & ~03L;
 354         rp->r_npc = grp[REG_nPC] & ~03L;
 355         rp->r_y = grp[REG_Y];
 356 
 357         rp->r_g1 = grp[REG_G1];
 358         rp->r_g2 = grp[REG_G2];
 359         rp->r_g3 = grp[REG_G3];
 360         rp->r_g4 = grp[REG_G4];
 361         rp->r_g5 = grp[REG_G5];
 362         rp->r_g6 = grp[REG_G6];
 363         rp->r_g7 = grp[REG_G7];
 364 
 365         rp->r_o0 = grp[REG_O0];
 366         rp->r_o1 = grp[REG_O1];
 367         rp->r_o2 = grp[REG_O2];
 368         rp->r_o3 = grp[REG_O3];
 369         rp->r_o4 = grp[REG_O4];
 370         rp->r_o5 = grp[REG_O5];
 371         rp->r_o6 = grp[REG_O6];
 372         rp->r_o7 = grp[REG_O7];
 373 
 374         if (current) {
 375                 /*
 376                  * This was called from a system call, but we
 377                  * do not want to return via the shared window;
 378                  * restoring the CPU context changes everything.
 379                  */
 380                 lwp->lwp_eosys = JUSTRETURN;
 381                 curthread->t_post_sys = 1;
 382         }
 383 }
 384 
 385 /*
 386  * Return the general registers.
 387  * NOTE:  'lwp' might not correspond to 'curthread' since this is
 388  * called from code in /proc to get the registers of another lwp.
 389  */
 390 void
 391 getgregs(klwp_t *lwp, gregset_t grp)
 392 {
 393         struct regs *rp = lwptoregs(lwp);
 394         uint32_t fprs;
 395 
 396         kpreempt_disable();
 397         if (fpu_exists && ttolwp(curthread) == lwp) {
 398                 fprs = _fp_read_fprs();
 399         } else {
 400                 kfpu_t *fp = lwptofpu(lwp);
 401                 fprs = fp->fpu_fprs;
 402         }
 403         kpreempt_enable();
 404         grp[REG_CCR] = (rp->r_tstate >> TSTATE_CCR_SHIFT) & TSTATE_CCR_MASK;
 405         grp[REG_PC] = rp->r_pc;
 406         grp[REG_nPC] = rp->r_npc;
 407         grp[REG_Y] = (uint32_t)rp->r_y;
 408         grp[REG_G1] = rp->r_g1;
 409         grp[REG_G2] = rp->r_g2;
 410         grp[REG_G3] = rp->r_g3;
 411         grp[REG_G4] = rp->r_g4;
 412         grp[REG_G5] = rp->r_g5;
 413         grp[REG_G6] = rp->r_g6;
 414         grp[REG_G7] = rp->r_g7;
 415         grp[REG_O0] = rp->r_o0;
 416         grp[REG_O1] = rp->r_o1;
 417         grp[REG_O2] = rp->r_o2;
 418         grp[REG_O3] = rp->r_o3;
 419         grp[REG_O4] = rp->r_o4;
 420         grp[REG_O5] = rp->r_o5;
 421         grp[REG_O6] = rp->r_o6;
 422         grp[REG_O7] = rp->r_o7;
 423         grp[REG_ASI] = (rp->r_tstate >> TSTATE_ASI_SHIFT) & TSTATE_ASI_MASK;
 424         grp[REG_FPRS] = fprs;
 425 }
 426 
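/*
 * Return the general registers in 32-bit (v8plus) form: the 64-bit
 * %tstate is folded back into a v8-style %psr via mkpsr() and the
 * 64-bit register values are truncated to 32 bits.
 */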
 427 void
 428 getgregs32(klwp_t *lwp, gregset32_t grp)
 429 {
 430         struct regs *rp = lwptoregs(lwp);
 431         uint32_t fprs;
 432 
 433         kpreempt_disable();
 434         if (fpu_exists && ttolwp(curthread) == lwp) {
 435                 fprs = _fp_read_fprs();
 436         } else {
 437                 kfpu_t *fp = lwptofpu(lwp);
 438                 fprs = fp->fpu_fprs;
 439         }
 440         kpreempt_enable();
 441         grp[REG_PSR] = mkpsr(rp->r_tstate, fprs);
 442         grp[REG_PC] = rp->r_pc;
 443         grp[REG_nPC] = rp->r_npc;
 444         grp[REG_Y] = rp->r_y;
 445         grp[REG_G1] = rp->r_g1;
 446         grp[REG_G2] = rp->r_g2;
 447         grp[REG_G3] = rp->r_g3;
 448         grp[REG_G4] = rp->r_g4;
 449         grp[REG_G5] = rp->r_g5;
 450         grp[REG_G6] = rp->r_g6;
 451         grp[REG_G7] = rp->r_g7;
 452         grp[REG_O0] = rp->r_o0;
 453         grp[REG_O1] = rp->r_o1;
 454         grp[REG_O2] = rp->r_o2;
 455         grp[REG_O3] = rp->r_o3;
 456         grp[REG_O4] = rp->r_o4;
 457         grp[REG_O5] = rp->r_o5;
 458         grp[REG_O6] = rp->r_o6;
 459         grp[REG_O7] = rp->r_o7;
 460 }
 461 
 462 /*
 463  * Return the user-level PC.
 464  * If in a system call, return the address of the syscall trap.
 465  */
 466 greg_t
 467 getuserpc()
 468 {
 469         return (lwptoregs(ttolwp(curthread))->r_pc);
 470 }
 471 
 472 /*
 473  * Set register windows.
 474  */
 475 void
 476 setgwins(klwp_t *lwp, gwindows_t *gwins)
 477 {
 478         struct machpcb *mpcb = lwptompcb(lwp);
 479         int wbcnt = gwins->wbcnt;
 480         caddr_t sp;
 481         int i;
 482         struct rwindow32 *rwp;
 483         int wbuf_rwindow_size;
 484         int is64;
 485 
 486         if (mpcb->mpcb_wstate == WSTATE_USER32) {
 487                 wbuf_rwindow_size = WINDOWSIZE32;
 488                 is64 = 0;
 489         } else {
 490                 wbuf_rwindow_size = WINDOWSIZE64;
 491                 is64 = 1;
 492         }
 493         ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
 494         mpcb->mpcb_wbcnt = 0;
 495         for (i = 0; i < wbcnt; i++) {
 496                 sp = (caddr_t)gwins->spbuf[i];
 497                 mpcb->mpcb_spbuf[i] = sp;
 498                 rwp = (struct rwindow32 *)
 499                     (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
 500                 if (is64 && IS_V9STACK(sp))
 501                         bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow));
 502                 else
 503                         rwindow_nto32(&gwins->wbuf[i], rwp);
 504                 mpcb->mpcb_wbcnt++;
 505         }
 506 }
 507 
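/*
 * Same as setgwins(), but the windows are supplied in 32-bit form.
 * Each window is widened with rwindow_32ton() when it is destined for
 * a 64-bit slot (64-bit wstate and a V9 stack pointer); otherwise it
 * is copied as a 32-bit window.
 */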
 508 void
 509 setgwins32(klwp_t *lwp, gwindows32_t *gwins)
 510 {
 511         struct machpcb *mpcb = lwptompcb(lwp);
 512         int wbcnt = gwins->wbcnt;
 513         caddr_t sp;
 514         int i;
 515 
 516         struct rwindow *rwp;
 517         int wbuf_rwindow_size;
 518         int is64;
 519 
 520         if (mpcb->mpcb_wstate == WSTATE_USER32) {
 521                 wbuf_rwindow_size = WINDOWSIZE32;
 522                 is64 = 0;
 523         } else {
 524                 wbuf_rwindow_size = WINDOWSIZE64;
 525                 is64 = 1;
 526         }
 527 
 528         ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
 529         mpcb->mpcb_wbcnt = 0;
 530         for (i = 0; i < wbcnt; i++) {
 531                 sp = (caddr_t)(uintptr_t)gwins->spbuf[i];
 532                 mpcb->mpcb_spbuf[i] = sp;
 533                 rwp = (struct rwindow *)
 534                     (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
 535                 if (is64 && IS_V9STACK(sp))
 536                         rwindow_32ton(&gwins->wbuf[i], rwp);
 537                 else
 538                         bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow32));
 539                 mpcb->mpcb_wbcnt++;
 540         }
 541 }
 542 
 543 /*
 544  * Get register windows.
 545  * NOTE:  'lwp' might not correspond to 'curthread' since this is
 546  * called from code in /proc to set the registers of another lwp.
 547  */
 548 void
 549 getgwins(klwp_t *lwp, gwindows_t *gwp)
 550 {
 551         struct machpcb *mpcb = lwptompcb(lwp);
 552         int wbcnt = mpcb->mpcb_wbcnt;
 553         caddr_t sp;
 554         int i;
 555         struct rwindow32 *rwp;
 556         int wbuf_rwindow_size;
 557         int is64;
 558 
 559         if (mpcb->mpcb_wstate == WSTATE_USER32) {
 560                 wbuf_rwindow_size = WINDOWSIZE32;
 561                 is64 = 0;
 562         } else {
 563                 wbuf_rwindow_size = WINDOWSIZE64;
 564                 is64 = 1;
 565         }
 566         ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
 567         gwp->wbcnt = wbcnt;
 568         for (i = 0; i < wbcnt; i++) {
 569                 sp = mpcb->mpcb_spbuf[i];
 570                 gwp->spbuf[i] = (greg_t *)sp;
 571                 rwp = (struct rwindow32 *)
 572                     (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
 573                 if (is64 && IS_V9STACK(sp))
 574                         bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow));
 575                 else
 576                         rwindow_32ton(rwp, &gwp->wbuf[i]);
 577         }
 578 }
 579 
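/*
 * Same as getgwins(), but the windows are returned in 32-bit form.
 * 64-bit frames are narrowed with rwindow_nto32(); 32-bit frames are
 * copied as-is.
 */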
 580 void
 581 getgwins32(klwp_t *lwp, gwindows32_t *gwp)
 582 {
 583         struct machpcb *mpcb = lwptompcb(lwp);
 584         int wbcnt = mpcb->mpcb_wbcnt;
 585         int i;
 586         struct rwindow *rwp;
 587         int wbuf_rwindow_size;
 588         caddr_t sp;
 589         int is64;
 590 
 591         if (mpcb->mpcb_wstate == WSTATE_USER32) {
 592                 wbuf_rwindow_size = WINDOWSIZE32;
 593                 is64 = 0;
 594         } else {
 595                 wbuf_rwindow_size = WINDOWSIZE64;
 596                 is64 = 1;
 597         }
 598 
 599         ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
 600         gwp->wbcnt = wbcnt;
 601         for (i = 0; i < wbcnt; i++) {
 602                 sp = mpcb->mpcb_spbuf[i];
 603                 rwp = (struct rwindow *)
 604                     (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
 605                 gwp->spbuf[i] = (caddr32_t)(uintptr_t)sp;
 606                 if (is64 && IS_V9STACK(sp))
 607                         rwindow_nto32(rwp, &gwp->wbuf[i]);
 608                 else
 609                         bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow32));
 610         }
 611 }
 612 
 613 /*
 614  * For things that depend on register state being on the stack,
 615  * copy any register windows that get saved into the window buffer
 616  * (in the pcb) onto the stack.  This normally gets fixed up
 617  * before returning to a user program.  Callers of this routine
 618  * require this to happen immediately because a later kernel
 619  * operation depends on window state (like instruction simulation).
 620  */
 621 int
 622 flush_user_windows_to_stack(caddr_t *psp)
 623 {
 624         int j, k;
 625         caddr_t sp;
 626         struct machpcb *mpcb = lwptompcb(ttolwp(curthread));
 627         int err;
 628         int error = 0;
 629         int wbuf_rwindow_size;
 630         int rwindow_size;
 631         int stack_align;
 632         int watched;
 633 
 634         flush_user_windows();
 635 
 636         if (mpcb->mpcb_wstate != WSTATE_USER32)
 637                 wbuf_rwindow_size = WINDOWSIZE64;
 638         else
 639                 wbuf_rwindow_size = WINDOWSIZE32;
 640 
 641         j = mpcb->mpcb_wbcnt;
 642         while (j > 0) {
 643                 sp = mpcb->mpcb_spbuf[--j];
 644 
 645                 if ((mpcb->mpcb_wstate != WSTATE_USER32) &&
 646                     IS_V9STACK(sp)) {
 647                         sp += V9BIAS64;
 648                         stack_align = STACK_ALIGN64;
 649                         rwindow_size = WINDOWSIZE64;
 650                 } else {
 651                         /*
 652                          * Reduce sp to a 32 bit value.  This was originally
 653                          * done by casting down to uint32_t and back up to
 654                          * caddr_t, but one compiler didn't like that, so the
 655                          * uintptr_t casts were added.  The temporary 32 bit
 656                          * variable was introduced to avoid depending on all
 657                          * compilers to generate the desired assembly code for a
 658                          * quadruple cast in a single expression.
 659                          */
 660                         caddr32_t sp32 = (uint32_t)(uintptr_t)sp;
 661                         sp = (caddr_t)(uintptr_t)sp32;
 662 
 663                         stack_align = STACK_ALIGN32;
 664                         rwindow_size = WINDOWSIZE32;
 665                 }
 666                 if (((uintptr_t)sp & (stack_align - 1)) != 0)
 667                         continue;
 668 
 669                 watched = watch_disable_addr(sp, rwindow_size, S_WRITE);
 670                 err = xcopyout(mpcb->mpcb_wbuf +
 671                     (j * wbuf_rwindow_size), sp, rwindow_size);
 672                 if (err != 0) {
 673                         if (psp != NULL) {
 674                                 /*
 675                                  * Determine the offending address.
 676                                  * It may not be the stack pointer itself.
 677                                  */
 678                                 uint_t *kaddr = (uint_t *)(mpcb->mpcb_wbuf +
 679                                     (j * wbuf_rwindow_size));
 680                                 uint_t *uaddr = (uint_t *)sp;
 681 
 682                                 for (k = 0;
 683                                     k < rwindow_size / sizeof (int);
 684                                     k++, kaddr++, uaddr++) {
 685                                         if (suword32(uaddr, *kaddr))
 686                                                 break;
 687                                 }
 688 
 689                                 /* can't happen? */
 690                                 if (k == rwindow_size / sizeof (int))
 691                                         uaddr = (uint_t *)sp;
 692 
 693                                 *psp = (caddr_t)uaddr;
 694                         }
 695                         error = err;
 696                 } else {
 697                         /*
 698                          * stack was aligned and copyout succeeded;
 699                          * move other windows down.
 700                          */
 701                         mpcb->mpcb_wbcnt--;
 702                         for (k = j; k < mpcb->mpcb_wbcnt; k++) {
 703                                 mpcb->mpcb_spbuf[k] = mpcb->mpcb_spbuf[k+1];
 704                                 bcopy(
 705                                     mpcb->mpcb_wbuf +
 706                                     ((k+1) * wbuf_rwindow_size),
 707                                     mpcb->mpcb_wbuf +
 708                                     (k * wbuf_rwindow_size),
 709                                     wbuf_rwindow_size);
 710                         }
 711                 }
 712                 if (watched)
 713                         watch_enable_addr(sp, rwindow_size, S_WRITE);
 714         } /* while there are windows in the wbuf */
 715         return (error);
 716 }
 717 
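/*
 * ILP32 version of copy_return_window(): read the register save area
 * at the (32-bit) stack pointer into mpcb_rwin[0] and, if dotwo is set,
 * the caller's frame into mpcb_rwin[1], recording the corresponding
 * stack pointers in mpcb_rsp[].
 */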
 718 static int
 719 copy_return_window32(int dotwo)
 720 {
 721         klwp_t *lwp = ttolwp(curthread);
 722         struct machpcb *mpcb = lwptompcb(lwp);
 723         struct rwindow32 rwindow32;
 724         caddr_t sp1;
 725         caddr_t sp2;
 726 
 727         (void) flush_user_windows_to_stack(NULL);
 728         if (mpcb->mpcb_rsp[0] == NULL) {
 729                 /*
 730                  * Reduce r_sp to a 32 bit value before storing it in sp1.  This
 731                  * was originally done by casting down to uint32_t and back up
 732                  * to caddr_t, but that generated complaints under one compiler.
 733                  * The uintptr_t cast was added to address that, and the
 734                  * temporary 32 bit variable was introduced to avoid depending
 735                  * on all compilers to generate the desired assembly code for a
 736                  * triple cast in a single expression.
 737                  */
 738                 caddr32_t sp1_32 = (uint32_t)lwptoregs(lwp)->r_sp;
 739                 sp1 = (caddr_t)(uintptr_t)sp1_32;
 740 
 741                 if ((copyin_nowatch(sp1, &rwindow32,
 742                     sizeof (struct rwindow32))) == 0)
 743                         mpcb->mpcb_rsp[0] = sp1;
 744                 rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[0]);
 745         }
 746         mpcb->mpcb_rsp[1] = NULL;
 747         if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
 748             (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
 749                 if ((copyin_nowatch(sp2, &rwindow32,
 750                     sizeof (struct rwindow32)) == 0))
 751                         mpcb->mpcb_rsp[1] = sp2;
 752                 rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[1]);
 753         }
 754         return (mpcb->mpcb_rsp[0] != NULL);
 755 }
 756 
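/*
 * Capture the caller's return window (and, if dotwo is set, its
 * caller's) in the pcb by filling in mpcb_rwin[]/mpcb_rsp[] from the
 * user stack.  ILP32 processes are handled by copy_return_window32()
 * above.  Returns non-zero if the first window is available in the pcb.
 */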
 757 int
 758 copy_return_window(int dotwo)
 759 {
 760         proc_t *p = ttoproc(curthread);
 761         klwp_t *lwp;
 762         struct machpcb *mpcb;
 763         caddr_t sp1;
 764         caddr_t sp2;
 765 
 766         if (p->p_model == DATAMODEL_ILP32)
 767                 return (copy_return_window32(dotwo));
 768 
 769         lwp = ttolwp(curthread);
 770         mpcb = lwptompcb(lwp);
 771         (void) flush_user_windows_to_stack(NULL);
 772         if (mpcb->mpcb_rsp[0] == NULL) {
 773                 sp1 = (caddr_t)lwptoregs(lwp)->r_sp + STACK_BIAS;
 774                 if ((copyin_nowatch(sp1, &mpcb->mpcb_rwin[0],
 775                     sizeof (struct rwindow)) == 0))
 776                         mpcb->mpcb_rsp[0] = sp1 - STACK_BIAS;
 777         }
 778         mpcb->mpcb_rsp[1] = NULL;
 779         if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
 780             (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
 781                 sp2 += STACK_BIAS;
 782                 if ((copyin_nowatch(sp2, &mpcb->mpcb_rwin[1],
 783                     sizeof (struct rwindow)) == 0))
 784                         mpcb->mpcb_rsp[1] = sp2 - STACK_BIAS;
 785         }
 786         return (mpcb->mpcb_rsp[0] != NULL);
 787 }
 788 
 789 /*
 790  * Clear registers on exec(2).
 791  */
 792 void
 793 setregs(uarg_t *args)
 794 {
 795         struct regs *rp;
 796         klwp_t *lwp = ttolwp(curthread);
 797         kfpu_t *fpp = lwptofpu(lwp);
 798         struct machpcb *mpcb = lwptompcb(lwp);
 799         proc_t *p = ttoproc(curthread);
 800 
 801         /*
 802          * Initialize user registers.
 803          */
 804         (void) save_syscall_args();     /* copy args from registers first */
 805         rp = lwptoregs(lwp);
 806         rp->r_g1 = rp->r_g2 = rp->r_g3 = rp->r_g4 = rp->r_g5 =
 807             rp->r_g6 = rp->r_o0 = rp->r_o1 = rp->r_o2 =
 808             rp->r_o3 = rp->r_o4 = rp->r_o5 = rp->r_o7 = 0;
 809         if (p->p_model == DATAMODEL_ILP32)
 810                 rp->r_tstate = TSTATE_USER32 | weakest_mem_model;
 811         else
 812                 rp->r_tstate = TSTATE_USER64 | weakest_mem_model;
 813         if (!fpu_exists)
 814                 rp->r_tstate &= ~TSTATE_PEF;
 815         rp->r_g7 = args->thrptr;
 816         rp->r_pc = args->entry;
 817         rp->r_npc = args->entry + 4;
 818         rp->r_y = 0;
 819         curthread->t_post_sys = 1;
 820         lwp->lwp_eosys = JUSTRETURN;
 821         lwp->lwp_pcb.pcb_trap0addr = NULL;   /* no trap 0 handler */
 822         /*
 823          * Clear the fixalignment flag
 824          */
 825         p->p_fixalignment = 0;
 826 
 827         /*
 828          * Throw out old user windows, init window buf.
 829          */
 830         trash_user_windows();
 831 
 832         if (p->p_model == DATAMODEL_LP64 &&
 833             mpcb->mpcb_wstate != WSTATE_USER64) {
 834                 ASSERT(mpcb->mpcb_wbcnt == 0);
 835                 kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
 836                 mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
 837                 ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
 838                 mpcb->mpcb_wstate = WSTATE_USER64;
 839         } else if (p->p_model == DATAMODEL_ILP32 &&
 840             mpcb->mpcb_wstate != WSTATE_USER32) {
 841                 ASSERT(mpcb->mpcb_wbcnt == 0);
 842                 kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
 843                 mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
 844                 mpcb->mpcb_wstate = WSTATE_USER32;
 845         }
 846         mpcb->mpcb_pa = va_to_pa(mpcb);
 847         mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
 848 
 849         /*
 850          * Here we initialize minimal fpu state.
 851          * The rest is done at the first floating
 852          * point instruction that a process executes
 853          * or by the lib_psr memcpy routines.
 854          */
 855         if (fpu_exists) {
 856                 extern void _fp_write_fprs(unsigned);
 857                 _fp_write_fprs(0);
 858         }
 859         fpp->fpu_en = 0;
 860         fpp->fpu_fprs = 0;
 861 }
 862 
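/*
 * Called when an lwp is swapped back in: the machpcb and window buffer
 * may now be backed by different physical pages, so refresh the
 * physical addresses cached in the machpcb.
 */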
 863 void
 864 lwp_swapin(kthread_t *tp)
 865 {
 866         struct machpcb *mpcb = lwptompcb(ttolwp(tp));
 867 
 868         mpcb->mpcb_pa = va_to_pa(mpcb);
 869         mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
 870 }
 871 
 872 /*
 873  * Construct the execution environment for the user's signal
 874  * handler and arrange for control to be given to it on return
 875  * to userland.  The library code now calls setcontext() to
 876  * clean up after the signal handler, so sigret() is no longer
 877  * needed.
 878  */
 879 int
 880 sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
 881 {
 882         /*
 883          * 'volatile' is needed to ensure that values are
 884          * correct on the error return from on_fault().
 885          */
 886         volatile int minstacksz; /* min stack required to catch signal */
 887         int newstack = 0;       /* if true, switching to altstack */
 888         label_t ljb;
 889         caddr_t sp;
 890         struct regs *volatile rp;
 891         klwp_t *lwp = ttolwp(curthread);
 892         proc_t *volatile p = ttoproc(curthread);
 893         int fpq_size = 0;
 894         struct sigframe {
 895                 struct frame frwin;
 896                 ucontext_t uc;
 897         };
 898         siginfo_t *sip_addr;
 899         struct sigframe *volatile fp;
 900         ucontext_t *volatile tuc = NULL;
 901         char *volatile xregs = NULL;
 902         volatile size_t xregs_size = 0;
 903         gwindows_t *volatile gwp = NULL;
 904         volatile int gwin_size = 0;
 905         kfpu_t *fpp;
 906         struct machpcb *mpcb;
 907         volatile int watched = 0;
 908         volatile int watched2 = 0;
 909         caddr_t tos;
 910 
 911         /*
 912          * Make sure the current last user window has been flushed to
 913          * the stack save area before we change the sp.
 914          * Restore register window if a debugger modified it.
 915          */
 916         (void) flush_user_windows_to_stack(NULL);
 917         if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
 918                 xregrestore(lwp, 0);
 919 
 920         mpcb = lwptompcb(lwp);
 921         rp = lwptoregs(lwp);
 922 
 923         /*
 924          * Clear the watchpoint return stack pointers.
 925          */
 926         mpcb->mpcb_rsp[0] = NULL;
 927         mpcb->mpcb_rsp[1] = NULL;
 928 
 929         minstacksz = sizeof (struct sigframe);
 930 
 931         /*
 932          * We know that sizeof (siginfo_t) is stack-aligned:
 933          * 128 bytes for ILP32, 256 bytes for LP64.
 934          */
 935         if (sip != NULL)
 936                 minstacksz += sizeof (siginfo_t);
 937 
 938         /*
 939          * These two fields are pointed to by ABI structures and may
 940          * be of arbitrary length. Size them now so we know how big
 941          * the signal frame has to be.
 942          */
 943         fpp = lwptofpu(lwp);
 944         fpp->fpu_fprs = _fp_read_fprs();
 945         if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
 946                 fpq_size = fpp->fpu_q_entrysize * fpp->fpu_qcnt;
 947                 minstacksz += SA(fpq_size);
 948         }
 949 
 950         mpcb = lwptompcb(lwp);
 951         if (mpcb->mpcb_wbcnt != 0) {
 952                 gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow)) +
 953                     (SPARC_MAXREGWINDOW * sizeof (caddr_t)) + sizeof (long);
 954                 minstacksz += SA(gwin_size);
 955         }
 956 
 957         /*
 958          * Extra registers, if supported by this platform, may be of arbitrary
 959          * length. Size them now so we know how big the signal frame has to be.
 960          * For sparcv9 _LP64 user programs, use asrs instead of the xregs.
 961          */
 962         minstacksz += SA(xregs_size);
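        /*
         * The variable-sized pieces sized above (siginfo, extra register
         * state, saved windows and the fp queue) are laid out on the stack
         * just above the fixed struct sigframe; pointers to them are stored
         * in the ucontext or passed to the signal handler.
         */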
 963 
 964         /*
 965          * Figure out whether we will be handling this signal on
 966          * an alternate stack specified by the user. Then allocate
 967          * and validate the stack requirements for the signal handler
 968          * context. on_fault will catch any faults.
 969          */
 970         newstack = (sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
 971             !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));
 972 
 973         tos = (caddr_t)rp->r_sp + STACK_BIAS;
 974         /*
 975          * Force proper stack pointer alignment, even in the face of a
 976          * misaligned stack pointer from user-level before the signal.
 977          * Don't use the SA() macro because that rounds up, not down.
 978          */
 979         tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN - 1ul));
 980 
 981         if (newstack != 0) {
 982                 fp = (struct sigframe *)
 983                     (SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
 984                     SA((int)lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN -
 985                     SA(minstacksz));
 986         } else {
 987                 /*
 988                  * If we were unable to flush all register windows to
 989                  * the stack and we are not now on an alternate stack,
 990                  * just dump core with a SIGSEGV back in psig().
 991                  */
 992                 if (sig == SIGSEGV &&
 993                     mpcb->mpcb_wbcnt != 0 &&
 994                     !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
 995                         return (0);
 996                 fp = (struct sigframe *)(tos - SA(minstacksz));
 997                 /*
 998                  * Could call grow here, but stack growth now handled below
 999                  * in code protected by on_fault().
1000                  */
1001         }
1002         sp = (caddr_t)fp + sizeof (struct sigframe);
1003 
1004         /*
1005          * Make sure process hasn't trashed its stack.
1006          */
1007         if ((caddr_t)fp >= p->p_usrstack ||
1008             (caddr_t)fp + SA(minstacksz) >= p->p_usrstack) {
1009 #ifdef DEBUG
1010                 printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1011                     PTOU(p)->u_comm, p->p_pid, sig);
1012                 printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1013                     (void *)fp, (void *)hdlr, rp->r_pc);
1014                 printf("fp above USRSTACK\n");
1015 #endif
1016                 return (0);
1017         }
1018 
1019         watched = watch_disable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1020         if (on_fault(&ljb))
1021                 goto badstack;
1022 
1023         tuc = kmem_alloc(sizeof (ucontext_t), KM_SLEEP);
1024         savecontext(tuc, &lwp->lwp_sigoldmask);
1025 
1026         /*
1027          * save extra register state if it exists
1028          */
1029         if (xregs_size != 0) {
1030                 xregs_setptr(lwp, tuc, sp);
1031                 xregs = kmem_alloc(xregs_size, KM_SLEEP);
1032                 xregs_get(lwp, xregs);
1033                 copyout_noerr(xregs, sp, xregs_size);
1034                 kmem_free(xregs, xregs_size);
1035                 xregs = NULL;
1036                 sp += SA(xregs_size);
1037         }
1038 
1039         copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
1040         kmem_free(tuc, sizeof (*tuc));
1041         tuc = NULL;
1042 
1043         if (sip != NULL) {
1044                 zoneid_t zoneid;
1045 
1046                 uzero(sp, sizeof (siginfo_t));
1047                 if (SI_FROMUSER(sip) &&
1048                     (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
1049                     zoneid != sip->si_zoneid) {
1050                         k_siginfo_t sani_sip = *sip;
1051                         sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
1052                         sani_sip.si_uid = 0;
1053                         sani_sip.si_ctid = -1;
1054                         sani_sip.si_zoneid = zoneid;
1055                         copyout_noerr(&sani_sip, sp, sizeof (sani_sip));
1056                 } else {
1057                         copyout_noerr(sip, sp, sizeof (*sip));
1058                 }
1059                 sip_addr = (siginfo_t *)sp;
1060                 sp += sizeof (siginfo_t);
1061 
1062                 if (sig == SIGPROF &&
1063                     curthread->t_rprof != NULL &&
1064                     curthread->t_rprof->rp_anystate) {
1065                         /*
1066                          * We stand on our head to deal with
1067                          * the real time profiling signal.
1068                          * Fill in the stuff that doesn't fit
1069                          * in a normal k_siginfo structure.
1070                          */
1071                         int i = sip->si_nsysarg;
1072                         while (--i >= 0) {
1073                                 sulword_noerr(
1074                                     (ulong_t *)&sip_addr->si_sysarg[i],
1075                                     (ulong_t)lwp->lwp_arg[i]);
1076                         }
1077                         copyout_noerr(curthread->t_rprof->rp_state,
1078                             sip_addr->si_mstate,
1079                             sizeof (curthread->t_rprof->rp_state));
1080                 }
1081         } else {
1082                 sip_addr = (siginfo_t *)NULL;
1083         }
1084 
1085         /*
1086          * When flush_user_windows_to_stack() can't save all the
1087          * windows to the stack, it puts them in the lwp's pcb.
1088          */
1089         if (gwin_size != 0) {
1090                 gwp = kmem_alloc(gwin_size, KM_SLEEP);
1091                 getgwins(lwp, gwp);
1092                 sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)sp);
1093                 copyout_noerr(gwp, sp, gwin_size);
1094                 kmem_free(gwp, gwin_size);
1095                 gwp = NULL;
1096                 sp += SA(gwin_size);
1097         } else
1098                 sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)NULL);
1099 
1100         if (fpq_size != 0) {
1101                 struct _fq *fqp = (struct _fq *)sp;
1102                 sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)fqp);
1103                 copyout_noerr(mpcb->mpcb_fpu_q, fqp, fpq_size);
1104 
1105                 /*
1106                  * forget the fp queue so that the signal handler can run
1107                  * without being harassed--it will do a setcontext that will
1108                  * re-establish the queue if there still is one
1109                  *
1110                  * NOTE: fp_runq() relies on the qcnt field being zeroed here
1111                  *      to terminate its processing of the queue after signal
1112                  *      delivery.
1113                  */
1114                 mpcb->mpcb_fpu->fpu_qcnt = 0;
1115                 sp += SA(fpq_size);
1116 
1117                 /* Also, syscall needs to know about this */
1118                 mpcb->mpcb_flags |= FP_TRAPPED;
1119 
1120         } else {
1121                 sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)NULL);
1122                 suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
1123         }
1124 
1125 
1126         /*
1127          * Since we flushed the user's windows and we are changing his
1128          * stack pointer, the window that the user will return to will
1129          * be restored from the save area in the frame we are setting up.
1130          * We copy in save area for old stack pointer so that debuggers
1131          * can do a proper stack backtrace from the signal handler.
1132          */
1133         if (mpcb->mpcb_wbcnt == 0) {
1134                 watched2 = watch_disable_addr(tos, sizeof (struct rwindow),
1135                     S_READ);
1136                 ucopy(tos, &fp->frwin, sizeof (struct rwindow));
1137         }
1138 
1139         lwp->lwp_oldcontext = (uintptr_t)&fp->uc;
1140 
1141         if (newstack != 0) {
1142                 lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
1143 
1144                 if (lwp->lwp_ustack) {
1145                         copyout_noerr(&lwp->lwp_sigaltstack,
1146                             (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
1147                 }
1148         }
1149 
1150         no_fault();
1151         mpcb->mpcb_wbcnt = 0;                /* let user go on */
1152 
1153         if (watched2)
1154                 watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1155         if (watched)
1156                 watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1157 
1158         /*
1159          * Set up user registers for execution of signal handler.
1160          */
1161         rp->r_sp = (uintptr_t)fp - STACK_BIAS;
1162         rp->r_pc = (uintptr_t)hdlr;
1163         rp->r_npc = (uintptr_t)hdlr + 4;
1164         /* make sure %asi is ASI_PNF */
1165         rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
1166         rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
1167         rp->r_o0 = sig;
1168         rp->r_o1 = (uintptr_t)sip_addr;
1169         rp->r_o2 = (uintptr_t)&fp->uc;
1170         /*
1171          * Don't set lwp_eosys here.  sendsig() is called via psig() after
1172          * lwp_eosys is handled, so setting it here would affect the next
1173          * system call.
1174          */
1175         return (1);
1176 
1177 badstack:
1178         no_fault();
1179         if (watched2)
1180                 watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1181         if (watched)
1182                 watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1183         if (tuc)
1184                 kmem_free(tuc, sizeof (ucontext_t));
1185         if (xregs)
1186                 kmem_free(xregs, xregs_size);
1187         if (gwp)
1188                 kmem_free(gwp, gwin_size);
1189 #ifdef DEBUG
1190         printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1191             PTOU(p)->u_comm, p->p_pid, sig);
1192         printf("on fault, sigsp = %p, action = %p, upc = 0x%lx\n",
1193             (void *)fp, (void *)hdlr, rp->r_pc);
1194 #endif
1195         return (0);
1196 }
1197 
1198 
1199 #ifdef _SYSCALL32_IMPL
1200 
1201 /*
1202  * Construct the execution environment for the user's signal
1203  * handler and arrange for control to be given to it on return
1204  * to userland.  The library code now calls setcontext() to
1205  * clean up after the signal handler, so sigret() is no longer
1206  * needed.
1207  */
1208 int
1209 sendsig32(int sig, k_siginfo_t *sip, void (*hdlr)())
1210 {
1211         /*
1212          * 'volatile' is needed to ensure that values are
1213          * correct on the error return from on_fault().
1214          */
1215         volatile int minstacksz; /* min stack required to catch signal */
1216         int newstack = 0;       /* if true, switching to altstack */
1217         label_t ljb;
1218         caddr_t sp;
1219         struct regs *volatile rp;
1220         klwp_t *lwp = ttolwp(curthread);
1221         proc_t *volatile p = ttoproc(curthread);
1222         struct fq32 fpu_q[MAXFPQ]; /* to hold floating queue */
1223         struct fq32 *dfq = NULL;
1224         size_t fpq_size = 0;
1225         struct sigframe32 {
1226                 struct frame32 frwin;
1227                 ucontext32_t uc;
1228         };
1229         struct sigframe32 *volatile fp;
1230         siginfo32_t *sip_addr;
1231         ucontext32_t *volatile tuc = NULL;
1232         char *volatile xregs = NULL;
1233         volatile int xregs_size = 0;
1234         gwindows32_t *volatile gwp = NULL;
1235         volatile size_t gwin_size = 0;
1236         kfpu_t *fpp;
1237         struct machpcb *mpcb;
1238         volatile int watched = 0;
1239         volatile int watched2 = 0;
1240         caddr_t tos;
1241 
1242         /*
1243          * Make sure the current last user window has been flushed to
1244          * the stack save area before we change the sp.
1245          * Restore register window if a debugger modified it.
1246          */
1247         (void) flush_user_windows_to_stack(NULL);
1248         if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
1249                 xregrestore(lwp, 0);
1250 
1251         mpcb = lwptompcb(lwp);
1252         rp = lwptoregs(lwp);
1253 
1254         /*
1255          * Clear the watchpoint return stack pointers.
1256          */
1257         mpcb->mpcb_rsp[0] = NULL;
1258         mpcb->mpcb_rsp[1] = NULL;
1259 
1260         minstacksz = sizeof (struct sigframe32);
1261 
1262         if (sip != NULL)
1263                 minstacksz += sizeof (siginfo32_t);
1264 
1265         /*
1266          * These two fields are pointed to by ABI structures and may
1267          * be of arbitrary length. Size them now so we know how big
1268          * the signal frame has to be.
1269          */
1270         fpp = lwptofpu(lwp);
1271         fpp->fpu_fprs = _fp_read_fprs();
1272         if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
1273                 fpq_size = sizeof (struct fpq32) * fpp->fpu_qcnt;
1274                 minstacksz += fpq_size;
1275                 dfq = fpu_q;
1276         }
1277 
1278         mpcb = lwptompcb(lwp);
1279         if (mpcb->mpcb_wbcnt != 0) {
1280                 gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow32)) +
1281                     (SPARC_MAXREGWINDOW * sizeof (caddr32_t)) +
1282                     sizeof (int32_t);
1283                 minstacksz += gwin_size;
1284         }
1285 
1286         /*
1287          * Extra registers, if supported by this platform, may be of arbitrary
1288          * length. Size them now so we know how big the signal frame has to be.
1289          */
1290         xregs_size = xregs_getsize(p);
1291         minstacksz += SA32(xregs_size);
1292 
1293         /*
1294          * Figure out whether we will be handling this signal on
1295          * an alternate stack specified by the user. Then allocate
1296          * and validate the stack requirements for the signal handler
1297          * context. on_fault will catch any faults.
1298          */
1299         newstack = (sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
1300             !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));
1301 
1302         tos = (void *)(uintptr_t)(uint32_t)rp->r_sp;
1303         /*
1304          * Force proper stack pointer alignment, even in the face of a
1305          * misaligned stack pointer from user-level before the signal.
1306          * Don't use the SA32() macro because that rounds up, not down.
1307          */
1308         tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN32 - 1ul));
1309 
1310         if (newstack != 0) {
1311                 fp = (struct sigframe32 *)
1312                     (SA32((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
1313                     SA32((int)lwp->lwp_sigaltstack.ss_size) -
1314                     STACK_ALIGN32 -
1315                     SA32(minstacksz));
1316         } else {
1317                 /*
1318                  * If we were unable to flush all register windows to
1319                  * the stack and we are not now on an alternate stack,
1320                  * just dump core with a SIGSEGV back in psig().
1321                  */
1322                 if (sig == SIGSEGV &&
1323                     mpcb->mpcb_wbcnt != 0 &&
1324                     !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
1325                         return (0);
1326                 fp = (struct sigframe32 *)(tos - SA32(minstacksz));
1327                 /*
1328                  * Could call grow here, but stack growth now handled below
1329                  * in code protected by on_fault().
1330                  */
1331         }
1332         sp = (caddr_t)fp + sizeof (struct sigframe32);
1333 
1334         /*
1335          * Make sure process hasn't trashed its stack.
1336          */
1337         if ((caddr_t)fp >= p->p_usrstack ||
1338             (caddr_t)fp + SA32(minstacksz) >= p->p_usrstack) {
1339 #ifdef DEBUG
1340                 printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1341                     PTOU(p)->u_comm, p->p_pid, sig);
1342                 printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1343                     (void *)fp, (void *)hdlr, rp->r_pc);
1344                 printf("fp above USRSTACK32\n");
1345 #endif
1346                 return (0);
1347         }
1348 
1349         watched = watch_disable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
1350         if (on_fault(&ljb))
1351                 goto badstack;
1352 
1353         tuc = kmem_alloc(sizeof (ucontext32_t), KM_SLEEP);
1354         savecontext32(tuc, &lwp->lwp_sigoldmask, dfq);
1355 
1356         /*
1357          * save extra register state if it exists
1358          */
1359         if (xregs_size != 0) {
1360                 xregs_setptr32(lwp, tuc, (caddr32_t)(uintptr_t)sp);
1361                 xregs = kmem_alloc(xregs_size, KM_SLEEP);
1362                 xregs_get(lwp, xregs);
1363                 copyout_noerr(xregs, sp, xregs_size);
1364                 kmem_free(xregs, xregs_size);
1365                 xregs = NULL;
1366                 sp += SA32(xregs_size);
1367         }
1368 
1369         copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
1370         kmem_free(tuc, sizeof (*tuc));
1371         tuc = NULL;
1372 
1373         if (sip != NULL) {
1374                 siginfo32_t si32;
1375                 zoneid_t zoneid;
1376 
1377                 siginfo_kto32(sip, &si32);
1378                 if (SI_FROMUSER(sip) &&
1379                     (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
1380                     zoneid != sip->si_zoneid) {
1381                         si32.si_pid = p->p_zone->zone_zsched->p_pid;
1382                         si32.si_uid = 0;
1383                         si32.si_ctid = -1;
1384                         si32.si_zoneid = zoneid;
1385                 }
1386                 uzero(sp, sizeof (siginfo32_t));
1387                 copyout_noerr(&si32, sp, sizeof (siginfo32_t));
1388                 sip_addr = (siginfo32_t *)sp;
1389                 sp += sizeof (siginfo32_t);
1390 
1391                 if (sig == SIGPROF &&
1392                     curthread->t_rprof != NULL &&
1393                     curthread->t_rprof->rp_anystate) {
1394                         /*
1395                          * We stand on our head to deal with
1396                          * the real-time profiling signal.
1397                          * Fill in the fields that don't fit
1398                          * in a normal k_siginfo structure.
1399                          */
1400                         int i = sip->si_nsysarg;
1401                         while (--i >= 0) {
1402                                 suword32_noerr(&sip_addr->si_sysarg[i],
1403                                     (uint32_t)lwp->lwp_arg[i]);
1404                         }
1405                         copyout_noerr(curthread->t_rprof->rp_state,
1406                             sip_addr->si_mstate,
1407                             sizeof (curthread->t_rprof->rp_state));
1408                 }
1409         } else {
1410                 sip_addr = NULL;
1411         }
1412 
1413         /*
1414          * When flush_user_windows_to_stack() can't save all the
1415          * windows to the stack, it puts them in the lwp's pcb.
1416          */
1417         if (gwin_size != 0) {
1418                 gwp = kmem_alloc(gwin_size, KM_SLEEP);
1419                 getgwins32(lwp, gwp);
1420                 suword32_noerr(&fp->uc.uc_mcontext.gwins,
1421                     (uint32_t)(uintptr_t)sp);
1422                 copyout_noerr(gwp, sp, gwin_size);
1423                 kmem_free(gwp, gwin_size);
1424                 gwp = NULL;
1425                 sp += gwin_size;
1426         } else {
1427                 suword32_noerr(&fp->uc.uc_mcontext.gwins, (uint32_t)NULL);
1428         }
1429 
1430         if (fpq_size != 0) {
1431                 /*
1432                  * Update the (already copied out) fpu32.fpu_q pointer
1433                  * from NULL to the 32-bit address on the user's stack
1434                  * to which we then copy out the fq32.
1435                  */
1436                 struct fq32 *fqp = (struct fq32 *)sp;
1437                 suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
1438                     (uint32_t)(uintptr_t)fqp);
1439                 copyout_noerr(dfq, fqp, fpq_size);
1440 
1441                 /*
1442                  * forget the fp queue so that the signal handler can run
1443                  * without being harassed; it will do a setcontext that
1444                  * re-establishes the queue if there still is one.
1445                  *
1446                  * NOTE: fp_runq() relies on the qcnt field being zeroed here
1447                  *      to terminate its processing of the queue after signal
1448                  *      delivery.
1449                  */
1450                 mpcb->mpcb_fpu->fpu_qcnt = 0;
1451                 sp += fpq_size;
1452 
1453                 /* Also, syscall needs to know about this */
1454                 mpcb->mpcb_flags |= FP_TRAPPED;
1455 
1456         } else {
1457                 suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
1458                     (uint32_t)NULL);
1459                 suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
1460         }
1461 
1463         /*
1464          * Since we flushed the user's windows and we are changing the
1465          * stack pointer, the window that the user will return to will
1466          * be restored from the save area in the frame we are setting up.
1467          * We copy in the save area from the old stack pointer so that
1468          * debuggers can do a proper stack backtrace from the signal handler.
1469          */
1470         if (mpcb->mpcb_wbcnt == 0) {
1471                 watched2 = watch_disable_addr(tos, sizeof (struct rwindow32),
1472                     S_READ);
1473                 ucopy(tos, &fp->frwin, sizeof (struct rwindow32));
1474         }
1475 
1476         lwp->lwp_oldcontext = (uintptr_t)&fp->uc;
1477 
1478         if (newstack != 0) {
1479                 lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
1480                 if (lwp->lwp_ustack) {
1481                         stack32_t stk32;
1482 
1483                         stk32.ss_sp =
1484                             (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
1485                         stk32.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
1486                         stk32.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;
1487 
1488                         copyout_noerr(&stk32, (stack32_t *)lwp->lwp_ustack,
1489                             sizeof (stack32_t));
1490                 }
1491         }
1492 
1493         no_fault();
1494         mpcb->mpcb_wbcnt = 0;                /* let user go on */
1495 
1496         if (watched2)
1497                 watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
1498         if (watched)
1499                 watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
1500 
1501         /*
1502          * Set up user registers for execution of signal handler.
1503          */
1504         rp->r_sp = (uintptr_t)fp;
1505         rp->r_pc = (uintptr_t)hdlr;
1506         rp->r_npc = (uintptr_t)hdlr + 4;
1507         /* make sure %asi is ASI_PNF */
1508         rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
1509         rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
1510         rp->r_o0 = sig;
1511         rp->r_o1 = (uintptr_t)sip_addr;
1512         rp->r_o2 = (uintptr_t)&fp->uc;
1513         /*
1514          * Don't set lwp_eosys here.  sendsig() is called via psig() after
1515          * lwp_eosys is handled, so setting it here would affect the next
1516          * system call.
1517          */
1518         return (1);
1519 
1520 badstack:
1521         no_fault();
1522         if (watched2)
1523                 watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
1524         if (watched)
1525                 watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
1526         if (tuc)
1527                 kmem_free(tuc, sizeof (*tuc));
1528         if (xregs)
1529                 kmem_free(xregs, xregs_size);
1530         if (gwp)
1531                 kmem_free(gwp, gwin_size);
1532 #ifdef DEBUG
1533         printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1534             PTOU(p)->u_comm, p->p_pid, sig);
1535         printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1536             (void *)fp, (void *)hdlr, rp->r_pc);
1537 #endif
1538         return (0);
1539 }
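/*
 * For reference, a simplified user-level sketch of what sendsig32() sets up;
 * the handler name, the signal number, and the missing error checks are
 * purely illustrative.  %o0/%o1/%o2 above become the handler's three
 * arguments (sip may be NULL if no siginfo was copied out), and the third
 * argument points at the &fp->uc context that a later setcontext() restores.
 *
 *	static void
 *	handler(int sig, siginfo_t *sip, void *ucp)
 *	{
 *		ucontext_t *ucv = ucp;
 *		...
 *	}
 *
 *	struct sigaction sa;
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	(void) sigemptyset(&sa.sa_mask);
 *	(void) sigaction(SIGUSR1, &sa, NULL);
 */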
1540 
1541 #endif /* _SYSCALL32_IMPL */
1542 
1543 
1544 /*
1545  * Load user registers into lwp.  Called only from syslwp_create().
1546  * thrptr ignored for sparc.
1547  */
1548 /* ARGSUSED2 */
1549 void
1550 lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
1551 {
1552         setgregs(lwp, grp);
1553         if (lwptoproc(lwp)->p_model == DATAMODEL_ILP32)
1554                 lwptoregs(lwp)->r_tstate = TSTATE_USER32 | TSTATE_MM_TSO;
1555         else
1556                 lwptoregs(lwp)->r_tstate = TSTATE_USER64 | TSTATE_MM_TSO;
1557 
1558         if (!fpu_exists)
1559                 lwptoregs(lwp)->r_tstate &= ~TSTATE_PEF;
1560         lwp->lwp_eosys = JUSTRETURN;
1561         lwptot(lwp)->t_post_sys = 1;
1562 }
1563 
1564 /*
1565  * Set syscall()'s return values for an lwp.  The carry condition code,
1566  * which flags a syscall error on SPARC, is cleared so the call succeeds.
1567 void
1568 lwp_setrval(klwp_t *lwp, int v1, int v2)
1569 {
1570         struct regs *rp = lwptoregs(lwp);
1571 
1572         rp->r_tstate &= ~TSTATE_IC;
1573         rp->r_o0 = v1;
1574         rp->r_o1 = v2;
1575 }
1576 
1577 /*
1578  * set stack pointer for a lwp
1579  */
1580 void
1581 lwp_setsp(klwp_t *lwp, caddr_t sp)
1582 {
1583         struct regs *rp = lwptoregs(lwp);
1584         rp->r_sp = (uintptr_t)sp;
1585 }
1586 
1587 /*
1588  * Take any PCB specific actions that are required or flagged in the PCB.
1589  */
1590 extern void trap_async_hwerr(void);
1591 #pragma weak trap_async_hwerr
1592 
1593 void
1594 lwp_pcb_exit(void)
1595 {
1596         klwp_t *lwp = ttolwp(curthread);
1597 
1598         if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1599                 lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
1600                 trap_async_hwerr();
1601         }
1602 }
1603 
1604 /*
1605  * Invalidate the saved user register windows in the pcb struct
1606  * for the current thread. They will no longer be preserved.
1607  */
1608 void
1609 lwp_clear_uwin(void)
1610 {
1611         struct machpcb *m = lwptompcb(ttolwp(curthread));
1612 
1613         /*
1614          * This has the effect of invalidating any user-level
1615          * windows that are currently sitting in the kernel
1616          * buffer.
1617          */
1618         m->mpcb_wbcnt = 0;
1619 }
1620 
1621 /*
1622  *  Set memory model to Total Store Order (TSO).
1623  */
1624 static void
1625 mmodel_set_tso(void)
1626 {
1627         struct regs *rp = lwptoregs(ttolwp(curthread));
1628 
1629         /*
1630          * The thread is doing something that requires TSO semantics
1631          * (creating a second thread, or mapping writable shared memory).
1632          * It's no longer safe to run in WC mode.
1633          */
1634         rp->r_tstate &= ~TSTATE_MM;
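	/*
	 * TSO is the zero encoding of the memory-model field in %tstate,
	 * so the OR below does not actually change any bits; it is kept
	 * (and LINTED) to document the intent.
	 */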
1635         /* LINTED E_EXPR_NULL_EFFECT */
1636         rp->r_tstate |= TSTATE_MM_TSO;
1637 }
1638 
1639 /*
1640  * When this routine is invoked, the process is just about to add a new lwp,
1641  * making it multithreaded.
1642  *
1643  * If the program requires the default, stronger (legacy) memory model
1644  * semantics, this is an indication that the processor memory model
1645  * should be altered to provide those semantics.
1646  */
1647 void
1648 lwp_mmodel_newlwp(void)
1649 {
1650         /*
1651          * A new thread has been created and it is no longer safe
1652          * to run in WC mode, so revert to TSO.
1653          */
1654         mmodel_set_tso();
1655 }
1656 
1657 /*
1658  * This routine is invoked immediately after the lwp has added a mapping
1659  * to shared memory to its address space. The mapping starts at address
1660  * 'addr' and extends for 'size' bytes.
1661  *
1662  * Unless we can (somehow) guarantee that all the processes sharing the
1663  * underlying mapped object are using the same memory model as this
1664  * process, this call should change the memory model configuration of
1665  * the processor to the most pessimistic one available.
1666  */
1667 /* ARGSUSED */
1668 void
1669 lwp_mmodel_shared_as(caddr_t addr, size_t sz)
1670 {
1671         /*
1672          * The lwp has mapped shared memory, so it is no longer safe
1673          * to run in WC mode; revert to TSO.
1674          * For now, any shared mapping is enough to force TSO, so
1675          * 'addr' and 'sz' are not examined.
1676          */
1677         mmodel_set_tso();
1678 }
1679 
1680 static uint_t
1681 mkpsr(uint64_t tstate, uint32_t fprs)
1682 {
1683         uint_t psr, icc;
1684 
1685         psr = tstate & TSTATE_CWP_MASK;
1686         if (tstate & TSTATE_PRIV)
1687                 psr |= PSR_PS;
1688         if (fprs & FPRS_FEF)
1689                 psr |= PSR_EF;
1690         icc = (uint_t)(tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
1691         psr |= icc;
1692         psr |= V9_PSR_IMPLVER;
1693         return (psr);
1694 }
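/*
 * Worked example, assuming the standard V8/V9 register layouts: the icc
 * bits of %ccr live at tstate<35:32> and PSR_ICC occupies psr<23:20>, so
 * PSR_TSTATE_CC_SHIFT is the 12-bit right shift that lines them up.  A
 * tstate with only icc.n set (bit 35), FPRS_FEF on, and CWP 2 therefore
 * produces a psr with bit 23 (negative) set, PSR_EF, CWP 2, and the
 * constant V9_PSR_IMPLVER identification bits.
 */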
1695 
1696 void
1697 sync_icache(caddr_t va, uint_t len)
1698 {
1699         caddr_t end;
1700 
1701         end = va + len;
1702         va = (caddr_t)((uintptr_t)va & -8l);        /* sparc needs 8-byte align */
1703         while (va < end) {
1704                 doflush(va);
1705                 va += 8;
1706         }
1707 }
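/*
 * A sketch of a typical caller (patch_addr and new_instr are illustrative
 * names, not existing symbols): after storing a new instruction, the range
 * is flushed so the instruction cache cannot supply a stale copy.  Since
 * sync_icache() rounds the start down to an 8-byte boundary and flushes in
 * 8-byte steps, any va/len pair is acceptable.
 *
 *	uint32_t *instp = (uint32_t *)patch_addr;
 *	*instp = new_instr;
 *	sync_icache((caddr_t)instp, sizeof (*instp));
 */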
1708 
1709 #ifdef _SYSCALL32_IMPL
1710 
1711 /*
1712  * Copy the floating point queue if and only if there is a queue and a place
1713  * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1714  * The issue is that while we are handling the fq32 in sendsig, we
1715  * still need a 64-bit pointer to it, and the caddr32_t in fpregset32_t
1716  * will not suffice, so we have the third parameter to this function.
1717  */
1718 void
1719 fpuregset_nto32(const fpregset_t *src, fpregset32_t *dest, struct fq32 *dfq)
1720 {
1721         int i;
1722 
1723         bzero(dest, sizeof (*dest));
1724         for (i = 0; i < 32; i++)
1725                 dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1726         dest->fpu_q = NULL;
1727         dest->fpu_fsr = (uint32_t)src->fpu_fsr;
1728         dest->fpu_qcnt = src->fpu_qcnt;
1729         dest->fpu_q_entrysize = sizeof (struct fpq32);
1730         dest->fpu_en = src->fpu_en;
1731 
1732         if ((src->fpu_qcnt) && (dfq != NULL)) {
1733                 struct _fq *sfq = src->fpu_q;
1734                 for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1735                         dfq->FQu.fpq.fpq_addr =
1736                             (caddr32_t)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1737                         dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1738                 }
1739         }
1740 }
1741 
1742 /*
1743  * Copy the floating point queue if and only if there is a queue and a place
1744  * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1745  * The *dfq is required to escape the bzero in both this function and
1746  * ucontext_32ton. The *sfq is required because once the fq32 has been
1747  * copied into the kernel, in setcontext, we need a 64-bit pointer to it.
1748  */
1749 static void
1750 fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
1751     const struct fq32 *sfq, struct _fq *dfq)
1752 {
1753         int i;
1754 
1755         bzero(dest, sizeof (*dest));
1756         for (i = 0; i < 32; i++)
1757                 dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1758         dest->fpu_q = dfq;
1759         dest->fpu_fsr = (uint64_t)src->fpu_fsr;
1760         if ((dest->fpu_qcnt = src->fpu_qcnt) > 0)
1761                 dest->fpu_q_entrysize = sizeof (struct _fpq);
1762         else
1763                 dest->fpu_q_entrysize = 0;
1764         dest->fpu_en = src->fpu_en;
1765 
1766         if ((src->fpu_qcnt) && (sfq) && (dfq)) {
1767                 for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1768                         dfq->FQu.fpq.fpq_addr =
1769                             (unsigned int *)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1770                         dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1771                 }
1772         }
1773 }
1774 
1775 void
1776 ucontext_32ton(const ucontext32_t *src, ucontext_t *dest,
1777     const struct fq32 *sfq, struct _fq *dfq)
1778 {
1779         int i;
1780 
1781         bzero(dest, sizeof (*dest));
1782 
1783         dest->uc_flags = src->uc_flags;
1784         dest->uc_link = (ucontext_t *)(uintptr_t)src->uc_link;
1785 
1786         for (i = 0; i < 4; i++) {
1787                 dest->uc_sigmask.__sigbits[i] = src->uc_sigmask.__sigbits[i];
1788         }
1789 
1790         dest->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp;
1791         dest->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
1792         dest->uc_stack.ss_flags = src->uc_stack.ss_flags;
1793 
1794         /* REG_CCR is 0, skip over it and handle it after this loop */
1795         for (i = 1; i < _NGREG32; i++)
1796                 dest->uc_mcontext.gregs[i] =
1797                     (greg_t)(uint32_t)src->uc_mcontext.gregs[i];
1798         dest->uc_mcontext.gregs[REG_CCR] =
1799             (src->uc_mcontext.gregs[REG_PSR] & PSR_ICC) >> PSR_ICC_SHIFT;
1800         dest->uc_mcontext.gregs[REG_ASI] = ASI_PNF;
1801         /*
1802          * A valid fpregs is only copied in if (uc.uc_flags & UC_FPU),
1803          * otherwise there is no guarantee that anything in fpregs is valid.
1804          */
1805         if (src->uc_flags & UC_FPU) {
1806                 dest->uc_mcontext.gregs[REG_FPRS] =
1807                     ((src->uc_mcontext.fpregs.fpu_en) ?
1808                     (FPRS_DU|FPRS_DL|FPRS_FEF) : 0);
1809         } else {
1810                 dest->uc_mcontext.gregs[REG_FPRS] = 0;
1811         }
1812         dest->uc_mcontext.gwins =
1813             (gwindows_t *)(uintptr_t)src->uc_mcontext.gwins;
1814         if (src->uc_flags & UC_FPU) {
1815                 fpuregset_32ton(&src->uc_mcontext.fpregs,
1816                     &dest->uc_mcontext.fpregs, sfq, dfq);
1817         }
1818 }
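/*
 * A sketch of the expected calling pattern; the local names are invented
 * and MAXFPQ is assumed as the bound on the fp queue.  The real caller is
 * the 32-bit setcontext path, which copies the fq32 in from userland
 * before converting:
 *
 *	ucontext32_t uc32;		copied in from the 32-bit process
 *	ucontext_t uc;			native form used by the kernel
 *	struct fq32 fq32[MAXFPQ];	32-bit fp queue, if any
 *	struct _fq fq[MAXFPQ];		64-bit fp queue for the kernel
 *
 *	ucontext_32ton(&uc32, &uc, fq32, fq);
 *
 * Note that REG_CCR is rebuilt from the 32-bit PSR's icc field and REG_ASI
 * is forced to ASI_PNF, matching what sendsig32() established on delivery.
 */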
1819 
1820 void
1821 rwindow_nto32(struct rwindow *src, struct rwindow32 *dest)
1822 {
1823         greg_t *s = (greg_t *)src;
1824         greg32_t *d = (greg32_t *)dest;
1825         int i;
1826 
1827         for (i = 0; i < 16; i++)
1828                 *d++ = (greg32_t)*s++;
1829 }
1830 
1831 void
1832 rwindow_32ton(struct rwindow32 *src, struct rwindow *dest)
1833 {
1834         greg32_t *s = (greg32_t *)src;
1835         greg_t *d = (greg_t *)dest;
1836         int i;
1837 
1838         for (i = 0; i < 16; i++)
1839                 *d++ = (uint32_t)*s++;
1840 }
1841 
1842 #endif /* _SYSCALL32_IMPL */
1843 
1844 /*
1845  * The panic code invokes panic_saveregs() to record the contents of a
1846  * regs structure into the specified panic_data structure for debuggers.
1847  */
1848 void
1849 panic_saveregs(panic_data_t *pdp, struct regs *rp)
1850 {
1851         panic_nv_t *pnv = PANICNVGET(pdp);
1852 
1853         PANICNVADD(pnv, "tstate", rp->r_tstate);
1854         PANICNVADD(pnv, "g1", rp->r_g1);
1855         PANICNVADD(pnv, "g2", rp->r_g2);
1856         PANICNVADD(pnv, "g3", rp->r_g3);
1857         PANICNVADD(pnv, "g4", rp->r_g4);
1858         PANICNVADD(pnv, "g5", rp->r_g5);
1859         PANICNVADD(pnv, "g6", rp->r_g6);
1860         PANICNVADD(pnv, "g7", rp->r_g7);
1861         PANICNVADD(pnv, "o0", rp->r_o0);
1862         PANICNVADD(pnv, "o1", rp->r_o1);
1863         PANICNVADD(pnv, "o2", rp->r_o2);
1864         PANICNVADD(pnv, "o3", rp->r_o3);
1865         PANICNVADD(pnv, "o4", rp->r_o4);
1866         PANICNVADD(pnv, "o5", rp->r_o5);
1867         PANICNVADD(pnv, "o6", rp->r_o6);
1868         PANICNVADD(pnv, "o7", rp->r_o7);
1869         PANICNVADD(pnv, "pc", (ulong_t)rp->r_pc);
1870         PANICNVADD(pnv, "npc", (ulong_t)rp->r_npc);
1871         PANICNVADD(pnv, "y", (uint32_t)rp->r_y);
1872 
1873         PANICNVSET(pdp, pnv);
1874 }