Code review (webrev) printout — patch: 6345 remove xhat support
Shows usr/src/uts/common/os/watchpoint.c before and after the change.


   7  * with the License.
   8  *
   9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  10  * or http://www.opensolaris.org/os/licensing.
  11  * See the License for the specific language governing permissions
  12  * and limitations under the License.
  13  *
  14  * When distributing Covered Code, include this CDDL HEADER in each
  15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  16  * If applicable, add the following below this CDDL HEADER, with the
  17  * fields enclosed by brackets "[]" replaced with your own identifying
  18  * information: Portions Copyright [yyyy] [name of copyright owner]
  19  *
  20  * CDDL HEADER END
  21  */
  22 /*
  23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 #pragma ident   "%Z%%M% %I%     %E% SMI"
  28 
  29 #include <sys/types.h>
  30 #include <sys/t_lock.h>
  31 #include <sys/param.h>
  32 #include <sys/cred.h>
  33 #include <sys/debug.h>
  34 #include <sys/inline.h>
  35 #include <sys/kmem.h>
  36 #include <sys/proc.h>
  37 #include <sys/regset.h>
  38 #include <sys/sysmacros.h>
  39 #include <sys/systm.h>
  40 #include <sys/prsystm.h>
  41 #include <sys/buf.h>
  42 #include <sys/signal.h>
  43 #include <sys/user.h>
  44 #include <sys/cpuvar.h>
  45 
  46 #include <sys/fault.h>
  47 #include <sys/syscall.h>
  48 #include <sys/procfs.h>


 165         struct as *as = p->p_as;
 166         char *eaddr = addr + size;
 167         int prot_rw = rw_to_prot(rw);
 168         int xrw = rw_to_index(rw);
 169         int rv = 0;
 170         struct watched_page *pwp;
 171         struct watched_page tpw;
 172         avl_index_t where;
 173         uint_t prot;
 174 
 175         ASSERT(as != &kas);
 176 
 177 startover:
 178         ASSERT(rv == 0);
 179         if (avl_numnodes(&as->a_wpage) == 0)
 180                 return (0);
 181 
 182         /*
 183          * as->a_wpage can only be changed while the process is totally stopped.
 184          * Don't grab p_lock here.  Holding p_lock while grabbing the address
 185          * space lock leads to deadlocks with the clock thread.  Note that if an
 186          * as_fault() is servicing a fault to a watched page on behalf of an
 187          * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
 188          * will be set to wp_oprot).  Since this is done while holding as writer
 189          * lock, we need to grab as lock (reader lock is good enough).
 190          *
 191          * p_maplock prevents simultaneous execution of this function.  Under
 192          * normal circumstances, holdwatch() will stop all other threads, so the
 193          * lock isn't really needed.  But there may be multiple threads within
 194          * stop() when SWATCHOK is set, so we need to handle multiple threads
 195          * at once.  See holdwatch() for the details of this dance.
 196          */
 197 
 198         mutex_enter(&p->p_maplock);
 199         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 200 
 201         tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 202         if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
 203                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
 204 
 205         for (; pwp != NULL && pwp->wp_vaddr < eaddr;
 206                 pwp = AVL_NEXT(&as->a_wpage, pwp)) {
 207 
 208                 /*
 209                  * If the requested protection has not been
 210                  * removed, we need not remap this page.
 211                  */
 212                 prot = pwp->wp_prot;
 213                 if (kernel || (prot & PROT_USER))
 214                         if (prot & prot_rw)
 215                                 continue;
 216                 /*
 217                  * If the requested access does not exist in the page's
 218                  * original protections, we need not remap this page.
 219                  * If the page does not exist yet, we can't test it.
 220                  */
 221                 if ((prot = pwp->wp_oprot) != 0) {
 222                         if (!(kernel || (prot & PROT_USER)))
 223                                 continue;
 224                         if (!(prot & prot_rw))
 225                                 continue;
 226                 }
 227 
 228                 if (mapin) {
 229                         /*
 230                          * Before mapping the page in, ensure that
 231                          * all other lwps are held in the kernel.
 232                          */
 233                         if (p->p_mapcnt == 0) {
 234                                 /*
 235                                  * Release as lock while in holdwatch()
 236                                  * in case other threads need to grab it.
 237                                  */
 238                                 AS_LOCK_EXIT(as, &as->a_lock);
 239                                 mutex_exit(&p->p_maplock);
 240                                 if (holdwatch() != 0) {
 241                                         /*
 242                                          * We stopped in holdwatch().
 243                                          * Start all over again because the
 244                                          * watched page list may have changed.
 245                                          */
 246                                         goto startover;
 247                                 }
 248                                 mutex_enter(&p->p_maplock);
 249                                 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 250                         }
 251                         p->p_mapcnt++;
 252                 }
 253 
 254                 addr = pwp->wp_vaddr;
 255                 rv++;
 256 
 257                 prot = pwp->wp_prot;
 258                 if (mapin) {
 259                         if (kernel)
 260                                 pwp->wp_kmap[xrw]++;
 261                         else
 262                                 pwp->wp_umap[xrw]++;
 263                         pwp->wp_flags |= WP_NOWATCH;
 264                         if (pwp->wp_kmap[X] + pwp->wp_umap[X])
 265                                 /* cannot have exec-only protection */
 266                                 prot |= PROT_READ|PROT_EXEC;
 267                         if (pwp->wp_kmap[R] + pwp->wp_umap[R])
 268                                 prot |= PROT_READ;
 269                         if (pwp->wp_kmap[W] + pwp->wp_umap[W])


 289                                         /* cannot have exec-only protection */
 290                                         prot |= PROT_READ|PROT_EXEC;
 291                                 if (pwp->wp_kmap[R] + pwp->wp_umap[R])
 292                                         prot |= PROT_READ;
 293                                 if (pwp->wp_kmap[W] + pwp->wp_umap[W])
 294                                         /* cannot have write-only protection */
 295                                         prot |= PROT_READ|PROT_WRITE;
 296 #if 0   /* damned broken mmu feature! */
 297                                 if (sum(pwp->wp_umap) == 0)
 298                                         prot &= ~PROT_USER;
 299 #endif
 300                         }
 301                 }
 302 
 303 
 304                 if (pwp->wp_oprot != 0) {    /* if page exists */
 305                         struct seg *seg;
 306                         uint_t oprot;
 307                         int err, retrycnt = 0;
 308 
 309                         AS_LOCK_EXIT(as, &as->a_lock);
 310                         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 311                 retry:
 312                         seg = as_segat(as, addr);
 313                         ASSERT(seg != NULL);
 314                         SEGOP_GETPROT(seg, addr, 0, &oprot);
 315                         if (prot != oprot) {
 316                                 err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
 317                                 if (err == IE_RETRY) {
 318                                         ASSERT(retrycnt == 0);
 319                                         retrycnt++;
 320                                         goto retry;
 321                                 }
 322                         }
 323                         AS_LOCK_EXIT(as, &as->a_lock);
 324                 } else
 325                         AS_LOCK_EXIT(as, &as->a_lock);
 326 
 327                 /*
 328                  * When all pages are mapped back to their normal state,
 329                  * continue the other lwps.
 330                  */
 331                 if (!mapin) {
 332                         ASSERT(p->p_mapcnt > 0);
 333                         p->p_mapcnt--;
 334                         if (p->p_mapcnt == 0) {
 335                                 mutex_exit(&p->p_maplock);
 336                                 mutex_enter(&p->p_lock);
 337                                 continuelwps(p);
 338                                 mutex_exit(&p->p_lock);
 339                                 mutex_enter(&p->p_maplock);
 340                         }
 341                 }
 342 
 343                 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 344         }
 345 
 346         AS_LOCK_EXIT(as, &as->a_lock);
 347         mutex_exit(&p->p_maplock);
 348 
 349         return (rv);
 350 }
 351 
 352 /*
 353  * Restore the original page protections on an address range.
 354  * If 'kernel' is non-zero, just do it for the kernel.
 355  * pr_mappage() returns non-zero if it actually changed anything.
 356  *
 357  * pr_mappage() and pr_unmappage() must be executed in matched pairs,
 358  * but pairs may be nested within other pairs.  The reference counts
 359  * sort it all out.  See pr_do_mappage(), above.
 360  */
 361 static int
 362 pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
 363 {
 364         return (pr_do_mappage(addr, size, 1, rw, kernel));
 365 }
 366 


 419                 if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
 420                         /*
 421                          * No watched areas remain in this page.
 422                          * Free the watched_page structure.
 423                          */
 424                         avl_remove(&as->a_wpage, pwp);
 425                         kmem_free(pwp, sizeof (struct watched_page));
 426                 } else {
 427                         pwp->wp_flags &= ~WP_SETPROT;
 428                 }
 429 
 430                 pwp = next;
 431         }
 432         p->p_wprot = NULL;
 433 
 434         AS_LOCK_EXIT(as, &as->a_lock);
 435 }
 436 
 437 
 438 
 439 /* Must be called with as lock held */
 440 int
 441 pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
 442 {
 443         register struct watched_page *pwp;
 444         struct watched_page tpw;
 445         uint_t prot;
 446         int rv = 0;
 447 
 448         switch (rw) {
 449         case S_READ:
 450         case S_WRITE:
 451         case S_EXEC:
 452                 break;
 453         default:
 454                 return (0);
 455         }
 456 
 457         /*
 458          * as->a_wpage can only be modified while the process is totally
 459          * stopped.  We need, and should use, no locks here.


 482                                 default:
 483                                         /* can't happen! */
 484                                         break;
 485                                 }
 486                         }
 487                 }
 488         }
 489 
 490         return (rv);
 491 }
 492 
 493 
 494 /*
 495  * trap() calls here to determine if a fault is in a watched page.
 496  * We return nonzero if this is true and the load/store would fail.
 497  */
 498 int
 499 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
 500 {
 501         struct as *as = curproc->p_as;
 502         int rv;
 503 
 504         if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
 505                 return (0);
 506 
 507         /* Grab the lock because of XHAT (see comment in pr_mappage()) */
 508         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 509         rv = pr_is_watchpage_as(addr, rw, as);
 510         AS_LOCK_EXIT(as, &as->a_lock);
 511 
 512         return (rv);
 513 }
 514 
 515 
 516 
 517 /*
 518  * trap() calls here to determine if a fault is a watchpoint.
 519  */
 520 int
 521 pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
 522         enum seg_rw rw)
 523 {
 524         proc_t *p = curproc;
 525         caddr_t addr = *paddr;
 526         caddr_t eaddr = addr + size;
 527         register struct watched_area *pwa;
 528         struct watched_area twa;
 529         int rv = 0;
 530         int ta = 0;
 531         size_t len = 0;
 532 




   7  * with the License.
   8  *
   9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  10  * or http://www.opensolaris.org/os/licensing.
  11  * See the License for the specific language governing permissions
  12  * and limitations under the License.
  13  *
  14  * When distributing Covered Code, include this CDDL HEADER in each
  15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  16  * If applicable, add the following below this CDDL HEADER, with the
  17  * fields enclosed by brackets "[]" replaced with your own identifying
  18  * information: Portions Copyright [yyyy] [name of copyright owner]
  19  *
  20  * CDDL HEADER END
  21  */
  22 /*
  23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 


  27 #include <sys/types.h>
  28 #include <sys/t_lock.h>
  29 #include <sys/param.h>
  30 #include <sys/cred.h>
  31 #include <sys/debug.h>
  32 #include <sys/inline.h>
  33 #include <sys/kmem.h>
  34 #include <sys/proc.h>
  35 #include <sys/regset.h>
  36 #include <sys/sysmacros.h>
  37 #include <sys/systm.h>
  38 #include <sys/prsystm.h>
  39 #include <sys/buf.h>
  40 #include <sys/signal.h>
  41 #include <sys/user.h>
  42 #include <sys/cpuvar.h>
  43 
  44 #include <sys/fault.h>
  45 #include <sys/syscall.h>
  46 #include <sys/procfs.h>


 163         struct as *as = p->p_as;
 164         char *eaddr = addr + size;
 165         int prot_rw = rw_to_prot(rw);
 166         int xrw = rw_to_index(rw);
 167         int rv = 0;
 168         struct watched_page *pwp;
 169         struct watched_page tpw;
 170         avl_index_t where;
 171         uint_t prot;
 172 
 173         ASSERT(as != &kas);
 174 
 175 startover:
 176         ASSERT(rv == 0);
 177         if (avl_numnodes(&as->a_wpage) == 0)
 178                 return (0);
 179 
 180         /*
 181          * as->a_wpage can only be changed while the process is totally stopped.
 182          * Don't grab p_lock here.  Holding p_lock while grabbing the address
 183          * space lock leads to deadlocks with the clock thread.




 184          *
 185          * p_maplock prevents simultaneous execution of this function.  Under
 186          * normal circumstances, holdwatch() will stop all other threads, so the
 187          * lock isn't really needed.  But there may be multiple threads within
 188          * stop() when SWATCHOK is set, so we need to handle multiple threads
 189          * at once.  See holdwatch() for the details of this dance.
 190          */
 191 
 192         mutex_enter(&p->p_maplock);

 193 
 194         tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 195         if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
 196                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
 197 
 198         for (; pwp != NULL && pwp->wp_vaddr < eaddr;
 199                 pwp = AVL_NEXT(&as->a_wpage, pwp)) {
 200 
 201                 /*
 202                  * If the requested protection has not been
 203                  * removed, we need not remap this page.
 204                  */
 205                 prot = pwp->wp_prot;
 206                 if (kernel || (prot & PROT_USER))
 207                         if (prot & prot_rw)
 208                                 continue;
 209                 /*
 210                  * If the requested access does not exist in the page's
 211                  * original protections, we need not remap this page.
 212                  * If the page does not exist yet, we can't test it.
 213                  */
 214                 if ((prot = pwp->wp_oprot) != 0) {
 215                         if (!(kernel || (prot & PROT_USER)))
 216                                 continue;
 217                         if (!(prot & prot_rw))
 218                                 continue;
 219                 }
 220 
 221                 if (mapin) {
 222                         /*
 223                          * Before mapping the page in, ensure that
 224                          * all other lwps are held in the kernel.
 225                          */
 226                         if (p->p_mapcnt == 0) {





 227                                 mutex_exit(&p->p_maplock);
 228                                 if (holdwatch() != 0) {
 229                                         /*
 230                                          * We stopped in holdwatch().
 231                                          * Start all over again because the
 232                                          * watched page list may have changed.
 233                                          */
 234                                         goto startover;
 235                                 }
 236                                 mutex_enter(&p->p_maplock);

 237                         }
 238                         p->p_mapcnt++;
 239                 }
 240 
 241                 addr = pwp->wp_vaddr;
 242                 rv++;
 243 
 244                 prot = pwp->wp_prot;
 245                 if (mapin) {
 246                         if (kernel)
 247                                 pwp->wp_kmap[xrw]++;
 248                         else
 249                                 pwp->wp_umap[xrw]++;
 250                         pwp->wp_flags |= WP_NOWATCH;
 251                         if (pwp->wp_kmap[X] + pwp->wp_umap[X])
 252                                 /* cannot have exec-only protection */
 253                                 prot |= PROT_READ|PROT_EXEC;
 254                         if (pwp->wp_kmap[R] + pwp->wp_umap[R])
 255                                 prot |= PROT_READ;
 256                         if (pwp->wp_kmap[W] + pwp->wp_umap[W])


 276                                         /* cannot have exec-only protection */
 277                                         prot |= PROT_READ|PROT_EXEC;
 278                                 if (pwp->wp_kmap[R] + pwp->wp_umap[R])
 279                                         prot |= PROT_READ;
 280                                 if (pwp->wp_kmap[W] + pwp->wp_umap[W])
 281                                         /* cannot have write-only protection */
 282                                         prot |= PROT_READ|PROT_WRITE;
 283 #if 0   /* damned broken mmu feature! */
 284                                 if (sum(pwp->wp_umap) == 0)
 285                                         prot &= ~PROT_USER;
 286 #endif
 287                         }
 288                 }
 289 
 290 
 291                 if (pwp->wp_oprot != 0) {    /* if page exists */
 292                         struct seg *seg;
 293                         uint_t oprot;
 294                         int err, retrycnt = 0;
 295 

 296                         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 297                 retry:
 298                         seg = as_segat(as, addr);
 299                         ASSERT(seg != NULL);
 300                         SEGOP_GETPROT(seg, addr, 0, &oprot);
 301                         if (prot != oprot) {
 302                                 err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
 303                                 if (err == IE_RETRY) {
 304                                         ASSERT(retrycnt == 0);
 305                                         retrycnt++;
 306                                         goto retry;
 307                                 }
 308                         }
 309                         AS_LOCK_EXIT(as, &as->a_lock);
 310                 }

 311 
 312                 /*
 313                  * When all pages are mapped back to their normal state,
 314                  * continue the other lwps.
 315                  */
 316                 if (!mapin) {
 317                         ASSERT(p->p_mapcnt > 0);
 318                         p->p_mapcnt--;
 319                         if (p->p_mapcnt == 0) {
 320                                 mutex_exit(&p->p_maplock);
 321                                 mutex_enter(&p->p_lock);
 322                                 continuelwps(p);
 323                                 mutex_exit(&p->p_lock);
 324                                 mutex_enter(&p->p_maplock);
 325                         }
 326                 }


 327         }
 328 

 329         mutex_exit(&p->p_maplock);
 330 
 331         return (rv);
 332 }
 333 
 334 /*
 335  * Restore the original page protections on an address range.
 336  * If 'kernel' is non-zero, just do it for the kernel.
 337  * pr_mappage() returns non-zero if it actually changed anything.
 338  *
 339  * pr_mappage() and pr_unmappage() must be executed in matched pairs,
 340  * but pairs may be nested within other pairs.  The reference counts
 341  * sort it all out.  See pr_do_mappage(), above.
 342  */
 343 static int
 344 pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
 345 {
 346         return (pr_do_mappage(addr, size, 1, rw, kernel));
 347 }
 348 


 401                 if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
 402                         /*
 403                          * No watched areas remain in this page.
 404                          * Free the watched_page structure.
 405                          */
 406                         avl_remove(&as->a_wpage, pwp);
 407                         kmem_free(pwp, sizeof (struct watched_page));
 408                 } else {
 409                         pwp->wp_flags &= ~WP_SETPROT;
 410                 }
 411 
 412                 pwp = next;
 413         }
 414         p->p_wprot = NULL;
 415 
 416         AS_LOCK_EXIT(as, &as->a_lock);
 417 }
 418 
 419 
 420 

 421 int
 422 pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
 423 {
 424         register struct watched_page *pwp;
 425         struct watched_page tpw;
 426         uint_t prot;
 427         int rv = 0;
 428 
 429         switch (rw) {
 430         case S_READ:
 431         case S_WRITE:
 432         case S_EXEC:
 433                 break;
 434         default:
 435                 return (0);
 436         }
 437 
 438         /*
 439          * as->a_wpage can only be modified while the process is totally
 440          * stopped.  We need, and should use, no locks here.


 463                                 default:
 464                                         /* can't happen! */
 465                                         break;
 466                                 }
 467                         }
 468                 }
 469         }
 470 
 471         return (rv);
 472 }
 473 
 474 
 475 /*
 476  * trap() calls here to determine if a fault is in a watched page.
 477  * We return nonzero if this is true and the load/store would fail.
 478  */
 479 int
 480 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
 481 {
 482         struct as *as = curproc->p_as;

 483 
 484         if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
 485                 return (0);
 486 
 487         return (pr_is_watchpage_as(addr, rw, as));





 488 }
 489 
 490 
 491 
 492 /*
 493  * trap() calls here to determine if a fault is a watchpoint.
 494  */
 495 int
 496 pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
 497         enum seg_rw rw)
 498 {
 499         proc_t *p = curproc;
 500         caddr_t addr = *paddr;
 501         caddr_t eaddr = addr + size;
 502         register struct watched_area *pwa;
 503         struct watched_area twa;
 504         int rv = 0;
 505         int ta = 0;
 506         size_t len = 0;
 507