Print this page
Patch review (webrev): 6345 remove xhat support — patch fixes

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/os/watchpoint.c
          +++ new/usr/src/uts/common/os/watchpoint.c
↓ open down ↓ 16 lines elided ↑ open up ↑
  17   17   * fields enclosed by brackets "[]" replaced with your own identifying
  18   18   * information: Portions Copyright [yyyy] [name of copyright owner]
  19   19   *
  20   20   * CDDL HEADER END
  21   21   */
  22   22  /*
  23   23   * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
  25   25   */
  26   26  
  27      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  28      -
  29   27  #include <sys/types.h>
  30   28  #include <sys/t_lock.h>
  31   29  #include <sys/param.h>
  32   30  #include <sys/cred.h>
  33   31  #include <sys/debug.h>
  34   32  #include <sys/inline.h>
  35   33  #include <sys/kmem.h>
  36   34  #include <sys/proc.h>
  37   35  #include <sys/regset.h>
  38   36  #include <sys/sysmacros.h>
↓ open down ↓ 136 lines elided ↑ open up ↑
 175  173          ASSERT(as != &kas);
 176  174  
 177  175  startover:
 178  176          ASSERT(rv == 0);
 179  177          if (avl_numnodes(&as->a_wpage) == 0)
 180  178                  return (0);
 181  179  
 182  180          /*
 183  181           * as->a_wpage can only be changed while the process is totally stopped.
 184  182           * Don't grab p_lock here.  Holding p_lock while grabbing the address
 185      -         * space lock leads to deadlocks with the clock thread.  Note that if an
 186      -         * as_fault() is servicing a fault to a watched page on behalf of an
 187      -         * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
 188      -         * will be set to wp_oprot).  Since this is done while holding as writer
 189      -         * lock, we need to grab as lock (reader lock is good enough).
      183 +         * space lock leads to deadlocks with the clock thread.
 190  184           *
 191  185           * p_maplock prevents simultaneous execution of this function.  Under
 192  186           * normal circumstances, holdwatch() will stop all other threads, so the
 193  187           * lock isn't really needed.  But there may be multiple threads within
 194  188           * stop() when SWATCHOK is set, so we need to handle multiple threads
 195  189           * at once.  See holdwatch() for the details of this dance.
 196  190           */
 197  191  
 198  192          mutex_enter(&p->p_maplock);
 199      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 200  193  
 201  194          tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 202  195          if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
 203  196                  pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
 204  197  
 205  198          for (; pwp != NULL && pwp->wp_vaddr < eaddr;
 206  199                  pwp = AVL_NEXT(&as->a_wpage, pwp)) {
 207  200  
 208  201                  /*
 209  202                   * If the requested protection has not been
↓ open down ↓ 14 lines elided ↑ open up ↑
 224  217                          if (!(prot & prot_rw))
 225  218                                  continue;
 226  219                  }
 227  220  
 228  221                  if (mapin) {
 229  222                          /*
 230  223                           * Before mapping the page in, ensure that
 231  224                           * all other lwps are held in the kernel.
 232  225                           */
 233  226                          if (p->p_mapcnt == 0) {
 234      -                                /*
 235      -                                 * Release as lock while in holdwatch()
 236      -                                 * in case other threads need to grab it.
 237      -                                 */
 238      -                                AS_LOCK_EXIT(as, &as->a_lock);
 239  227                                  mutex_exit(&p->p_maplock);
 240  228                                  if (holdwatch() != 0) {
 241  229                                          /*
 242  230                                           * We stopped in holdwatch().
 243  231                                           * Start all over again because the
 244  232                                           * watched page list may have changed.
 245  233                                           */
 246  234                                          goto startover;
 247  235                                  }
 248  236                                  mutex_enter(&p->p_maplock);
 249      -                                AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 250  237                          }
 251  238                          p->p_mapcnt++;
 252  239                  }
 253  240  
 254  241                  addr = pwp->wp_vaddr;
 255  242                  rv++;
 256  243  
 257  244                  prot = pwp->wp_prot;
 258  245                  if (mapin) {
 259  246                          if (kernel)
↓ open down ↓ 39 lines elided ↑ open up ↑
 299  286  #endif
 300  287                          }
 301  288                  }
 302  289  
 303  290  
 304  291                  if (pwp->wp_oprot != 0) {       /* if page exists */
 305  292                          struct seg *seg;
 306  293                          uint_t oprot;
 307  294                          int err, retrycnt = 0;
 308  295  
 309      -                        AS_LOCK_EXIT(as, &as->a_lock);
 310  296                          AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 311  297                  retry:
 312  298                          seg = as_segat(as, addr);
 313  299                          ASSERT(seg != NULL);
 314  300                          SEGOP_GETPROT(seg, addr, 0, &oprot);
 315  301                          if (prot != oprot) {
 316  302                                  err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
 317  303                                  if (err == IE_RETRY) {
 318  304                                          ASSERT(retrycnt == 0);
 319  305                                          retrycnt++;
 320  306                                          goto retry;
 321  307                                  }
 322  308                          }
 323  309                          AS_LOCK_EXIT(as, &as->a_lock);
 324      -                } else
 325      -                        AS_LOCK_EXIT(as, &as->a_lock);
      310 +                }
 326  311  
 327  312                  /*
 328  313                   * When all pages are mapped back to their normal state,
 329  314                   * continue the other lwps.
 330  315                   */
 331  316                  if (!mapin) {
 332  317                          ASSERT(p->p_mapcnt > 0);
 333  318                          p->p_mapcnt--;
 334  319                          if (p->p_mapcnt == 0) {
 335  320                                  mutex_exit(&p->p_maplock);
 336  321                                  mutex_enter(&p->p_lock);
 337  322                                  continuelwps(p);
 338  323                                  mutex_exit(&p->p_lock);
 339  324                                  mutex_enter(&p->p_maplock);
 340  325                          }
 341  326                  }
 342      -
 343      -                AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 344  327          }
 345  328  
 346      -        AS_LOCK_EXIT(as, &as->a_lock);
 347  329          mutex_exit(&p->p_maplock);
 348  330  
 349  331          return (rv);
 350  332  }
 351  333  
 352  334  /*
 353  335   * Restore the original page protections on an address range.
 354  336   * If 'kernel' is non-zero, just do it for the kernel.
 355  337   * pr_mappage() returns non-zero if it actually changed anything.
 356  338   *
↓ open down ↓ 72 lines elided ↑ open up ↑
 429  411  
 430  412                  pwp = next;
 431  413          }
 432  414          p->p_wprot = NULL;
 433  415  
 434  416          AS_LOCK_EXIT(as, &as->a_lock);
 435  417  }
 436  418  
 437  419  
 438  420  
 439      -/* Must be called with as lock held */
 440  421  int
 441  422  pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
 442  423  {
 443  424          register struct watched_page *pwp;
 444  425          struct watched_page tpw;
 445  426          uint_t prot;
 446  427          int rv = 0;
 447  428  
 448  429          switch (rw) {
 449  430          case S_READ:
↓ open down ↓ 42 lines elided ↑ open up ↑
 492  473  
 493  474  
 494  475  /*
 495  476   * trap() calls here to determine if a fault is in a watched page.
 496  477   * We return nonzero if this is true and the load/store would fail.
 497  478   */
 498  479  int
 499  480  pr_is_watchpage(caddr_t addr, enum seg_rw rw)
 500  481  {
 501  482          struct as *as = curproc->p_as;
 502      -        int rv;
 503  483  
 504  484          if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
 505  485                  return (0);
 506  486  
 507      -        /* Grab the lock because of XHAT (see comment in pr_mappage()) */
 508      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 509      -        rv = pr_is_watchpage_as(addr, rw, as);
 510      -        AS_LOCK_EXIT(as, &as->a_lock);
 511      -
 512      -        return (rv);
      487 +        return (pr_is_watchpage_as(addr, rw, as));
 513  488  }
 514  489  
 515  490  
 516  491  
 517  492  /*
 518  493   * trap() calls here to determine if a fault is a watchpoint.
 519  494   */
 520  495  int
 521  496  pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
 522  497          enum seg_rw rw)
↓ open down ↓ 1222 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX