Print this page
patch as-lock-macro-simplification

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/vm_as.c
          +++ new/usr/src/uts/common/vm/vm_as.c
↓ open down ↓ 352 lines elided ↑ open up ↑
 353  353   *
 354  354   * a_seglast is used to cache the last found segment for repeated
 355  355   * searches to the same addr (which happens frequently).
 356  356   */
 357  357  struct seg *
 358  358  as_findseg(struct as *as, caddr_t addr, int tail)
 359  359  {
 360  360          struct seg *seg = as->a_seglast;
 361  361          avl_index_t where;
 362  362  
 363      -        ASSERT(AS_LOCK_HELD(as, &as->a_lock));
      363 +        ASSERT(AS_LOCK_HELD(as));
 364  364  
 365  365          if (seg != NULL &&
 366  366              seg->s_base <= addr &&
 367  367              addr < seg->s_base + seg->s_size)
 368  368                  return (seg);
 369  369  
 370  370          seg = avl_find(&as->a_segtree, &addr, &where);
 371  371          if (seg != NULL)
 372  372                  return (as->a_seglast = seg);
 373  373  
↓ open down ↓ 41 lines elided ↑ open up ↑
 415  415   * in as_gap() as an insertion point.
 416  416   */
 417  417  int
 418  418  as_addseg(struct as  *as, struct seg *newseg)
 419  419  {
 420  420          struct seg *seg;
 421  421          caddr_t addr;
 422  422          caddr_t eaddr;
 423  423          avl_index_t where;
 424  424  
 425      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
      425 +        ASSERT(AS_WRITE_HELD(as));
 426  426  
 427  427          as->a_updatedir = 1;    /* inform /proc */
 428  428          gethrestime(&as->a_updatetime);
 429  429  
 430  430          if (as->a_lastgaphl != NULL) {
 431  431                  struct seg *hseg = NULL;
 432  432                  struct seg *lseg = NULL;
 433  433  
 434  434                  if (as->a_lastgaphl->s_base > newseg->s_base) {
 435  435                          hseg = as->a_lastgaphl;
↓ open down ↓ 61 lines elided ↑ open up ↑
 497  497          as_verify(as);
 498  498  #endif
 499  499          return (0);
 500  500  }
 501  501  
 502  502  struct seg *
 503  503  as_removeseg(struct as *as, struct seg *seg)
 504  504  {
 505  505          avl_tree_t *t;
 506  506  
 507      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
      507 +        ASSERT(AS_WRITE_HELD(as));
 508  508  
 509  509          as->a_updatedir = 1;    /* inform /proc */
 510  510          gethrestime(&as->a_updatetime);
 511  511  
 512  512          if (seg == NULL)
 513  513                  return (NULL);
 514  514  
 515  515          t = &as->a_segtree;
 516  516          if (as->a_seglast == seg)
 517  517                  as->a_seglast = NULL;
↓ open down ↓ 19 lines elided ↑ open up ↑
 537  537  }
 538  538  
 539  539  /*
 540  540   * Find a segment containing addr.
 541  541   */
 542  542  struct seg *
 543  543  as_segat(struct as *as, caddr_t addr)
 544  544  {
 545  545          struct seg *seg = as->a_seglast;
 546  546  
 547      -        ASSERT(AS_LOCK_HELD(as, &as->a_lock));
      547 +        ASSERT(AS_LOCK_HELD(as));
 548  548  
 549  549          if (seg != NULL && seg->s_base <= addr &&
 550  550              addr < seg->s_base + seg->s_size)
 551  551                  return (seg);
 552  552  
 553  553          seg = avl_find(&as->a_segtree, &addr, NULL);
 554  554          return (seg);
 555  555  }
 556  556  
 557  557  /*
↓ open down ↓ 102 lines elided ↑ open up ↑
 660  660          as->a_resvsize          = 0;
 661  661          as->a_updatedir         = 0;
 662  662          gethrestime(&as->a_updatetime);
 663  663          as->a_objectdir         = NULL;
 664  664          as->a_sizedir           = 0;
 665  665          as->a_userlimit         = (caddr_t)USERLIMIT;
 666  666          as->a_lastgap           = NULL;
 667  667          as->a_lastgaphl         = NULL;
 668  668          as->a_callbacks         = NULL;
 669  669  
 670      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
      670 +        AS_LOCK_ENTER(as, RW_WRITER);
 671  671          as->a_hat = hat_alloc(as);      /* create hat for default system mmu */
 672      -        AS_LOCK_EXIT(as, &as->a_lock);
      672 +        AS_LOCK_EXIT(as);
 673  673  
 674  674          as->a_xhat = NULL;
 675  675  
 676  676          return (as);
 677  677  }
 678  678  
 679  679  /*
 680  680   * Free an address space data structure.
 681  681   * Need to free the hat first and then
 682  682   * all the segments on this as and finally
↓ open down ↓ 13 lines elided ↑ open up ↑
 696  696           * When as_do_callbacks returns zero, all callbacks have completed.
 697  697           */
 698  698          mutex_enter(&as->a_contents);
 699  699          while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
 700  700                  ;
 701  701  
 702  702          /* This will prevent new XHATs from attaching to as */
 703  703          if (!called)
 704  704                  AS_SETBUSY(as);
 705  705          mutex_exit(&as->a_contents);
 706      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
      706 +        AS_LOCK_ENTER(as, RW_WRITER);
 707  707  
 708  708          if (!called) {
 709  709                  called = 1;
 710  710                  hat_free_start(hat);
 711  711                  if (as->a_xhat != NULL)
 712  712                          xhat_free_start_all(as);
 713  713          }
 714  714          for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
 715  715                  int err;
 716  716  
 717  717                  next = AS_SEGNEXT(as, seg);
 718  718  retry:
 719  719                  err = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
 720  720                  if (err == EAGAIN) {
 721  721                          mutex_enter(&as->a_contents);
 722  722                          if (as->a_callbacks) {
 723      -                                AS_LOCK_EXIT(as, &as->a_lock);
      723 +                                AS_LOCK_EXIT(as);
 724  724                          } else if (!AS_ISNOUNMAPWAIT(as)) {
 725  725                                  /*
 726  726                                   * Memory is currently locked. Wait for a
 727  727                                   * cv_signal that it has been unlocked, then
 728  728                                   * try the operation again.
 729  729                                   */
 730  730                                  if (AS_ISUNMAPWAIT(as) == 0)
 731  731                                          cv_broadcast(&as->a_cv);
 732  732                                  AS_SETUNMAPWAIT(as);
 733      -                                AS_LOCK_EXIT(as, &as->a_lock);
      733 +                                AS_LOCK_EXIT(as);
 734  734                                  while (AS_ISUNMAPWAIT(as))
 735  735                                          cv_wait(&as->a_cv, &as->a_contents);
 736  736                          } else {
 737  737                                  /*
 738  738                                   * We may have raced with
 739  739                                   * segvn_reclaim()/segspt_reclaim(). In this
 740  740                                   * case clean nounmapwait flag and retry since
 741  741                                   * softlockcnt in this segment may be already
 742  742                                   * 0.  We don't drop as writer lock so our
 743  743                                   * number of retries without sleeping should
↓ open down ↓ 10 lines elided ↑ open up ↑
 754  754                          /*
 755  755                           * We do not expect any other error return at this
 756  756                           * time. This is similar to an ASSERT in seg_unmap()
 757  757                           */
 758  758                          ASSERT(err == 0);
 759  759                  }
 760  760          }
 761  761          hat_free_end(hat);
 762  762          if (as->a_xhat != NULL)
 763  763                  xhat_free_end_all(as);
 764      -        AS_LOCK_EXIT(as, &as->a_lock);
      764 +        AS_LOCK_EXIT(as);
 765  765  
 766  766          /* /proc stuff */
 767  767          ASSERT(avl_numnodes(&as->a_wpage) == 0);
 768  768          if (as->a_objectdir) {
 769  769                  kmem_free(as->a_objectdir, as->a_sizedir * sizeof (vnode_t *));
 770  770                  as->a_objectdir = NULL;
 771  771                  as->a_sizedir = 0;
 772  772          }
 773  773  
 774  774          /*
↓ open down ↓ 4 lines elided ↑ open up ↑
 779  779  }
 780  780  
 781  781  int
 782  782  as_dup(struct as *as, struct proc *forkedproc)
 783  783  {
 784  784          struct as *newas;
 785  785          struct seg *seg, *newseg;
 786  786          size_t  purgesize = 0;
 787  787          int error;
 788  788  
 789      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
      789 +        AS_LOCK_ENTER(as, RW_WRITER);
 790  790          as_clearwatch(as);
 791  791          newas = as_alloc();
 792  792          newas->a_userlimit = as->a_userlimit;
 793  793          newas->a_proc = forkedproc;
 794  794  
 795      -        AS_LOCK_ENTER(newas, &newas->a_lock, RW_WRITER);
      795 +        AS_LOCK_ENTER(newas, RW_WRITER);
 796  796  
 797  797          /* This will prevent new XHATs from attaching */
 798  798          mutex_enter(&as->a_contents);
 799  799          AS_SETBUSY(as);
 800  800          mutex_exit(&as->a_contents);
 801  801          mutex_enter(&newas->a_contents);
 802  802          AS_SETBUSY(newas);
 803  803          mutex_exit(&newas->a_contents);
 804  804  
 805  805          (void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
 806  806  
 807  807          for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
 808  808  
 809  809                  if (seg->s_flags & S_PURGE) {
 810  810                          purgesize += seg->s_size;
 811  811                          continue;
 812  812                  }
 813  813  
 814  814                  newseg = seg_alloc(newas, seg->s_base, seg->s_size);
 815  815                  if (newseg == NULL) {
 816      -                        AS_LOCK_EXIT(newas, &newas->a_lock);
      816 +                        AS_LOCK_EXIT(newas);
 817  817                          as_setwatch(as);
 818  818                          mutex_enter(&as->a_contents);
 819  819                          AS_CLRBUSY(as);
 820  820                          mutex_exit(&as->a_contents);
 821      -                        AS_LOCK_EXIT(as, &as->a_lock);
      821 +                        AS_LOCK_EXIT(as);
 822  822                          as_free(newas);
 823  823                          return (-1);
 824  824                  }
 825  825                  if ((error = SEGOP_DUP(seg, newseg)) != 0) {
 826  826                          /*
 827  827                           * We call seg_free() on the new seg
 828  828                           * because the segment is not set up
 829  829                           * completely; i.e. it has no ops.
 830  830                           */
 831  831                          as_setwatch(as);
 832  832                          mutex_enter(&as->a_contents);
 833  833                          AS_CLRBUSY(as);
 834  834                          mutex_exit(&as->a_contents);
 835      -                        AS_LOCK_EXIT(as, &as->a_lock);
      835 +                        AS_LOCK_EXIT(as);
 836  836                          seg_free(newseg);
 837      -                        AS_LOCK_EXIT(newas, &newas->a_lock);
      837 +                        AS_LOCK_EXIT(newas);
 838  838                          as_free(newas);
 839  839                          return (error);
 840  840                  }
 841  841                  newas->a_size += seg->s_size;
 842  842          }
 843  843          newas->a_resvsize = as->a_resvsize - purgesize;
 844  844  
 845  845          error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
 846  846          if (as->a_xhat != NULL)
 847  847                  error |= xhat_dup_all(as, newas, NULL, 0, HAT_DUP_ALL);
 848  848  
 849  849          mutex_enter(&newas->a_contents);
 850  850          AS_CLRBUSY(newas);
 851  851          mutex_exit(&newas->a_contents);
 852      -        AS_LOCK_EXIT(newas, &newas->a_lock);
      852 +        AS_LOCK_EXIT(newas);
 853  853  
 854  854          as_setwatch(as);
 855  855          mutex_enter(&as->a_contents);
 856  856          AS_CLRBUSY(as);
 857  857          mutex_exit(&as->a_contents);
 858      -        AS_LOCK_EXIT(as, &as->a_lock);
      858 +        AS_LOCK_EXIT(as);
 859  859          if (error != 0) {
 860  860                  as_free(newas);
 861  861                  return (error);
 862  862          }
 863  863          forkedproc->p_as = newas;
 864  864          return (0);
 865  865  }
 866  866  
 867  867  /*
 868  868   * Handle a ``fault'' at addr for size bytes.
↓ open down ↓ 83 lines elided ↑ open up ↑
 952  952           */
 953  953          if (as == &kas && segkmap && segkmap->s_base <= raddr &&
 954  954              raddr + size < segkmap->s_base + segkmap->s_size) {
 955  955                  /*
 956  956                   * if (as==&kas), this can't be XHAT: we've already returned
 957  957                   * FC_NOSUPPORT.
 958  958                   */
 959  959                  seg = segkmap;
 960  960                  as_lock_held = 0;
 961  961          } else {
 962      -                AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
      962 +                AS_LOCK_ENTER(as, RW_READER);
 963  963                  if (is_xhat && avl_numnodes(&as->a_wpage) != 0) {
 964  964                          /*
 965  965                           * Grab and hold the writers' lock on the as
 966  966                           * if the fault is to a watched page.
 967  967                           * This will keep CPUs from "peeking" at the
 968  968                           * address range while we're temporarily boosting
 969  969                           * the permissions for the XHAT device to
 970  970                           * resolve the fault in the segment layer.
 971  971                           *
 972  972                           * We could check whether faulted address
 973  973                           * is within a watched page and only then grab
 974  974                           * the writer lock, but this is simpler.
 975  975                           */
 976      -                        AS_LOCK_EXIT(as, &as->a_lock);
 977      -                        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
      976 +                        AS_LOCK_EXIT(as);
      977 +                        AS_LOCK_ENTER(as, RW_WRITER);
 978  978                  }
 979  979  
 980  980                  seg = as_segat(as, raddr);
 981  981                  if (seg == NULL) {
 982      -                        AS_LOCK_EXIT(as, &as->a_lock);
      982 +                        AS_LOCK_EXIT(as);
 983  983                          if ((lwp != NULL) && (!is_xhat))
 984  984                                  lwp->lwp_nostop--;
 985  985                          return (FC_NOMAP);
 986  986                  }
 987  987  
 988  988                  as_lock_held = 1;
 989  989          }
 990  990  
 991  991          addrsav = raddr;
 992  992          segsav = seg;
↓ open down ↓ 60 lines elided ↑ open up ↑
1053 1053                           */
1054 1054                          if (raddr > seg->s_base + seg->s_size)
1055 1055                                  ssize = seg->s_base + seg->s_size - addrsav;
1056 1056                          else
1057 1057                                  ssize = raddr - addrsav;
1058 1058                          (void) SEGOP_FAULT(hat, seg, addrsav, ssize,
1059 1059                              F_SOFTUNLOCK, S_OTHER);
1060 1060                  }
1061 1061          }
1062 1062          if (as_lock_held)
1063      -                AS_LOCK_EXIT(as, &as->a_lock);
     1063 +                AS_LOCK_EXIT(as);
1064 1064          if ((lwp != NULL) && (!is_xhat))
1065 1065                  lwp->lwp_nostop--;
1066 1066  
1067 1067          /*
1068 1068           * If the lower levels returned EDEADLK for a fault,
1069 1069           * It means that we should retry the fault.  Let's wait
1070 1070           * a bit also to let the deadlock causing condition clear.
1071 1071           * This is part of a gross hack to work around a design flaw
1072 1072           * in the ufs/sds logging code and should go away when the
1073 1073           * logging code is re-designed to fix the problem. See bug
↓ open down ↓ 27 lines elided ↑ open up ↑
1101 1101           * for a pagefault.  This is to avoid deadlock while debugging
1102 1102           * a process via /proc over NFS (in particular).
1103 1103           */
1104 1104          if (lwp != NULL)
1105 1105                  lwp->lwp_nostop++;
1106 1106  
1107 1107          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1108 1108          rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
1109 1109              (size_t)raddr;
1110 1110  
1111      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     1111 +        AS_LOCK_ENTER(as, RW_READER);
1112 1112          seg = as_segat(as, raddr);
1113 1113          if (seg == NULL) {
1114      -                AS_LOCK_EXIT(as, &as->a_lock);
     1114 +                AS_LOCK_EXIT(as);
1115 1115                  if (lwp != NULL)
1116 1116                          lwp->lwp_nostop--;
1117 1117                  return (FC_NOMAP);
1118 1118          }
1119 1119  
1120 1120          for (; rsize != 0; rsize -= PAGESIZE, raddr += PAGESIZE) {
1121 1121                  if (raddr >= seg->s_base + seg->s_size) {
1122 1122                          seg = AS_SEGNEXT(as, seg);
1123 1123                          if (seg == NULL || raddr != seg->s_base) {
1124 1124                                  res = FC_NOMAP;
1125 1125                                  break;
1126 1126                          }
1127 1127                  }
1128 1128                  res = SEGOP_FAULTA(seg, raddr);
1129 1129                  if (res != 0)
1130 1130                          break;
1131 1131          }
1132      -        AS_LOCK_EXIT(as, &as->a_lock);
     1132 +        AS_LOCK_EXIT(as);
1133 1133          if (lwp != NULL)
1134 1134                  lwp->lwp_nostop--;
1135 1135          /*
1136 1136           * If the lower levels returned EDEADLK for a fault,
1137 1137           * It means that we should retry the fault.  Let's wait
1138 1138           * a bit also to let the deadlock causing condition clear.
1139 1139           * This is part of a gross hack to work around a design flaw
1140 1140           * in the ufs/sds logging code and should go away when the
1141 1141           * logging code is re-designed to fix the problem. See bug
1142 1142           * 4125102 for details of the problem.
↓ open down ↓ 39 lines elided ↑ open up ↑
1182 1182           * Normally we only lock the as as a reader. But
1183 1183           * if due to setprot the segment driver needs to split
1184 1184           * a segment it will return IE_RETRY. Therefore we re-acquire
1185 1185           * the as lock as a writer so the segment driver can change
1186 1186           * the seg list. Also the segment driver will return IE_RETRY
1187 1187           * after it has changed the segment list so we therefore keep
 1188 1188           * locking as a writer. Since these operations should be rare
 1189 1189           * we want to only lock as a writer when necessary.
1190 1190           */
1191 1191          if (writer || avl_numnodes(&as->a_wpage) != 0) {
1192      -                AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     1192 +                AS_LOCK_ENTER(as, RW_WRITER);
1193 1193          } else {
1194      -                AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     1194 +                AS_LOCK_ENTER(as, RW_READER);
1195 1195          }
1196 1196  
1197 1197          as_clearwatchprot(as, raddr, rsize);
1198 1198          seg = as_segat(as, raddr);
1199 1199          if (seg == NULL) {
1200 1200                  as_setwatch(as);
1201      -                AS_LOCK_EXIT(as, &as->a_lock);
     1201 +                AS_LOCK_EXIT(as);
1202 1202                  return (ENOMEM);
1203 1203          }
1204 1204  
1205 1205          for (; rsize != 0; rsize -= ssize, raddr += ssize) {
1206 1206                  if (raddr >= seg->s_base + seg->s_size) {
1207 1207                          seg = AS_SEGNEXT(as, seg);
1208 1208                          if (seg == NULL || raddr != seg->s_base) {
1209 1209                                  error = ENOMEM;
1210 1210                                  break;
1211 1211                          }
↓ open down ↓ 4 lines elided ↑ open up ↑
1216 1216                          ssize = rsize;
1217 1217  retry:
1218 1218                  error = SEGOP_SETPROT(seg, raddr, ssize, prot);
1219 1219  
1220 1220                  if (error == IE_NOMEM) {
1221 1221                          error = EAGAIN;
1222 1222                          break;
1223 1223                  }
1224 1224  
1225 1225                  if (error == IE_RETRY) {
1226      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1226 +                        AS_LOCK_EXIT(as);
1227 1227                          writer = 1;
1228 1228                          goto setprot_top;
1229 1229                  }
1230 1230  
1231 1231                  if (error == EAGAIN) {
1232 1232                          /*
1233 1233                           * Make sure we have a_lock as writer.
1234 1234                           */
1235 1235                          if (writer == 0) {
1236      -                                AS_LOCK_EXIT(as, &as->a_lock);
     1236 +                                AS_LOCK_EXIT(as);
1237 1237                                  writer = 1;
1238 1238                                  goto setprot_top;
1239 1239                          }
1240 1240  
1241 1241                          /*
1242 1242                           * Memory is currently locked.  It must be unlocked
1243 1243                           * before this operation can succeed through a retry.
1244 1244                           * The possible reasons for locked memory and
1245 1245                           * corresponding strategies for unlocking are:
1246 1246                           * (1) Normal I/O
↓ open down ↓ 20 lines elided ↑ open up ↑
1267 1267                           *      before the callback, so only one callback can
1268 1268                           *      be done at a time. Go to the top and do more
1269 1269                           *      until zero is returned. If zero is returned,
1270 1270                           *      either there were no callbacks for this event
1271 1271                           *      or they were already in progress.
1272 1272                           */
1273 1273                          mutex_enter(&as->a_contents);
1274 1274                          if (as->a_callbacks &&
1275 1275                              (cb = as_find_callback(as, AS_SETPROT_EVENT,
1276 1276                              seg->s_base, seg->s_size))) {
1277      -                                AS_LOCK_EXIT(as, &as->a_lock);
     1277 +                                AS_LOCK_EXIT(as);
1278 1278                                  as_execute_callback(as, cb, AS_SETPROT_EVENT);
1279 1279                          } else if (!AS_ISNOUNMAPWAIT(as)) {
1280 1280                                  if (AS_ISUNMAPWAIT(as) == 0)
1281 1281                                          cv_broadcast(&as->a_cv);
1282 1282                                  AS_SETUNMAPWAIT(as);
1283      -                                AS_LOCK_EXIT(as, &as->a_lock);
     1283 +                                AS_LOCK_EXIT(as);
1284 1284                                  while (AS_ISUNMAPWAIT(as))
1285 1285                                          cv_wait(&as->a_cv, &as->a_contents);
1286 1286                          } else {
1287 1287                                  /*
1288 1288                                   * We may have raced with
1289 1289                                   * segvn_reclaim()/segspt_reclaim(). In this
1290 1290                                   * case clean nounmapwait flag and retry since
1291 1291                                   * softlockcnt in this segment may be already
1292 1292                                   * 0.  We don't drop as writer lock so our
1293 1293                                   * number of retries without sleeping should
↓ open down ↓ 7 lines elided ↑ open up ↑
1301 1301                          mutex_exit(&as->a_contents);
1302 1302                          goto setprot_top;
1303 1303                  } else if (error != 0)
1304 1304                          break;
1305 1305          }
1306 1306          if (error != 0) {
1307 1307                  as_setwatch(as);
1308 1308          } else {
1309 1309                  as_setwatchprot(as, saveraddr, saversize, prot);
1310 1310          }
1311      -        AS_LOCK_EXIT(as, &as->a_lock);
     1311 +        AS_LOCK_EXIT(as);
1312 1312          return (error);
1313 1313  }
1314 1314  
1315 1315  /*
1316 1316   * Check to make sure that the interval [addr, addr + size)
1317 1317   * in address space `as' has at least the specified protection.
1318 1318   * It is ok for the range to cross over several segments, as long
1319 1319   * as they are contiguous.
1320 1320   */
1321 1321  int
↓ open down ↓ 13 lines elided ↑ open up ↑
1335 1335                  return (ENOMEM);
1336 1336  
1337 1337          /*
1338 1338           * This is ugly as sin...
1339 1339           * Normally, we only acquire the address space readers lock.
1340 1340           * However, if the address space has watchpoints present,
1341 1341           * we must acquire the writer lock on the address space for
1342 1342           * the benefit of as_clearwatchprot() and as_setwatchprot().
1343 1343           */
1344 1344          if (avl_numnodes(&as->a_wpage) != 0)
1345      -                AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     1345 +                AS_LOCK_ENTER(as, RW_WRITER);
1346 1346          else
1347      -                AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     1347 +                AS_LOCK_ENTER(as, RW_READER);
1348 1348          as_clearwatchprot(as, raddr, rsize);
1349 1349          seg = as_segat(as, raddr);
1350 1350          if (seg == NULL) {
1351 1351                  as_setwatch(as);
1352      -                AS_LOCK_EXIT(as, &as->a_lock);
     1352 +                AS_LOCK_EXIT(as);
1353 1353                  return (ENOMEM);
1354 1354          }
1355 1355  
1356 1356          for (; rsize != 0; rsize -= ssize, raddr += ssize) {
1357 1357                  if (raddr >= seg->s_base + seg->s_size) {
1358 1358                          seg = AS_SEGNEXT(as, seg);
1359 1359                          if (seg == NULL || raddr != seg->s_base) {
1360 1360                                  error = ENOMEM;
1361 1361                                  break;
1362 1362                          }
↓ open down ↓ 1 lines elided ↑ open up ↑
1364 1364                  if ((raddr + rsize) > (seg->s_base + seg->s_size))
1365 1365                          ssize = seg->s_base + seg->s_size - raddr;
1366 1366                  else
1367 1367                          ssize = rsize;
1368 1368  
1369 1369                  error = SEGOP_CHECKPROT(seg, raddr, ssize, prot);
1370 1370                  if (error != 0)
1371 1371                          break;
1372 1372          }
1373 1373          as_setwatch(as);
1374      -        AS_LOCK_EXIT(as, &as->a_lock);
     1374 +        AS_LOCK_EXIT(as);
1375 1375          return (error);
1376 1376  }
1377 1377  
1378 1378  int
1379 1379  as_unmap(struct as *as, caddr_t addr, size_t size)
1380 1380  {
1381 1381          struct seg *seg, *seg_next;
1382 1382          struct as_callback *cb;
1383 1383          caddr_t raddr, eaddr;
1384 1384          size_t ssize, rsize = 0;
1385 1385          int err;
1386 1386  
1387 1387  top:
1388 1388          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1389 1389          eaddr = (caddr_t)(((uintptr_t)(addr + size) + PAGEOFFSET) &
1390 1390              (uintptr_t)PAGEMASK);
1391 1391  
1392      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     1392 +        AS_LOCK_ENTER(as, RW_WRITER);
1393 1393  
1394 1394          as->a_updatedir = 1;    /* inform /proc */
1395 1395          gethrestime(&as->a_updatetime);
1396 1396  
1397 1397          /*
1398 1398           * Use as_findseg to find the first segment in the range, then
1399 1399           * step through the segments in order, following s_next.
1400 1400           */
1401 1401          as_clearwatchprot(as, raddr, eaddr - raddr);
1402 1402  
↓ open down ↓ 60 lines elided ↑ open up ↑
1463 1463                           *      before the callback, so only one callback can
1464 1464                           *      be done at a time. Go to the top and do more
1465 1465                           *      until zero is returned. If zero is returned,
1466 1466                           *      either there were no callbacks for this event
1467 1467                           *      or they were already in progress.
1468 1468                           */
1469 1469                          mutex_enter(&as->a_contents);
1470 1470                          if (as->a_callbacks &&
1471 1471                              (cb = as_find_callback(as, AS_UNMAP_EVENT,
1472 1472                              seg->s_base, seg->s_size))) {
1473      -                                AS_LOCK_EXIT(as, &as->a_lock);
     1473 +                                AS_LOCK_EXIT(as);
1474 1474                                  as_execute_callback(as, cb, AS_UNMAP_EVENT);
1475 1475                          } else if (!AS_ISNOUNMAPWAIT(as)) {
1476 1476                                  if (AS_ISUNMAPWAIT(as) == 0)
1477 1477                                          cv_broadcast(&as->a_cv);
1478 1478                                  AS_SETUNMAPWAIT(as);
1479      -                                AS_LOCK_EXIT(as, &as->a_lock);
     1479 +                                AS_LOCK_EXIT(as);
1480 1480                                  while (AS_ISUNMAPWAIT(as))
1481 1481                                          cv_wait(&as->a_cv, &as->a_contents);
1482 1482                          } else {
1483 1483                                  /*
1484 1484                                   * We may have raced with
1485 1485                                   * segvn_reclaim()/segspt_reclaim(). In this
1486 1486                                   * case clean nounmapwait flag and retry since
1487 1487                                   * softlockcnt in this segment may be already
1488 1488                                   * 0.  We don't drop as writer lock so our
1489 1489                                   * number of retries without sleeping should
1490 1490                                   * be very small. See segvn_reclaim() for
1491 1491                                   * more comments.
1492 1492                                   */
1493 1493                                  AS_CLRNOUNMAPWAIT(as);
1494 1494                                  mutex_exit(&as->a_contents);
1495 1495                                  goto retry;
1496 1496                          }
1497 1497                          mutex_exit(&as->a_contents);
1498 1498                          goto top;
1499 1499                  } else if (err == IE_RETRY) {
1500      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1500 +                        AS_LOCK_EXIT(as);
1501 1501                          goto top;
1502 1502                  } else if (err) {
1503 1503                          as_setwatch(as);
1504      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1504 +                        AS_LOCK_EXIT(as);
1505 1505                          return (-1);
1506 1506                  }
1507 1507  
1508 1508                  as->a_size -= ssize;
1509 1509                  if (rsize)
1510 1510                          as->a_resvsize -= rsize;
1511 1511                  raddr += ssize;
1512 1512          }
1513      -        AS_LOCK_EXIT(as, &as->a_lock);
     1513 +        AS_LOCK_EXIT(as);
1514 1514          return (0);
1515 1515  }
1516 1516  
1517 1517  static int
1518 1518  as_map_segvn_segs(struct as *as, caddr_t addr, size_t size, uint_t szcvec,
1519 1519      int (*crfp)(), struct segvn_crargs *vn_a, int *segcreated)
1520 1520  {
1521 1521          uint_t szc;
1522 1522          uint_t nszc;
1523 1523          int error;
1524 1524          caddr_t a;
1525 1525          caddr_t eaddr;
1526 1526          size_t segsize;
1527 1527          struct seg *seg;
1528 1528          size_t pgsz;
1529 1529          int do_off = (vn_a->vp != NULL || vn_a->amp != NULL);
1530 1530          uint_t save_szcvec;
1531 1531  
1532      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     1532 +        ASSERT(AS_WRITE_HELD(as));
1533 1533          ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
1534 1534          ASSERT(IS_P2ALIGNED(size, PAGESIZE));
1535 1535          ASSERT(vn_a->vp == NULL || vn_a->amp == NULL);
1536 1536          if (!do_off) {
1537 1537                  vn_a->offset = 0;
1538 1538          }
1539 1539  
1540 1540          if (szcvec <= 1) {
1541 1541                  seg = seg_alloc(as, addr, size);
1542 1542                  if (seg == NULL) {
↓ open down ↓ 93 lines elided ↑ open up ↑
1636 1636          int type = (vn_a->type == MAP_SHARED) ? MAPPGSZC_SHM : MAPPGSZC_PRIVM;
1637 1637          uint_t szcvec = map_pgszcvec(addr, size, (uintptr_t)addr, mapflags,
1638 1638              type, 0);
1639 1639          int error;
1640 1640          struct seg *seg;
1641 1641          struct vattr va;
1642 1642          u_offset_t eoff;
1643 1643          size_t save_size = 0;
1644 1644          extern size_t textrepl_size_thresh;
1645 1645  
1646      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     1646 +        ASSERT(AS_WRITE_HELD(as));
1647 1647          ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
1648 1648          ASSERT(IS_P2ALIGNED(size, PAGESIZE));
1649 1649          ASSERT(vn_a->vp != NULL);
1650 1650          ASSERT(vn_a->amp == NULL);
1651 1651  
1652 1652  again:
1653 1653          if (szcvec <= 1) {
1654 1654                  seg = seg_alloc(as, addr, size);
1655 1655                  if (seg == NULL) {
1656 1656                          return (ENOMEM);
↓ open down ↓ 68 lines elided ↑ open up ↑
1725 1725                          type = MAPPGSZC_HEAP;
1726 1726                  } else if (vn_a->szc == AS_MAP_STACK) {
1727 1727                          type = MAPPGSZC_STACK;
1728 1728                  } else {
1729 1729                          type = MAPPGSZC_PRIVM;
1730 1730                  }
1731 1731          }
1732 1732          szcvec = map_pgszcvec(addr, size, vn_a->amp == NULL ?
1733 1733              (uintptr_t)addr : (uintptr_t)P2ROUNDUP(vn_a->offset, PAGESIZE),
1734 1734              (vn_a->flags & MAP_TEXT), type, 0);
1735      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     1735 +        ASSERT(AS_WRITE_HELD(as));
1736 1736          ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
1737 1737          ASSERT(IS_P2ALIGNED(size, PAGESIZE));
1738 1738          ASSERT(vn_a->vp == NULL);
1739 1739  
1740 1740          return (as_map_segvn_segs(as, addr, size, szcvec,
1741 1741              crfp, vn_a, segcreated));
1742 1742  }
1743 1743  
1744 1744  int
1745 1745  as_map(struct as *as, caddr_t addr, size_t size, int (*crfp)(), void *argsp)
1746 1746  {
1747      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     1747 +        AS_LOCK_ENTER(as, RW_WRITER);
1748 1748          return (as_map_locked(as, addr, size, crfp, argsp));
1749 1749  }
1750 1750  
1751 1751  int
1752 1752  as_map_locked(struct as *as, caddr_t addr, size_t size, int (*crfp)(),
1753 1753                  void *argsp)
1754 1754  {
1755 1755          struct seg *seg = NULL;
1756 1756          caddr_t raddr;                  /* rounded down addr */
1757 1757          size_t rsize;                   /* rounded up size */
↓ open down ↓ 3 lines elided ↑ open up ↑
1761 1761          struct segvn_crargs crargs;
1762 1762  
1763 1763          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1764 1764          rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
1765 1765              (size_t)raddr;
1766 1766  
1767 1767          /*
1768 1768           * check for wrap around
1769 1769           */
1770 1770          if ((raddr + rsize < raddr) || (as->a_size > (ULONG_MAX - size))) {
1771      -                AS_LOCK_EXIT(as, &as->a_lock);
     1771 +                AS_LOCK_EXIT(as);
1772 1772                  return (ENOMEM);
1773 1773          }
1774 1774  
1775 1775          as->a_updatedir = 1;    /* inform /proc */
1776 1776          gethrestime(&as->a_updatetime);
1777 1777  
1778 1778          if (as != &kas && as->a_size + rsize > (size_t)p->p_vmem_ctl) {
1779      -                AS_LOCK_EXIT(as, &as->a_lock);
     1779 +                AS_LOCK_EXIT(as);
1780 1780  
1781 1781                  (void) rctl_action(rctlproc_legacy[RLIMIT_VMEM], p->p_rctls, p,
1782 1782                      RCA_UNSAFE_ALL);
1783 1783  
1784 1784                  return (ENOMEM);
1785 1785          }
1786 1786  
1787 1787          if (AS_MAP_CHECK_VNODE_LPOOB(crfp, argsp)) {
1788 1788                  crargs = *(struct segvn_crargs *)argsp;
1789 1789                  error = as_map_vnsegs(as, raddr, rsize, crfp, &crargs, &unmap);
1790 1790                  if (error != 0) {
1791      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1791 +                        AS_LOCK_EXIT(as);
1792 1792                          if (unmap) {
1793 1793                                  (void) as_unmap(as, addr, size);
1794 1794                          }
1795 1795                          return (error);
1796 1796                  }
1797 1797          } else if (AS_MAP_CHECK_ANON_LPOOB(crfp, argsp)) {
1798 1798                  crargs = *(struct segvn_crargs *)argsp;
1799 1799                  error = as_map_ansegs(as, raddr, rsize, crfp, &crargs, &unmap);
1800 1800                  if (error != 0) {
1801      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1801 +                        AS_LOCK_EXIT(as);
1802 1802                          if (unmap) {
1803 1803                                  (void) as_unmap(as, addr, size);
1804 1804                          }
1805 1805                          return (error);
1806 1806                  }
1807 1807          } else {
1808 1808                  seg = seg_alloc(as, addr, size);
1809 1809                  if (seg == NULL) {
1810      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1810 +                        AS_LOCK_EXIT(as);
1811 1811                          return (ENOMEM);
1812 1812                  }
1813 1813  
1814 1814                  error = (*crfp)(seg, argsp);
1815 1815                  if (error != 0) {
1816 1816                          seg_free(seg);
1817      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1817 +                        AS_LOCK_EXIT(as);
1818 1818                          return (error);
1819 1819                  }
1820 1820                  /*
1821 1821                   * Add size now so as_unmap will work if as_ctl fails.
1822 1822                   */
1823 1823                  as->a_size += rsize;
1824 1824                  as->a_resvsize += rsize;
1825 1825          }
1826 1826  
1827 1827          as_setwatch(as);
1828 1828  
1829 1829          /*
1830 1830           * If the address space is locked,
1831 1831           * establish memory locks for the new segment.
1832 1832           */
1833 1833          mutex_enter(&as->a_contents);
1834 1834          if (AS_ISPGLCK(as)) {
1835 1835                  mutex_exit(&as->a_contents);
1836      -                AS_LOCK_EXIT(as, &as->a_lock);
     1836 +                AS_LOCK_EXIT(as);
1837 1837                  error = as_ctl(as, addr, size, MC_LOCK, 0, 0, NULL, 0);
1838 1838                  if (error != 0)
1839 1839                          (void) as_unmap(as, addr, size);
1840 1840          } else {
1841 1841                  mutex_exit(&as->a_contents);
1842      -                AS_LOCK_EXIT(as, &as->a_lock);
     1842 +                AS_LOCK_EXIT(as);
1843 1843          }
1844 1844          return (error);
1845 1845  }
1846 1846  
1847 1847  
1848 1848  /*
1849 1849   * Delete all segments in the address space marked with S_PURGE.
1850 1850   * This is currently used for Sparc V9 nofault ASI segments (seg_nf.c).
1851 1851   * These segments are deleted as a first step before calls to as_gap(), so
1852 1852   * that they don't affect mmap() or shmat().
↓ open down ↓ 4 lines elided ↑ open up ↑
1857 1857          struct seg *seg;
1858 1858          struct seg *next_seg;
1859 1859  
1860 1860          /*
1861 1861           * the setting of NEEDSPURGE is protected by as_rangelock(), so
1862 1862           * no need to grab a_contents mutex for this check
1863 1863           */
1864 1864          if ((as->a_flags & AS_NEEDSPURGE) == 0)
1865 1865                  return;
1866 1866  
1867      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     1867 +        AS_LOCK_ENTER(as, RW_WRITER);
1868 1868          next_seg = NULL;
1869 1869          seg = AS_SEGFIRST(as);
1870 1870          while (seg != NULL) {
1871 1871                  next_seg = AS_SEGNEXT(as, seg);
1872 1872                  if (seg->s_flags & S_PURGE)
1873 1873                          SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
1874 1874                  seg = next_seg;
1875 1875          }
1876      -        AS_LOCK_EXIT(as, &as->a_lock);
     1876 +        AS_LOCK_EXIT(as);
1877 1877  
1878 1878          mutex_enter(&as->a_contents);
1879 1879          as->a_flags &= ~AS_NEEDSPURGE;
1880 1880          mutex_exit(&as->a_contents);
1881 1881  }
1882 1882  
1883 1883  /*
1884 1884   * Find a hole within [*basep, *basep + *lenp), which contains a mappable
1885 1885   * range of addresses at least "minlen" long, where the base of the range is
1886 1886   * at "off" phase from an "align" boundary and there is space for a
↓ open down ↓ 42 lines elided ↑ open up ↑
1929 1929           * a_lastgap->s_base which will likely allow us to find an
1930 1930           * acceptable hole in the address space quicker.
1931 1931           * If we can't find a hole with this fast_path, then we look for
1932 1932           * smaller holes in which the alignment and offset may allow
1933 1933           * the allocation to fit.
1934 1934           */
1935 1935          minlen += align;
1936 1936          minlen += 2 * redzone;
1937 1937          redzone = 0;
1938 1938  
1939      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     1939 +        AS_LOCK_ENTER(as, RW_READER);
1940 1940          if (AS_SEGFIRST(as) == NULL) {
1941 1941                  if (valid_va_range_aligned(basep, lenp, minlen, flags & AH_DIR,
1942 1942                      align, redzone, off)) {
1943      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1943 +                        AS_LOCK_EXIT(as);
1944 1944                          return (0);
1945 1945                  } else {
1946      -                        AS_LOCK_EXIT(as, &as->a_lock);
     1946 +                        AS_LOCK_EXIT(as);
1947 1947                          *basep = save_base;
1948 1948                          *lenp = save_len;
1949 1949                          return (-1);
1950 1950                  }
1951 1951          }
1952 1952  
1953 1953  retry:
1954 1954          /*
1955 1955           * Set up to iterate over all the inter-segment holes in the given
1956 1956           * direction.  lseg is NULL for the lowest-addressed hole and hseg is
↓ open down ↓ 60 lines elided ↑ open up ↑
2017 2017                  if (*lenp >= minlen && valid_va_range_aligned(basep, lenp,
2018 2018                      minlen, forward ? AH_LO : AH_HI, align, redzone, off) &&
2019 2019                      ((flags & AH_CONTAIN) == 0 ||
2020 2020                      (*basep <= addr && *basep + *lenp > addr))) {
2021 2021                          if (!forward)
2022 2022                                  as->a_lastgap = hseg;
2023 2023                          if (hseg != NULL)
2024 2024                                  as->a_lastgaphl = hseg;
2025 2025                          else
2026 2026                                  as->a_lastgaphl = lseg;
2027      -                        AS_LOCK_EXIT(as, &as->a_lock);
     2027 +                        AS_LOCK_EXIT(as);
2028 2028                          return (0);
2029 2029                  }
2030 2030          cont:
2031 2031                  /*
2032 2032                   * Move to the next hole.
2033 2033                   */
2034 2034                  if (forward) {
2035 2035                          lseg = hseg;
2036 2036                          if (lseg == NULL)
2037 2037                                  break;
↓ open down ↓ 6 lines elided ↑ open up ↑
2044 2044                  }
2045 2045          }
2046 2046          if (fast_path && (align != 0 || save_redzone != 0)) {
2047 2047                  fast_path = 0;
2048 2048                  minlen = save_minlen;
2049 2049                  redzone = save_redzone;
2050 2050                  goto retry;
2051 2051          }
2052 2052          *basep = save_base;
2053 2053          *lenp = save_len;
2054      -        AS_LOCK_EXIT(as, &as->a_lock);
     2054 +        AS_LOCK_EXIT(as);
2055 2055          return (-1);
2056 2056  }
2057 2057  
2058 2058  /*
2059 2059   * Find a hole of at least size minlen within [*basep, *basep + *lenp).
2060 2060   *
2061 2061   * If flags specifies AH_HI, the hole will have the highest possible address
2062 2062   * in the range.  We use the as->a_lastgap field to figure out where to
2063 2063   * start looking for a gap.
2064 2064   *
↓ open down ↓ 21 lines elided ↑ open up ↑
2086 2086   * We're lazy and only return one segment at a time.
2087 2087   */
2088 2088  int
2089 2089  as_memory(struct as *as, caddr_t *basep, size_t *lenp)
2090 2090  {
2091 2091          extern struct seg_ops segspt_shmops;    /* needs a header file */
2092 2092          struct seg *seg;
2093 2093          caddr_t addr, eaddr;
2094 2094          caddr_t segend;
2095 2095  
2096      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2096 +        AS_LOCK_ENTER(as, RW_READER);
2097 2097  
2098 2098          addr = *basep;
2099 2099          eaddr = addr + *lenp;
2100 2100  
2101 2101          seg = as_findseg(as, addr, 0);
2102 2102          if (seg != NULL)
2103 2103                  addr = MAX(seg->s_base, addr);
2104 2104  
2105 2105          for (;;) {
2106 2106                  if (seg == NULL || addr >= eaddr || eaddr <= seg->s_base) {
2107      -                        AS_LOCK_EXIT(as, &as->a_lock);
     2107 +                        AS_LOCK_EXIT(as);
2108 2108                          return (EINVAL);
2109 2109                  }
2110 2110  
2111 2111                  if (seg->s_ops == &segvn_ops) {
2112 2112                          segend = seg->s_base + seg->s_size;
2113 2113                          break;
2114 2114                  }
2115 2115  
2116 2116                  /*
2117 2117                   * We do ISM by looking into the private data
↓ open down ↓ 11 lines elided ↑ open up ↑
2129 2129                          addr = seg->s_base;
2130 2130          }
2131 2131  
2132 2132          *basep = addr;
2133 2133  
2134 2134          if (segend > eaddr)
2135 2135                  *lenp = eaddr - addr;
2136 2136          else
2137 2137                  *lenp = segend - addr;
2138 2138  
2139      -        AS_LOCK_EXIT(as, &as->a_lock);
     2139 +        AS_LOCK_EXIT(as);
2140 2140          return (0);
2141 2141  }
2142 2142  
2143 2143  /*
2144 2144   * Swap the pages associated with the address space as out to
2145 2145   * secondary storage, returning the number of bytes actually
2146 2146   * swapped.
2147 2147   *
2148 2148   * The value returned is intended to correlate well with the process's
2149 2149   * memory requirements.  Its usefulness for this purpose depends on
↓ open down ↓ 7 lines elided ↑ open up ↑
2157 2157          size_t swpcnt = 0;
2158 2158  
2159 2159          /*
2160 2160           * Kernel-only processes have given up their address
2161 2161           * spaces.  Of course, we shouldn't be attempting to
2162 2162           * swap out such processes in the first place...
2163 2163           */
2164 2164          if (as == NULL)
2165 2165                  return (0);
2166 2166  
2167      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2167 +        AS_LOCK_ENTER(as, RW_READER);
2168 2168  
2169 2169          /* Prevent XHATs from attaching */
2170 2170          mutex_enter(&as->a_contents);
2171 2171          AS_SETBUSY(as);
2172 2172          mutex_exit(&as->a_contents);
2173 2173  
2174 2174  
2175 2175          /*
2176 2176           * Free all mapping resources associated with the address
2177 2177           * space.  The segment-level swapout routines capitalize
↓ open down ↓ 18 lines elided ↑ open up ↑
2196 2196  
2197 2197                  /*
2198 2198                   * We have to check to see if the seg has
2199 2199                   * an ops vector because the seg may have
2200 2200                   * been in the middle of being set up when
2201 2201                   * the process was picked for swapout.
2202 2202                   */
2203 2203                  if ((ov != NULL) && (ov->swapout != NULL))
2204 2204                          swpcnt += SEGOP_SWAPOUT(seg);
2205 2205          }
2206      -        AS_LOCK_EXIT(as, &as->a_lock);
     2206 +        AS_LOCK_EXIT(as);
2207 2207          return (swpcnt);
2208 2208  }
2209 2209  
2210 2210  /*
2211 2211   * Determine whether data from the mappings in interval [addr, addr + size)
2212 2212   * are in the primary memory (core) cache.
2213 2213   */
2214 2214  int
2215 2215  as_incore(struct as *as, caddr_t addr,
2216 2216      size_t size, char *vec, size_t *sizep)
↓ open down ↓ 6 lines elided ↑ open up ↑
2223 2223          int error = 0;          /* result, assume success */
2224 2224  
2225 2225          *sizep = 0;
2226 2226          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2227 2227          rsize = ((((size_t)addr + size) + PAGEOFFSET) & PAGEMASK) -
2228 2228              (size_t)raddr;
2229 2229  
2230 2230          if (raddr + rsize < raddr)              /* check for wraparound */
2231 2231                  return (ENOMEM);
2232 2232  
2233      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2233 +        AS_LOCK_ENTER(as, RW_READER);
2234 2234          seg = as_segat(as, raddr);
2235 2235          if (seg == NULL) {
2236      -                AS_LOCK_EXIT(as, &as->a_lock);
     2236 +                AS_LOCK_EXIT(as);
2237 2237                  return (-1);
2238 2238          }
2239 2239  
2240 2240          for (; rsize != 0; rsize -= ssize, raddr += ssize) {
2241 2241                  if (raddr >= seg->s_base + seg->s_size) {
2242 2242                          seg = AS_SEGNEXT(as, seg);
2243 2243                          if (seg == NULL || raddr != seg->s_base) {
2244 2244                                  error = -1;
2245 2245                                  break;
2246 2246                          }
↓ open down ↓ 2 lines elided ↑ open up ↑
2249 2249                          ssize = seg->s_base + seg->s_size - raddr;
2250 2250                  else
2251 2251                          ssize = rsize;
2252 2252                  *sizep += isize = SEGOP_INCORE(seg, raddr, ssize, vec);
2253 2253                  if (isize != ssize) {
2254 2254                          error = -1;
2255 2255                          break;
2256 2256                  }
2257 2257                  vec += btopr(ssize);
2258 2258          }
2259      -        AS_LOCK_EXIT(as, &as->a_lock);
     2259 +        AS_LOCK_EXIT(as);
2260 2260          return (error);
2261 2261  }
2262 2262  
2263 2263  static void
2264 2264  as_segunlock(struct seg *seg, caddr_t addr, int attr,
2265 2265          ulong_t *bitmap, size_t position, size_t npages)
2266 2266  {
2267 2267          caddr_t range_start;
2268 2268          size_t  pos1 = position;
2269 2269          size_t  pos2;
↓ open down ↓ 49 lines elided ↑ open up ↑
2319 2319          size_t rsize;           /* rounded up size */
2320 2320          size_t initrsize;       /* saved initial rounded up size */
2321 2321          size_t ssize;           /* size of seg */
2322 2322          int error = 0;                  /* result */
2323 2323          size_t mlock_size;      /* size of bitmap */
2324 2324          ulong_t *mlock_map;     /* pointer to bitmap used */
2325 2325                                  /* to represent the locked */
2326 2326                                  /* pages. */
2327 2327  retry:
2328 2328          if (error == IE_RETRY)
2329      -                AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     2329 +                AS_LOCK_ENTER(as, RW_WRITER);
2330 2330          else
2331      -                AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2331 +                AS_LOCK_ENTER(as, RW_READER);
2332 2332  
2333 2333          /*
2334 2334           * If these are address space lock/unlock operations, loop over
2335 2335           * all segments in the address space, as appropriate.
2336 2336           */
2337 2337          if (func == MC_LOCKAS) {
2338 2338                  size_t npages, idx;
2339 2339                  size_t rlen = 0;        /* rounded as length */
2340 2340  
2341 2341                  idx = pos;
2342 2342  
2343 2343                  if (arg & MCL_FUTURE) {
2344 2344                          mutex_enter(&as->a_contents);
2345 2345                          AS_SETPGLCK(as);
2346 2346                          mutex_exit(&as->a_contents);
2347 2347                  }
2348 2348                  if ((arg & MCL_CURRENT) == 0) {
2349      -                        AS_LOCK_EXIT(as, &as->a_lock);
     2349 +                        AS_LOCK_EXIT(as);
2350 2350                          return (0);
2351 2351                  }
2352 2352  
2353 2353                  seg = AS_SEGFIRST(as);
2354 2354                  if (seg == NULL) {
2355      -                        AS_LOCK_EXIT(as, &as->a_lock);
     2355 +                        AS_LOCK_EXIT(as);
2356 2356                          return (0);
2357 2357                  }
2358 2358  
2359 2359                  do {
2360 2360                          raddr = (caddr_t)((uintptr_t)seg->s_base &
2361 2361                              (uintptr_t)PAGEMASK);
2362 2362                          rlen += (((uintptr_t)(seg->s_base + seg->s_size) +
2363 2363                              PAGEOFFSET) & PAGEMASK) - (uintptr_t)raddr;
2364 2364                  } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2365 2365  
2366 2366                  mlock_size = BT_BITOUL(btopr(rlen));
2367 2367                  if ((mlock_map = (ulong_t *)kmem_zalloc(mlock_size *
2368 2368                      sizeof (ulong_t), KM_NOSLEEP)) == NULL) {
2369      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2369 +                                AS_LOCK_EXIT(as);
2370 2370                                  return (EAGAIN);
2371 2371                  }
2372 2372  
2373 2373                  for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2374 2374                          error = SEGOP_LOCKOP(seg, seg->s_base,
2375 2375                              seg->s_size, attr, MC_LOCK, mlock_map, pos);
2376 2376                          if (error != 0)
2377 2377                                  break;
2378 2378                          pos += seg_pages(seg);
2379 2379                  }
↓ open down ↓ 5 lines elided ↑ open up ↑
2385 2385                                  raddr = (caddr_t)((uintptr_t)seg->s_base &
2386 2386                                      (uintptr_t)PAGEMASK);
2387 2387                                  npages = seg_pages(seg);
2388 2388                                  as_segunlock(seg, raddr, attr, mlock_map,
2389 2389                                      idx, npages);
2390 2390                                  idx += npages;
2391 2391                          }
2392 2392                  }
2393 2393  
2394 2394                  kmem_free(mlock_map, mlock_size * sizeof (ulong_t));
2395      -                AS_LOCK_EXIT(as, &as->a_lock);
     2395 +                AS_LOCK_EXIT(as);
2396 2396                  goto lockerr;
2397 2397          } else if (func == MC_UNLOCKAS) {
2398 2398                  mutex_enter(&as->a_contents);
2399 2399                  AS_CLRPGLCK(as);
2400 2400                  mutex_exit(&as->a_contents);
2401 2401  
2402 2402                  for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2403 2403                          error = SEGOP_LOCKOP(seg, seg->s_base,
2404 2404                              seg->s_size, attr, MC_UNLOCK, NULL, 0);
2405 2405                          if (error != 0)
2406 2406                                  break;
2407 2407                  }
2408 2408  
2409      -                AS_LOCK_EXIT(as, &as->a_lock);
     2409 +                AS_LOCK_EXIT(as);
2410 2410                  goto lockerr;
2411 2411          }
2412 2412  
2413 2413          /*
2414 2414           * Normalize addresses and sizes.
2415 2415           */
2416 2416          initraddr = raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2417 2417          initrsize = rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
2418 2418              (size_t)raddr;
2419 2419  
2420 2420          if (raddr + rsize < raddr) {            /* check for wraparound */
2421      -                AS_LOCK_EXIT(as, &as->a_lock);
     2421 +                AS_LOCK_EXIT(as);
2422 2422                  return (ENOMEM);
2423 2423          }
2424 2424  
2425 2425          /*
2426 2426           * Get initial segment.
2427 2427           */
2428 2428          if ((seg = as_segat(as, raddr)) == NULL) {
2429      -                AS_LOCK_EXIT(as, &as->a_lock);
     2429 +                AS_LOCK_EXIT(as);
2430 2430                  return (ENOMEM);
2431 2431          }
2432 2432  
2433 2433          if (func == MC_LOCK) {
2434 2434                  mlock_size = BT_BITOUL(btopr(rsize));
2435 2435                  if ((mlock_map = (ulong_t *)kmem_zalloc(mlock_size *
2436 2436                      sizeof (ulong_t), KM_NOSLEEP)) == NULL) {
2437      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2437 +                                AS_LOCK_EXIT(as);
2438 2438                                  return (EAGAIN);
2439 2439                  }
2440 2440          }
2441 2441  
2442 2442          /*
2443 2443           * Loop over all segments.  If a hole in the address range is
2444 2444           * discovered, then fail.  For each segment, perform the appropriate
2445 2445           * control operation.
2446 2446           */
2447 2447          while (rsize != 0) {
↓ open down ↓ 4 lines elided ↑ open up ↑
2452 2452                   */
2453 2453                  if (raddr >= seg->s_base + seg->s_size) {
2454 2454                          seg = AS_SEGNEXT(as, seg);
2455 2455                          if (seg == NULL || raddr != seg->s_base) {
2456 2456                                  if (func == MC_LOCK) {
2457 2457                                          as_unlockerr(as, attr, mlock_map,
2458 2458                                              initraddr, initrsize - rsize);
2459 2459                                          kmem_free(mlock_map,
2460 2460                                              mlock_size * sizeof (ulong_t));
2461 2461                                  }
2462      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2462 +                                AS_LOCK_EXIT(as);
2463 2463                                  return (ENOMEM);
2464 2464                          }
2465 2465                  }
2466 2466                  if ((raddr + rsize) > (seg->s_base + seg->s_size))
2467 2467                          ssize = seg->s_base + seg->s_size - raddr;
2468 2468                  else
2469 2469                          ssize = rsize;
2470 2470  
2471 2471                  /*
2472 2472                   * Dispatch on specific function.
2473 2473                   */
2474 2474                  switch (func) {
2475 2475  
2476 2476                  /*
2477 2477                   * Synchronize cached data from mappings with backing
2478 2478                   * objects.
2479 2479                   */
2480 2480                  case MC_SYNC:
2481 2481                          if (error = SEGOP_SYNC(seg, raddr, ssize,
2482 2482                              attr, (uint_t)arg)) {
2483      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2483 +                                AS_LOCK_EXIT(as);
2484 2484                                  return (error);
2485 2485                          }
2486 2486                          break;
2487 2487  
2488 2488                  /*
2489 2489                   * Lock pages in memory.
2490 2490                   */
2491 2491                  case MC_LOCK:
2492 2492                          if (error = SEGOP_LOCKOP(seg, raddr, ssize,
2493 2493                              attr, func, mlock_map, pos)) {
2494 2494                                  as_unlockerr(as, attr, mlock_map, initraddr,
2495 2495                                      initrsize - rsize + ssize);
2496 2496                                  kmem_free(mlock_map, mlock_size *
2497 2497                                      sizeof (ulong_t));
2498      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2498 +                                AS_LOCK_EXIT(as);
2499 2499                                  goto lockerr;
2500 2500                          }
2501 2501                          break;
2502 2502  
2503 2503                  /*
2504 2504                   * Unlock mapped pages.
2505 2505                   */
2506 2506                  case MC_UNLOCK:
2507 2507                          (void) SEGOP_LOCKOP(seg, raddr, ssize, attr, func,
2508 2508                              (ulong_t *)NULL, (size_t)NULL);
↓ open down ↓ 8 lines elided ↑ open up ↑
2517 2517                          /*
2518 2518                           * Check for regular errors and special retry error
2519 2519                           */
2520 2520                          if (error) {
2521 2521                                  if (error == IE_RETRY) {
2522 2522                                          /*
2523 2523                                           * Need to acquire writers lock, so
2524 2524                                           * have to drop readers lock and start
2525 2525                                           * all over again
2526 2526                                           */
2527      -                                        AS_LOCK_EXIT(as, &as->a_lock);
     2527 +                                        AS_LOCK_EXIT(as);
2528 2528                                          goto retry;
2529 2529                                  } else if (error == IE_REATTACH) {
2530 2530                                          /*
2531 2531                                           * Find segment for current address
2532 2532                                           * because current segment just got
2533 2533                                           * split or concatenated
2534 2534                                           */
2535 2535                                          seg = as_segat(as, raddr);
2536 2536                                          if (seg == NULL) {
2537      -                                                AS_LOCK_EXIT(as, &as->a_lock);
     2537 +                                                AS_LOCK_EXIT(as);
2538 2538                                                  return (ENOMEM);
2539 2539                                          }
2540 2540                                  } else {
2541 2541                                          /*
2542 2542                                           * Regular error
2543 2543                                           */
2544      -                                        AS_LOCK_EXIT(as, &as->a_lock);
     2544 +                                        AS_LOCK_EXIT(as);
2545 2545                                          return (error);
2546 2546                                  }
2547 2547                          }
2548 2548                          break;
2549 2549  
2550 2550                  case MC_INHERIT_ZERO:
2551 2551                          if (seg->s_ops->inherit == NULL) {
2552 2552                                  error = ENOTSUP;
2553 2553                          } else {
2554 2554                                  error = SEGOP_INHERIT(seg, raddr, ssize,
2555 2555                                      SEGP_INH_ZERO);
2556 2556                          }
2557 2557                          if (error != 0) {
2558      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2558 +                                AS_LOCK_EXIT(as);
2559 2559                                  return (error);
2560 2560                          }
2561 2561                          break;
2562 2562  
2563 2563                  /*
2564 2564                   * Can't happen.
2565 2565                   */
2566 2566                  default:
2567 2567                          panic("as_ctl: bad operation %d", func);
2568 2568                          /*NOTREACHED*/
2569 2569                  }
2570 2570  
2571 2571                  rsize -= ssize;
2572 2572                  raddr += ssize;
2573 2573          }
2574 2574  
2575 2575          if (func == MC_LOCK)
2576 2576                  kmem_free(mlock_map, mlock_size * sizeof (ulong_t));
2577      -        AS_LOCK_EXIT(as, &as->a_lock);
     2577 +        AS_LOCK_EXIT(as);
2578 2578          return (0);
2579 2579  lockerr:
2580 2580  
2581 2581          /*
2582 2582           * If the lower levels returned EDEADLK for a segment lockop,
2583 2583           * it means that we should retry the operation.  Let's wait
2584 2584           * a bit also to let the deadlock causing condition clear.
2585 2585           * This is part of a gross hack to work around a design flaw
2586 2586           * in the ufs/sds logging code and should go away when the
2587 2587           * logging code is re-designed to fix the problem. See bug
↓ open down ↓ 44 lines elided ↑ open up ↑
2632 2632          size_t ssize;
2633 2633          pgcnt_t npages = btop(size);
2634 2634          page_t **plist;
2635 2635          page_t **pl;
2636 2636          int error;
2637 2637          caddr_t eaddr;
2638 2638          faultcode_t fault_err = 0;
2639 2639          pgcnt_t pl_off;
2640 2640          extern struct seg_ops segspt_shmops;
2641 2641  
2642      -        ASSERT(AS_LOCK_HELD(as, &as->a_lock));
     2642 +        ASSERT(AS_LOCK_HELD(as));
2643 2643          ASSERT(seg != NULL);
2644 2644          ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2645 2645          ASSERT(addr + size > seg->s_base + seg->s_size);
2646 2646          ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2647 2647          ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2648 2648  
2649 2649          /*
2650 2650           * Count the number of segments covered by the range we are about to
2651 2651           * lock. The segment count is used to size the shadow list we return
2652 2652           * back to the caller.
2653 2653           */
2654 2654          for (; size != 0; size -= ssize, addr += ssize) {
2655 2655                  if (addr >= seg->s_base + seg->s_size) {
2656 2656  
2657 2657                          seg = AS_SEGNEXT(as, seg);
2658 2658                          if (seg == NULL || addr != seg->s_base) {
2659      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2659 +                                AS_LOCK_EXIT(as);
2660 2660                                  return (EFAULT);
2661 2661                          }
2662 2662                          /*
2663 2663                           * Do a quick check if subsequent segments
2664 2664                           * will most likely support pagelock.
2665 2665                           */
2666 2666                          if (seg->s_ops == &segvn_ops) {
2667 2667                                  vnode_t *vp;
2668 2668  
2669 2669                                  if (SEGOP_GETVP(seg, addr, &vp) != 0 ||
2670 2670                                      vp != NULL) {
2671      -                                        AS_LOCK_EXIT(as, &as->a_lock);
     2671 +                                        AS_LOCK_EXIT(as);
2672 2672                                          goto slow;
2673 2673                                  }
2674 2674                          } else if (seg->s_ops != &segspt_shmops) {
2675      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2675 +                                AS_LOCK_EXIT(as);
2676 2676                                  goto slow;
2677 2677                          }
2678 2678                          segcnt++;
2679 2679                  }
2680 2680                  if (addr + size > seg->s_base + seg->s_size) {
2681 2681                          ssize = seg->s_base + seg->s_size - addr;
2682 2682                  } else {
2683 2683                          ssize = size;
2684 2684                  }
2685 2685          }
↓ open down ↓ 24 lines elided ↑ open up ↑
2710 2710                          break;
2711 2711                  }
2712 2712                  ASSERT(plist[npages + cnt] != NULL);
2713 2713                  ASSERT(pl_off + btop(ssize) <= npages);
2714 2714                  bcopy(plist[npages + cnt], &plist[pl_off],
2715 2715                      btop(ssize) * sizeof (page_t *));
2716 2716                  pl_off += btop(ssize);
2717 2717          }
2718 2718  
2719 2719          if (size == 0) {
2720      -                AS_LOCK_EXIT(as, &as->a_lock);
     2720 +                AS_LOCK_EXIT(as);
2721 2721                  ASSERT(cnt == segcnt - 1);
2722 2722                  *ppp = plist;
2723 2723                  return (0);
2724 2724          }
2725 2725  
2726 2726          /*
2727 2727           * one of pagelock calls failed. The error type is in error variable.
2728 2728           * Unlock what we've locked so far and retry with F_SOFTLOCK if error
2729 2729           * type is either EFAULT or ENOTSUP. Otherwise just return the error
2730 2730           * back to the caller.
↓ open down ↓ 13 lines elided ↑ open up ↑
2744 2744                          ssize = seg->s_base + seg->s_size - addr;
2745 2745                  } else {
2746 2746                          ssize = eaddr - addr;
2747 2747                  }
2748 2748                  pl = &plist[npages + cnt];
2749 2749                  ASSERT(*pl != NULL);
2750 2750                  (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
2751 2751                      L_PAGEUNLOCK, rw);
2752 2752          }
2753 2753  
2754      -        AS_LOCK_EXIT(as, &as->a_lock);
     2754 +        AS_LOCK_EXIT(as);
2755 2755  
2756 2756          kmem_free(plist, (npages + segcnt) * sizeof (page_t *));
2757 2757  
2758 2758          if (error != ENOTSUP && error != EFAULT) {
2759 2759                  return (error);
2760 2760          }
2761 2761  
2762 2762  slow:
2763 2763          /*
2764 2764           * If we are here because pagelock failed due to the need to cow fault
↓ open down ↓ 28 lines elided ↑ open up ↑
2793 2793              "as_pagelock_start: addr %p size %ld", addr, size);
2794 2794  
2795 2795          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2796 2796          rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
2797 2797              (size_t)raddr;
2798 2798  
2799 2799          /*
2800 2800           * if the request crosses two segments let
2801 2801           * as_fault handle it.
2802 2802           */
2803      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2803 +        AS_LOCK_ENTER(as, RW_READER);
2804 2804  
2805 2805          seg = as_segat(as, raddr);
2806 2806          if (seg == NULL) {
2807      -                AS_LOCK_EXIT(as, &as->a_lock);
     2807 +                AS_LOCK_EXIT(as);
2808 2808                  return (EFAULT);
2809 2809          }
2810 2810          ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
2811 2811          if (raddr + rsize > seg->s_base + seg->s_size) {
2812 2812                  return (as_pagelock_segs(as, seg, ppp, raddr, rsize, rw));
2813 2813          }
2814 2814          if (raddr + rsize <= raddr) {
2815      -                AS_LOCK_EXIT(as, &as->a_lock);
     2815 +                AS_LOCK_EXIT(as);
2816 2816                  return (EFAULT);
2817 2817          }
2818 2818  
2819 2819          TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_START,
2820 2820              "seg_lock_1_start: raddr %p rsize %ld", raddr, rsize);
2821 2821  
2822 2822          /*
2823 2823           * try to lock pages and pass back shadow list
2824 2824           */
2825 2825          err = SEGOP_PAGELOCK(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
2826 2826  
2827 2827          TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end");
2828 2828  
2829      -        AS_LOCK_EXIT(as, &as->a_lock);
     2829 +        AS_LOCK_EXIT(as);
2830 2830  
2831 2831          if (err == 0 || (err != ENOTSUP && err != EFAULT)) {
2832 2832                  return (err);
2833 2833          }
2834 2834  
2835 2835          /*
2836 2836           * Use F_SOFTLOCK to lock the pages because pagelock failed either due
2837 2837           * to no pagelock support for this segment or pages need to be cow
2838 2838           * faulted in. If fault is needed F_SOFTLOCK will do this job for
2839 2839           * this as_pagelock() call and in the next as_pagelock() call for the
↓ open down ↓ 17 lines elided ↑ open up ↑
2857 2857  static void
2858 2858  as_pageunlock_segs(struct as *as, struct seg *seg, caddr_t addr, size_t size,
2859 2859      struct page **plist, enum seg_rw rw)
2860 2860  {
2861 2861          ulong_t cnt;
2862 2862          caddr_t eaddr = addr + size;
2863 2863          pgcnt_t npages = btop(size);
2864 2864          size_t ssize;
2865 2865          page_t **pl;
2866 2866  
2867      -        ASSERT(AS_LOCK_HELD(as, &as->a_lock));
     2867 +        ASSERT(AS_LOCK_HELD(as));
2868 2868          ASSERT(seg != NULL);
2869 2869          ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2870 2870          ASSERT(addr + size > seg->s_base + seg->s_size);
2871 2871          ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2872 2872          ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2873 2873          ASSERT(plist != NULL);
2874 2874  
2875 2875          for (cnt = 0; addr < eaddr; addr += ssize) {
2876 2876                  if (addr >= seg->s_base + seg->s_size) {
2877 2877                          seg = AS_SEGNEXT(as, seg);
↓ open down ↓ 4 lines elided ↑ open up ↑
2882 2882                          ssize = seg->s_base + seg->s_size - addr;
2883 2883                  } else {
2884 2884                          ssize = eaddr - addr;
2885 2885                  }
2886 2886                  pl = &plist[npages + cnt];
2887 2887                  ASSERT(*pl != NULL);
2888 2888                  (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
2889 2889                      L_PAGEUNLOCK, rw);
2890 2890          }
2891 2891          ASSERT(cnt > 0);
2892      -        AS_LOCK_EXIT(as, &as->a_lock);
     2892 +        AS_LOCK_EXIT(as);
2893 2893  
2894 2894          cnt++;
2895 2895          kmem_free(plist, (npages + cnt) * sizeof (page_t *));
2896 2896  }
2897 2897  
2898 2898  /*
2899 2899   * unlock pages in a given address range
2900 2900   */
2901 2901  void
2902 2902  as_pageunlock(struct as *as, struct page **pp, caddr_t addr, size_t size,
↓ open down ↓ 12 lines elided ↑ open up ↑
2915 2915           */
2916 2916          if (pp == NULL) {
2917 2917                  (void) as_fault(as->a_hat, as, addr, size, F_SOFTUNLOCK, rw);
2918 2918                  return;
2919 2919          }
2920 2920  
2921 2921          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2922 2922          rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
2923 2923              (size_t)raddr;
2924 2924  
2925      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2925 +        AS_LOCK_ENTER(as, RW_READER);
2926 2926          seg = as_segat(as, raddr);
2927 2927          ASSERT(seg != NULL);
2928 2928  
2929 2929          TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_UNLOCK_START,
2930 2930              "seg_unlock_start: raddr %p rsize %ld", raddr, rsize);
2931 2931  
2932 2932          ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
2933 2933          if (raddr + rsize <= seg->s_base + seg->s_size) {
2934 2934                  SEGOP_PAGELOCK(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
2935 2935          } else {
2936 2936                  as_pageunlock_segs(as, seg, raddr, rsize, pp, rw);
2937 2937                  return;
2938 2938          }
2939      -        AS_LOCK_EXIT(as, &as->a_lock);
     2939 +        AS_LOCK_EXIT(as);
2940 2940          TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_AS_UNLOCK_END, "as_pageunlock_end");
2941 2941  }
2942 2942  
2943 2943  int
2944 2944  as_setpagesize(struct as *as, caddr_t addr, size_t size, uint_t szc,
2945 2945      boolean_t wait)
2946 2946  {
2947 2947          struct seg *seg;
2948 2948          size_t ssize;
2949 2949          caddr_t raddr;                  /* rounded down addr */
↓ open down ↓ 5 lines elided ↑ open up ↑
2955 2955          if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(size, pgsz)) {
2956 2956                  return (EINVAL);
2957 2957          }
2958 2958  
2959 2959          raddr = addr;
2960 2960          rsize = size;
2961 2961  
2962 2962          if (raddr + rsize < raddr)              /* check for wraparound */
2963 2963                  return (ENOMEM);
2964 2964  
2965      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     2965 +        AS_LOCK_ENTER(as, RW_WRITER);
2966 2966          as_clearwatchprot(as, raddr, rsize);
2967 2967          seg = as_segat(as, raddr);
2968 2968          if (seg == NULL) {
2969 2969                  as_setwatch(as);
2970      -                AS_LOCK_EXIT(as, &as->a_lock);
     2970 +                AS_LOCK_EXIT(as);
2971 2971                  return (ENOMEM);
2972 2972          }
2973 2973  
2974 2974          for (; rsize != 0; rsize -= ssize, raddr += ssize) {
2975 2975                  if (raddr >= seg->s_base + seg->s_size) {
2976 2976                          seg = AS_SEGNEXT(as, seg);
2977 2977                          if (seg == NULL || raddr != seg->s_base) {
2978 2978                                  error = ENOMEM;
2979 2979                                  break;
2980 2980                          }
↓ open down ↓ 6 lines elided ↑ open up ↑
2987 2987  
2988 2988  retry:
2989 2989                  error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
2990 2990  
2991 2991                  if (error == IE_NOMEM) {
2992 2992                          error = EAGAIN;
2993 2993                          break;
2994 2994                  }
2995 2995  
2996 2996                  if (error == IE_RETRY) {
2997      -                        AS_LOCK_EXIT(as, &as->a_lock);
     2997 +                        AS_LOCK_EXIT(as);
2998 2998                          goto setpgsz_top;
2999 2999                  }
3000 3000  
3001 3001                  if (error == ENOTSUP) {
3002 3002                          error = EINVAL;
3003 3003                          break;
3004 3004                  }
3005 3005  
3006 3006                  if (wait && (error == EAGAIN)) {
3007 3007                          /*
↓ open down ↓ 19 lines elided ↑ open up ↑
3027 3027                           *      fail with a different error than EAGAIN so
3028 3028                           *      there's no need to trigger as callbacks like
3029 3029                           *      as_unmap, as_setprot or as_free would do.
3030 3030                           */
3031 3031                          mutex_enter(&as->a_contents);
3032 3032                          if (!AS_ISNOUNMAPWAIT(as)) {
3033 3033                                  if (AS_ISUNMAPWAIT(as) == 0) {
3034 3034                                          cv_broadcast(&as->a_cv);
3035 3035                                  }
3036 3036                                  AS_SETUNMAPWAIT(as);
3037      -                                AS_LOCK_EXIT(as, &as->a_lock);
     3037 +                                AS_LOCK_EXIT(as);
3038 3038                                  while (AS_ISUNMAPWAIT(as)) {
3039 3039                                          cv_wait(&as->a_cv, &as->a_contents);
3040 3040                                  }
3041 3041                          } else {
3042 3042                                  /*
3043 3043                                   * We may have raced with
3044 3044                                   * segvn_reclaim()/segspt_reclaim(). In this
3045 3045                                   * case clean nounmapwait flag and retry since
3046 3046                                   * softlockcnt in this segment may be already
3047 3047                                   * 0.  We don't drop as writer lock so our
↓ open down ↓ 5 lines elided ↑ open up ↑
3053 3053                                  mutex_exit(&as->a_contents);
3054 3054                                  goto retry;
3055 3055                          }
3056 3056                          mutex_exit(&as->a_contents);
3057 3057                          goto setpgsz_top;
3058 3058                  } else if (error != 0) {
3059 3059                          break;
3060 3060                  }
3061 3061          }
3062 3062          as_setwatch(as);
3063      -        AS_LOCK_EXIT(as, &as->a_lock);
     3063 +        AS_LOCK_EXIT(as);
3064 3064          return (error);
3065 3065  }
3066 3066  
3067 3067  /*
3068 3068   * as_iset3_default_lpsize() just calls SEGOP_SETPAGESIZE() on all segments
3069 3069   * in its chunk where s_szc is less than the szc we want to set.
3070 3070   */
3071 3071  static int
3072 3072  as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
3073 3073      int *retry)
3074 3074  {
3075 3075          struct seg *seg;
3076 3076          size_t ssize;
3077 3077          int error;
3078 3078  
3079      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3079 +        ASSERT(AS_WRITE_HELD(as));
3080 3080  
3081 3081          seg = as_segat(as, raddr);
3082 3082          if (seg == NULL) {
3083 3083                  panic("as_iset3_default_lpsize: no seg");
3084 3084          }
3085 3085  
3086 3086          for (; rsize != 0; rsize -= ssize, raddr += ssize) {
3087 3087                  if (raddr >= seg->s_base + seg->s_size) {
3088 3088                          seg = AS_SEGNEXT(as, seg);
3089 3089                          if (seg == NULL || raddr != seg->s_base) {
↓ open down ↓ 36 lines elided ↑ open up ↑
3126 3126   * match the bigger sizes, and (b) it's hard to get this offset (to begin
3127 3127   * with) to pass to map_pgszcvec().
3128 3128   */
3129 3129  static int
3130 3130  as_iset2_default_lpsize(struct as *as, caddr_t addr, size_t size, uint_t szc,
3131 3131      uint_t szcvec)
3132 3132  {
3133 3133          int error;
3134 3134          int retry;
3135 3135  
3136      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3136 +        ASSERT(AS_WRITE_HELD(as));
3137 3137  
3138 3138          for (;;) {
3139 3139                  error = as_iset3_default_lpsize(as, addr, size, szc, &retry);
3140 3140                  if (error == EINVAL && retry) {
3141 3141                          szcvec &= ~(1 << szc);
3142 3142                          if (szcvec <= 1) {
3143 3143                                  return (EINVAL);
3144 3144                          }
3145 3145                          szc = highbit(szcvec) - 1;
3146 3146                  } else {
↓ open down ↓ 11 lines elided ↑ open up ↑
3158 3158  as_iset1_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
3159 3159      uint_t szcvec)
3160 3160  {
3161 3161          struct seg *seg;
3162 3162          size_t ssize;
3163 3163          caddr_t setaddr = raddr;
3164 3164          size_t setsize = 0;
3165 3165          int set;
3166 3166          int error;
3167 3167  
3168      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3168 +        ASSERT(AS_WRITE_HELD(as));
3169 3169  
3170 3170          seg = as_segat(as, raddr);
3171 3171          if (seg == NULL) {
3172 3172                  panic("as_iset1_default_lpsize: no seg");
3173 3173          }
3174 3174          if (seg->s_szc < szc) {
3175 3175                  set = 1;
3176 3176          } else {
3177 3177                  set = 0;
3178 3178          }
↓ open down ↓ 47 lines elided ↑ open up ↑
3226 3226              flags, rtype, 1);
3227 3227          uint_t szc;
3228 3228          uint_t nszc;
3229 3229          int error;
3230 3230          caddr_t a;
3231 3231          caddr_t eaddr;
3232 3232          size_t segsize;
3233 3233          size_t pgsz;
3234 3234          uint_t save_szcvec;
3235 3235  
3236      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3236 +        ASSERT(AS_WRITE_HELD(as));
3237 3237          ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
3238 3238          ASSERT(IS_P2ALIGNED(size, PAGESIZE));
3239 3239  
3240 3240          szcvec &= ~1;
3241 3241          if (szcvec <= 1) {      /* skip if base page size */
3242 3242                  return (0);
3243 3243          }
3244 3244  
3245 3245          /* Get the pagesize of the first larger page size. */
3246 3246          szc = lowbit(szcvec) - 1;
↓ open down ↓ 71 lines elided ↑ open up ↑
3318 3318          int rtype, rflags;
3319 3319          int stype, sflags;
3320 3320          int error;
3321 3321          caddr_t setaddr;
3322 3322          size_t setsize;
3323 3323          int segvn;
3324 3324  
3325 3325          if (size == 0)
3326 3326                  return (0);
3327 3327  
3328      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     3328 +        AS_LOCK_ENTER(as, RW_WRITER);
3329 3329  again:
3330 3330          error = 0;
3331 3331  
3332 3332          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3333 3333          rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
3334 3334              (size_t)raddr;
3335 3335  
3336 3336          if (raddr + rsize < raddr) {            /* check for wraparound */
3337      -                AS_LOCK_EXIT(as, &as->a_lock);
     3337 +                AS_LOCK_EXIT(as);
3338 3338                  return (ENOMEM);
3339 3339          }
3340 3340          as_clearwatchprot(as, raddr, rsize);
3341 3341          seg = as_segat(as, raddr);
3342 3342          if (seg == NULL) {
3343 3343                  as_setwatch(as);
3344      -                AS_LOCK_EXIT(as, &as->a_lock);
     3344 +                AS_LOCK_EXIT(as);
3345 3345                  return (ENOMEM);
3346 3346          }
3347 3347          if (seg->s_ops == &segvn_ops) {
3348 3348                  rtype = SEGOP_GETTYPE(seg, addr);
3349 3349                  rflags = rtype & (MAP_TEXT | MAP_INITDATA);
3350 3350                  rtype = rtype & (MAP_SHARED | MAP_PRIVATE);
3351 3351                  segvn = 1;
3352 3352          } else {
3353 3353                  segvn = 0;
3354 3354          }
↓ open down ↓ 64 lines elided ↑ open up ↑
3419 3419                  error = EAGAIN;
3420 3420          } else if (error == ENOTSUP) {
3421 3421                  error = EINVAL;
3422 3422          } else if (error == EAGAIN) {
3423 3423                  mutex_enter(&as->a_contents);
3424 3424                  if (!AS_ISNOUNMAPWAIT(as)) {
3425 3425                          if (AS_ISUNMAPWAIT(as) == 0) {
3426 3426                                  cv_broadcast(&as->a_cv);
3427 3427                          }
3428 3428                          AS_SETUNMAPWAIT(as);
3429      -                        AS_LOCK_EXIT(as, &as->a_lock);
     3429 +                        AS_LOCK_EXIT(as);
3430 3430                          while (AS_ISUNMAPWAIT(as)) {
3431 3431                                  cv_wait(&as->a_cv, &as->a_contents);
3432 3432                          }
3433 3433                          mutex_exit(&as->a_contents);
3434      -                        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     3434 +                        AS_LOCK_ENTER(as, RW_WRITER);
3435 3435                  } else {
3436 3436                          /*
3437 3437                           * We may have raced with
3438 3438                           * segvn_reclaim()/segspt_reclaim(). In this case
3439 3439                           * clean nounmapwait flag and retry since softlockcnt
3440 3440                           * in this segment may be already 0.  We don't drop as
3441 3441                           * writer lock so our number of retries without
3442 3442                           * sleeping should be very small. See segvn_reclaim()
3443 3443                           * for more comments.
3444 3444                           */
3445 3445                          AS_CLRNOUNMAPWAIT(as);
3446 3446                          mutex_exit(&as->a_contents);
3447 3447                  }
3448 3448                  goto again;
3449 3449          }
3450 3450  
3451 3451          as_setwatch(as);
3452      -        AS_LOCK_EXIT(as, &as->a_lock);
     3452 +        AS_LOCK_EXIT(as);
3453 3453          return (error);
3454 3454  }
3455 3455  
3456 3456  /*
3457 3457   * Setup all of the uninitialized watched pages that we can.
3458 3458   */
3459 3459  void
3460 3460  as_setwatch(struct as *as)
3461 3461  {
3462 3462          struct watched_page *pwp;
3463 3463          struct seg *seg;
3464 3464          caddr_t vaddr;
3465 3465          uint_t prot;
3466 3466          int  err, retrycnt;
3467 3467  
3468 3468          if (avl_numnodes(&as->a_wpage) == 0)
3469 3469                  return;
3470 3470  
3471      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3471 +        ASSERT(AS_WRITE_HELD(as));
3472 3472  
3473 3473          for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3474 3474              pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3475 3475                  retrycnt = 0;
3476 3476          retry:
3477 3477                  vaddr = pwp->wp_vaddr;
3478 3478                  if (pwp->wp_oprot != 0 ||       /* already set up */
3479 3479                      (seg = as_segat(as, vaddr)) == NULL ||
3480 3480                      SEGOP_GETPROT(seg, vaddr, 0, &prot) != 0)
3481 3481                          continue;
↓ open down ↓ 26 lines elided ↑ open up ↑
3508 3508  {
3509 3509          struct watched_page *pwp;
3510 3510          struct seg *seg;
3511 3511          caddr_t vaddr;
3512 3512          uint_t prot;
3513 3513          int err, retrycnt;
3514 3514  
3515 3515          if (avl_numnodes(&as->a_wpage) == 0)
3516 3516                  return;
3517 3517  
3518      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3518 +        ASSERT(AS_WRITE_HELD(as));
3519 3519  
3520 3520          for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3521 3521              pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3522 3522                  retrycnt = 0;
3523 3523          retry:
3524 3524                  vaddr = pwp->wp_vaddr;
3525 3525                  if (pwp->wp_oprot == 0 ||       /* not set up */
3526 3526                      (seg = as_segat(as, vaddr)) == NULL)
3527 3527                          continue;
3528 3528  
↓ open down ↓ 21 lines elided ↑ open up ↑
3550 3550          caddr_t eaddr = addr + size;
3551 3551          caddr_t vaddr;
3552 3552          struct seg *seg;
3553 3553          int err, retrycnt;
3554 3554          uint_t  wprot;
3555 3555          avl_index_t where;
3556 3556  
3557 3557          if (avl_numnodes(&as->a_wpage) == 0)
3558 3558                  return;
3559 3559  
3560      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3560 +        ASSERT(AS_WRITE_HELD(as));
3561 3561  
3562 3562          tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3563 3563          if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
3564 3564                  pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
3565 3565  
3566 3566          while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3567 3567                  retrycnt = 0;
3568 3568                  vaddr = pwp->wp_vaddr;
3569 3569  
3570 3570                  wprot = prot;
↓ open down ↓ 38 lines elided ↑ open up ↑
3609 3609          int err, retrycnt;
3610 3610          avl_index_t where;
3611 3611  
3612 3612          if (avl_numnodes(&as->a_wpage) == 0)
3613 3613                  return;
3614 3614  
3615 3615          tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3616 3616          if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
3617 3617                  pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
3618 3618  
3619      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3619 +        ASSERT(AS_WRITE_HELD(as));
3620 3620  
3621 3621          while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3622 3622  
3623 3623                  if ((prot = pwp->wp_oprot) != 0) {
3624 3624                          retrycnt = 0;
3625 3625  
3626 3626                          if (prot != pwp->wp_prot) {
3627 3627                          retry:
3628 3628                                  seg = as_segat(as, pwp->wp_vaddr);
3629 3629                                  if (seg == NULL)
↓ open down ↓ 34 lines elided ↑ open up ↑
3664 3664  
3665 3665  /*
3666 3666   * return memory object ID
3667 3667   */
3668 3668  int
3669 3669  as_getmemid(struct as *as, caddr_t addr, memid_t *memidp)
3670 3670  {
3671 3671          struct seg      *seg;
3672 3672          int             sts;
3673 3673  
3674      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     3674 +        AS_LOCK_ENTER(as, RW_READER);
3675 3675          seg = as_segat(as, addr);
3676 3676          if (seg == NULL) {
3677      -                AS_LOCK_EXIT(as, &as->a_lock);
     3677 +                AS_LOCK_EXIT(as);
3678 3678                  return (EFAULT);
3679 3679          }
3680 3680          /*
3681 3681           * catch old drivers which may not support getmemid
3682 3682           */
3683 3683          if (seg->s_ops->getmemid == NULL) {
3684      -                AS_LOCK_EXIT(as, &as->a_lock);
     3684 +                AS_LOCK_EXIT(as);
3685 3685                  return (ENODEV);
3686 3686          }
3687 3687  
3688 3688          sts = SEGOP_GETMEMID(seg, addr, memidp);
3689 3689  
3690      -        AS_LOCK_EXIT(as, &as->a_lock);
     3690 +        AS_LOCK_EXIT(as);
3691 3691          return (sts);
3692 3692  }
    