Print this page
patch as-lock-macro-simplification

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/seg_spt.c
          +++ new/usr/src/uts/common/vm/seg_spt.c
↓ open down ↓ 233 lines elided ↑ open up ↑
 234  234  /*
 235  235   * called from seg_free().
 236  236   * free (i.e., unlock, unmap, return to free list)
 237  237   *  all the pages in the given seg.
 238  238   */
 239  239  void
 240  240  segspt_free(struct seg  *seg)
 241  241  {
 242  242          struct spt_data *sptd = (struct spt_data *)seg->s_data;
 243  243  
 244      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      244 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 245  245  
 246  246          if (sptd != NULL) {
 247  247                  if (sptd->spt_realsize)
 248  248                          segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 249  249  
 250  250          if (sptd->spt_ppa_lckcnt)
 251  251                  kmem_free(sptd->spt_ppa_lckcnt,
 252  252                      sizeof (*sptd->spt_ppa_lckcnt)
 253  253                      * btopr(sptd->spt_amp->size));
 254  254                  kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
↓ open down ↓ 1 lines elided ↑ open up ↑
 256  256                  mutex_destroy(&sptd->spt_lock);
 257  257                  kmem_free(sptd, sizeof (*sptd));
 258  258          }
 259  259  }
 260  260  
 261  261  /*ARGSUSED*/
 262  262  static int
 263  263  segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 264  264          uint_t flags)
 265  265  {
 266      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
      266 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
 267  267  
 268  268          return (0);
 269  269  }
 270  270  
 271  271  /*ARGSUSED*/
 272  272  static size_t
 273  273  segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 274  274  {
 275  275          caddr_t eo_seg;
 276  276          pgcnt_t npages;
 277  277          struct shm_data *shmd = (struct shm_data *)seg->s_data;
 278  278          struct seg      *sptseg;
 279  279          struct spt_data *sptd;
 280  280  
 281      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
      281 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
 282  282  #ifdef lint
 283  283          seg = seg;
 284  284  #endif
 285  285          sptseg = shmd->shm_sptseg;
 286  286          sptd = sptseg->s_data;
 287  287  
 288  288          if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 289  289                  eo_seg = addr + len;
 290  290                  while (addr < eo_seg) {
 291  291                          /* page exists, and it's locked. */
↓ open down ↓ 43 lines elided ↑ open up ↑
 335  335                  ANON_LOCK_EXIT(&amp->a_rwlock);
 336  336                  return (len);
 337  337          }
 338  338  }
 339  339  
 340  340  static int
 341  341  segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 342  342  {
 343  343          size_t share_size;
 344  344  
 345      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      345 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 346  346  
 347  347          /*
 348  348           * seg.s_size may have been rounded up to the largest page size
 349  349           * in shmat().
 350  350           * XXX This should be cleanedup. sptdestroy should take a length
 351  351           * argument which should be the same as sptcreate. Then
 352  352           * this rounding would not be needed (or is done in shm.c)
 353  353           * Only the check for full segment will be needed.
 354  354           *
 355  355           * XXX -- shouldn't raddr == 0 always? These tests don't seem
↓ open down ↓ 30 lines elided ↑ open up ↑
 386  386          pgcnt_t         pidx;
 387  387          size_t          sz;
 388  388          proc_t          *procp = curproc;
 389  389          rctl_qty_t      lockedbytes = 0;
 390  390          kproject_t      *proj;
 391  391  
 392  392          /*
 393  393           * We are holding the a_lock on the underlying dummy as,
 394  394           * so we can make calls to the HAT layer.
 395  395           */
 396      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      396 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 397  397          ASSERT(sp != NULL);
 398  398  
 399  399  #ifdef DEBUG
 400  400          TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 401  401              tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 402  402  #endif
 403  403          if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 404  404                  if (err = anon_swap_adjust(npages))
 405  405                          return (err);
 406  406          }
↓ open down ↓ 215 lines elided ↑ open up ↑
 622  622          struct vnode    *vp;
 623  623          u_offset_t      off;
 624  624          uint_t          hat_flags;
 625  625          int             root = 0;
 626  626          pgcnt_t         pgs, curnpgs = 0;
 627  627          page_t          *rootpp;
 628  628          rctl_qty_t      unlocked_bytes = 0;
 629  629          kproject_t      *proj;
 630  630          kshmid_t        *sp;
 631  631  
 632      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      632 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 633  633  
 634  634          len = P2ROUNDUP(len, PAGESIZE);
 635  635  
 636  636          npages = btop(len);
 637  637  
 638  638          hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 639  639          if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 640  640              (sptd->spt_flags & SHM_PAGEABLE)) {
 641  641                  hat_flags = HAT_UNLOAD_UNMAP;
 642  642          }
↓ open down ↓ 188 lines elided ↑ open up ↑
 831  831          struct  anon_map *amp;
 832  832          spgcnt_t        an_idx;
 833  833          int     ret = ENOTSUP;
 834  834          uint_t  pl_built = 0;
 835  835          struct  anon *ap;
 836  836          struct  vnode *vp;
 837  837          u_offset_t off;
 838  838          pgcnt_t claim_availrmem = 0;
 839  839          uint_t  szc;
 840  840  
 841      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
      841 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
 842  842          ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 843  843  
 844  844          /*
 845  845           * We want to lock/unlock the entire ISM segment. Therefore,
 846  846           * we will be using the underlying sptseg and its base address
 847  847           * and length for the caching arguments.
 848  848           */
 849  849          ASSERT(sptseg);
 850  850          ASSERT(sptd);
 851  851  
↓ open down ↓ 334 lines elided ↑ open up ↑
1186 1186          caddr_t a, spt_base;
1187 1187          struct page **pplist, **pl, *pp;
1188 1188          struct anon_map *amp;
1189 1189          ulong_t anon_index;
1190 1190          int ret = ENOTSUP;
1191 1191          uint_t  pl_built = 0;
1192 1192          struct anon *ap;
1193 1193          struct vnode *vp;
1194 1194          u_offset_t off;
1195 1195  
1196      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     1196 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1197 1197          ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1198 1198  
1199 1199  
1200 1200          /*
1201 1201           * We want to lock/unlock the entire ISM segment. Therefore,
1202 1202           * we will be using the underlying sptseg and its base address
1203 1203           * and length for the caching arguments.
1204 1204           */
1205 1205          ASSERT(sptseg);
1206 1206          ASSERT(sptd);
↓ open down ↓ 237 lines elided ↑ open up ↑
1444 1444  #ifdef lint
1445 1445          addr = addr;
1446 1446  #endif
1447 1447          sptseg = shmd->shm_sptseg;
1448 1448          sptd = sptseg->s_data;
1449 1449          npages = (len >> PAGESHIFT);
1450 1450          ASSERT(npages);
1451 1451          ASSERT(sptd->spt_pcachecnt != 0);
1452 1452          ASSERT(sptd->spt_ppa == pplist);
1453 1453          ASSERT(npages == btopr(sptd->spt_amp->size));
1454      -        ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     1454 +        ASSERT(async || AS_LOCK_HELD(seg->s_as));
1455 1455  
1456 1456          /*
1457 1457           * Acquire the lock on the dummy seg and destroy the
1458 1458           * ppa array IF this is the last pcachecnt.
1459 1459           */
1460 1460          mutex_enter(&sptd->spt_lock);
1461 1461          if (--sptd->spt_pcachecnt == 0) {
1462 1462                  for (i = 0; i < npages; i++) {
1463 1463                          if (pplist[i] == NULL) {
1464 1464                                  continue;
↓ open down ↓ 113 lines elided ↑ open up ↑
1578 1578          struct spt_data *sptd;
1579 1579          page_t *pp;
1580 1580          caddr_t adr;
1581 1581          struct vnode *vp;
1582 1582          u_offset_t offset;
1583 1583          ulong_t anon_index;
1584 1584          struct anon_map *amp;           /* XXX - for locknest */
1585 1585          struct anon *ap = NULL;
1586 1586          pgcnt_t npages;
1587 1587  
1588      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     1588 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1589 1589  
1590 1590          sptseg = shmd->shm_sptseg;
1591 1591          sptd = sptseg->s_data;
1592 1592  
1593 1593          /*
1594 1594           * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1595 1595           * and therefore their pages are SE_SHARED locked
1596 1596           * for the entire life of the segment.
1597 1597           */
1598 1598          if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
↓ open down ↓ 3 lines elided ↑ open up ↑
1602 1602  
1603 1603          /*
1604 1604           * Any thread is free to do a page_find and
1605 1605           * page_unlock() on the pages within this seg.
1606 1606           *
1607 1607           * We are already holding the as->a_lock on the user's
1608 1608           * real segment, but we need to hold the a_lock on the
1609 1609           * underlying dummy as. This is mostly to satisfy the
1610 1610           * underlying HAT layer.
1611 1611           */
1612      -        AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
     1612 +        AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1613 1613          hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1614      -        AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
     1614 +        AS_LOCK_EXIT(sptseg->s_as);
1615 1615  
1616 1616          amp = sptd->spt_amp;
1617 1617          ASSERT(amp != NULL);
1618 1618          anon_index = seg_page(sptseg, sptseg_addr);
1619 1619  
1620 1620          for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1621 1621                  ap = anon_get_ptr(amp->ahp, anon_index++);
1622 1622                  ASSERT(ap != NULL);
1623 1623                  swap_xlate(ap, &vp, &offset);
1624 1624  
↓ open down ↓ 44 lines elided ↑ open up ↑
1669 1669  
1670 1670  int
1671 1671  segspt_shmattach(struct seg *seg, caddr_t *argsp)
1672 1672  {
1673 1673          struct shm_data *shmd_arg = (struct shm_data *)argsp;
1674 1674          struct shm_data *shmd;
1675 1675          struct anon_map *shm_amp = shmd_arg->shm_amp;
1676 1676          struct spt_data *sptd;
1677 1677          int error = 0;
1678 1678  
1679      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     1679 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1680 1680  
1681 1681          shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1682 1682          if (shmd == NULL)
1683 1683                  return (ENOMEM);
1684 1684  
1685 1685          shmd->shm_sptas = shmd_arg->shm_sptas;
1686 1686          shmd->shm_amp = shm_amp;
1687 1687          shmd->shm_sptseg = shmd_arg->shm_sptseg;
1688 1688  
1689 1689          (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
↓ open down ↓ 38 lines elided ↑ open up ↑
1728 1728          }
1729 1729          return (error);
1730 1730  }
1731 1731  
1732 1732  int
1733 1733  segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1734 1734  {
1735 1735          struct shm_data *shmd = (struct shm_data *)seg->s_data;
1736 1736          int reclaim = 1;
1737 1737  
1738      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     1738 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1739 1739  retry:
1740 1740          if (shmd->shm_softlockcnt > 0) {
1741 1741                  if (reclaim == 1) {
1742 1742                          segspt_purge(seg);
1743 1743                          reclaim = 0;
1744 1744                          goto retry;
1745 1745                  }
1746 1746                  return (EAGAIN);
1747 1747          }
1748 1748  
↓ open down ↓ 13 lines elided ↑ open up ↑
1762 1762  
1763 1763          return (0);
1764 1764  }
1765 1765  
1766 1766  void
1767 1767  segspt_shmfree(struct seg *seg)
1768 1768  {
1769 1769          struct shm_data *shmd = (struct shm_data *)seg->s_data;
1770 1770          struct anon_map *shm_amp = shmd->shm_amp;
1771 1771  
1772      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     1772 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1773 1773  
1774 1774          (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1775 1775              MC_UNLOCK, NULL, 0);
1776 1776  
1777 1777          /*
1778 1778           * Need to increment refcnt when attaching
1779 1779           * and decrement when detaching because of dup().
1780 1780           */
1781 1781          ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1782 1782          shm_amp->refcnt--;
↓ open down ↓ 12 lines elided ↑ open up ↑
1795 1795          mutex_enter(&shmd->shm_segfree_syncmtx);
1796 1796          mutex_destroy(&shmd->shm_segfree_syncmtx);
1797 1797  
1798 1798          kmem_free(shmd, sizeof (*shmd));
1799 1799  }
1800 1800  
1801 1801  /*ARGSUSED*/
1802 1802  int
1803 1803  segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1804 1804  {
1805      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     1805 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1806 1806  
1807 1807          /*
1808 1808           * Shared page table is more than shared mapping.
1809 1809           *  Individual process sharing page tables can't change prot
1810 1810           *  because there is only one set of page tables.
1811 1811           *  This will be allowed after private page table is
1812 1812           *  supported.
1813 1813           */
1814 1814  /* need to return correct status error? */
1815 1815          return (0);
↓ open down ↓ 17 lines elided ↑ open up ↑
1833 1833          int     err = 0;
1834 1834          int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1835 1835          size_t  pgsz;
1836 1836          pgcnt_t pgcnt;
1837 1837          caddr_t a;
1838 1838          pgcnt_t pidx;
1839 1839  
1840 1840  #ifdef lint
1841 1841          hat = hat;
1842 1842  #endif
1843      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     1843 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1844 1844  
1845 1845          /*
1846 1846           * Because of the way spt is implemented
1847 1847           * the realsize of the segment does not have to be
1848 1848           * equal to the segment size itself. The segment size is
1849 1849           * often in multiples of a page size larger than PAGESIZE.
1850 1850           * The realsize is rounded up to the nearest PAGESIZE
1851 1851           * based on what the user requested. This is a bit of
1852 1852           * ugliness that is historical but not easily fixed
1853 1853           * without re-designing the higher levels of ISM.
↓ open down ↓ 48 lines elided ↑ open up ↑
1902 1902                  ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1903 1903  
1904 1904                  err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1905 1905                  if (err != 0) {
1906 1906                          if (type == F_SOFTLOCK) {
1907 1907                                  atomic_add_long((ulong_t *)(
1908 1908                                      &(shmd->shm_softlockcnt)), -npages);
1909 1909                          }
1910 1910                          goto dism_err;
1911 1911                  }
1912      -                AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
     1912 +                AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1913 1913                  a = segspt_addr;
1914 1914                  pidx = 0;
1915 1915                  if (type == F_SOFTLOCK) {
1916 1916  
1917 1917                          /*
1918 1918                           * Load up the translation keeping it
1919 1919                           * locked and don't unlock the page.
1920 1920                           */
1921 1921                          for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1922 1922                                  hat_memload_array(sptseg->s_as->a_hat,
↓ open down ↓ 40 lines elided ↑ open up ↑
1963 1963                              seg->s_szc) != 0) {
1964 1964                                  panic("hat_share err in DISM fault");
1965 1965                                  /* NOTREACHED */
1966 1966                          }
1967 1967                          if (type == F_INVAL) {
1968 1968                                  for (i = 0; i < npages; i++) {
1969 1969                                          page_unlock(ppa[i]);
1970 1970                                  }
1971 1971                          }
1972 1972                  }
1973      -                AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
     1973 +                AS_LOCK_EXIT(sptseg->s_as);
1974 1974  dism_err:
1975 1975                  kmem_free(ppa, npages * sizeof (page_t *));
1976 1976                  return (err);
1977 1977  
1978 1978          case F_SOFTUNLOCK:
1979 1979  
1980 1980                  /*
1981 1981                   * This is a bit ugly, we pass in the real seg pointer,
1982 1982                   * but the segspt_addr is the virtual address within the
1983 1983                   * dummy seg.
↓ open down ↓ 46 lines elided ↑ open up ↑
2030 2030          size_t          pgsz;
2031 2031          pgcnt_t         pgcnt;
2032 2032          caddr_t         a;
2033 2033          pgcnt_t         pidx;
2034 2034          size_t          sz;
2035 2035  
2036 2036  #ifdef lint
2037 2037          hat = hat;
2038 2038  #endif
2039 2039  
2040      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2040 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2041 2041  
2042 2042          if (sptd->spt_flags & SHM_PAGEABLE) {
2043 2043                  return (segspt_dismfault(hat, seg, addr, len, type, rw));
2044 2044          }
2045 2045  
2046 2046          /*
2047 2047           * Because of the way spt is implemented
2048 2048           * the realsize of the segment does not have to be
2049 2049           * equal to the segment size itself. The segment size is
2050 2050           * often in multiples of a page size larger than PAGESIZE.
↓ open down ↓ 111 lines elided ↑ open up ↑
2162 2162                  }
2163 2163                  ANON_LOCK_EXIT(&amp->a_rwlock);
2164 2164                  ASSERT(i == npages);
2165 2165  
2166 2166                  /*
2167 2167                   * We are already holding the as->a_lock on the user's
2168 2168                   * real segment, but we need to hold the a_lock on the
2169 2169                   * underlying dummy as. This is mostly to satisfy the
2170 2170                   * underlying HAT layer.
2171 2171                   */
2172      -                AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
     2172 +                AS_LOCK_ENTER(sptseg->s_as, RW_READER);
2173 2173                  a = sptseg_addr;
2174 2174                  pidx = 0;
2175 2175                  if (type == F_SOFTLOCK) {
2176 2176                          /*
2177 2177                           * Load up the translation keeping it
2178 2178                           * locked and don't unlock the page.
2179 2179                           */
2180 2180                          for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2181 2181                                  sz = MIN(pgsz, ptob(npages - pidx));
2182 2182                                  hat_memload_array(sptseg->s_as->a_hat, a,
↓ open down ↓ 24 lines elided ↑ open up ↑
2207 2207                                      ptob(npages), ppa, sptd->spt_prot,
2208 2208                                      HAT_LOAD_SHARE);
2209 2209                          }
2210 2210  
2211 2211                          /*
2212 2212                           * And now drop the SE_SHARED lock(s).
2213 2213                           */
2214 2214                          for (i = 0; i < npages; i++)
2215 2215                                  page_unlock(ppa[i]);
2216 2216                  }
2217      -                AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
     2217 +                AS_LOCK_EXIT(sptseg->s_as);
2218 2218  
2219 2219                  kmem_free(ppa, sizeof (page_t *) * npages);
2220 2220                  return (0);
2221 2221          case F_SOFTUNLOCK:
2222 2222  
2223 2223                  /*
2224 2224                   * This is a bit ugly, we pass in the real seg pointer,
2225 2225                   * but the sptseg_addr is the virtual address within the
2226 2226                   * dummy seg.
2227 2227                   */
↓ open down ↓ 49 lines elided ↑ open up ↑
2277 2277  int
2278 2278  segspt_shmdup(struct seg *seg, struct seg *newseg)
2279 2279  {
2280 2280          struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2281 2281          struct anon_map         *amp = shmd->shm_amp;
2282 2282          struct shm_data         *shmd_new;
2283 2283          struct seg              *spt_seg = shmd->shm_sptseg;
2284 2284          struct spt_data         *sptd = spt_seg->s_data;
2285 2285          int                     error = 0;
2286 2286  
2287      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     2287 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2288 2288  
2289 2289          shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2290 2290          newseg->s_data = (void *)shmd_new;
2291 2291          shmd_new->shm_sptas = shmd->shm_sptas;
2292 2292          shmd_new->shm_amp = amp;
2293 2293          shmd_new->shm_sptseg = shmd->shm_sptseg;
2294 2294          newseg->s_ops = &segspt_shmops;
2295 2295          newseg->s_szc = seg->s_szc;
2296 2296          ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2297 2297  
↓ open down ↓ 21 lines elided ↑ open up ↑
2319 2319          }
2320 2320  }
2321 2321  
2322 2322  /*ARGSUSED*/
2323 2323  int
2324 2324  segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2325 2325  {
2326 2326          struct shm_data *shmd = (struct shm_data *)seg->s_data;
2327 2327          struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2328 2328  
2329      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2329 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2330 2330  
2331 2331          /*
2332 2332           * ISM segment is always rw.
2333 2333           */
2334 2334          return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2335 2335  }
2336 2336  
2337 2337  /*
2338 2338   * Return an array of locked large pages, for empty slots allocate
2339 2339   * private zero-filled anon pages.
↓ open down ↓ 330 lines elided ↑ open up ↑
2670 2670          caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2671 2671          size_t          a_len;                  /* aligned len */
2672 2672          size_t          share_sz;
2673 2673          ulong_t         i;
2674 2674          int             sts = 0;
2675 2675          rctl_qty_t      unlocked = 0;
2676 2676          rctl_qty_t      locked = 0;
2677 2677          struct proc     *p = curproc;
2678 2678          kproject_t      *proj;
2679 2679  
2680      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2680 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2681 2681          ASSERT(sp != NULL);
2682 2682  
2683 2683          if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2684 2684                  return (0);
2685 2685          }
2686 2686  
2687 2687          addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2688 2688          an_idx = seg_page(seg, addr);
2689 2689          npages = btopr(len);
2690 2690  
↓ open down ↓ 109 lines elided ↑ open up ↑
2800 2800  }
2801 2801  
2802 2802  /*ARGSUSED*/
2803 2803  int
2804 2804  segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2805 2805  {
2806 2806          struct shm_data *shmd = (struct shm_data *)seg->s_data;
2807 2807          struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2808 2808          spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2809 2809  
2810      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2810 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2811 2811  
2812 2812          /*
2813 2813           * ISM segment is always rw.
2814 2814           */
2815 2815          while (--pgno >= 0)
2816 2816                  *protv++ = sptd->spt_prot;
2817 2817          return (0);
2818 2818  }
2819 2819  
2820 2820  /*ARGSUSED*/
2821 2821  u_offset_t
2822 2822  segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2823 2823  {
2824      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2824 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2825 2825  
2826 2826          /* Offset does not matter in ISM memory */
2827 2827  
2828 2828          return ((u_offset_t)0);
2829 2829  }
2830 2830  
2831 2831  /* ARGSUSED */
2832 2832  int
2833 2833  segspt_shmgettype(struct seg *seg, caddr_t addr)
2834 2834  {
2835 2835          struct shm_data *shmd = (struct shm_data *)seg->s_data;
2836 2836          struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2837 2837  
2838      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2838 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2839 2839  
2840 2840          /*
2841 2841           * The shared memory mapping is always MAP_SHARED, SWAP is only
2842 2842           * reserved for DISM
2843 2843           */
2844 2844          return (MAP_SHARED |
2845 2845              ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2846 2846  }
2847 2847  
2848 2848  /*ARGSUSED*/
2849 2849  int
2850 2850  segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2851 2851  {
2852 2852          struct shm_data *shmd = (struct shm_data *)seg->s_data;
2853 2853          struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2854 2854  
2855      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2855 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2856 2856  
2857 2857          *vpp = sptd->spt_vp;
2858 2858          return (0);
2859 2859  }
2860 2860  
2861 2861  /*
2862 2862   * We need to wait for pending IO to complete to a DISM segment in order for
2863 2863   * pages to get kicked out of the seg_pcache.  120 seconds should be more
2864 2864   * than enough time to wait.
2865 2865   */
↓ open down ↓ 5 lines elided ↑ open up ↑
2871 2871  {
2872 2872          struct shm_data *shmd = (struct shm_data *)seg->s_data;
2873 2873          struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2874 2874          struct anon_map *amp;
2875 2875          pgcnt_t pg_idx;
2876 2876          ushort_t gen;
2877 2877          clock_t end_lbolt;
2878 2878          int writer;
2879 2879          page_t **ppa;
2880 2880  
2881      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2881 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2882 2882  
2883 2883          if (behav == MADV_FREE) {
2884 2884                  if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2885 2885                          return (0);
2886 2886  
2887 2887                  amp = sptd->spt_amp;
2888 2888                  pg_idx = seg_page(seg, addr);
2889 2889  
2890 2890                  mutex_enter(&sptd->spt_lock);
2891 2891                  if ((ppa = sptd->spt_ppa) == NULL) {
↓ open down ↓ 13 lines elided ↑ open up ↑
2905 2905                   * Purge all DISM cached pages
2906 2906                   */
2907 2907                  seg_ppurge_wiredpp(ppa);
2908 2908  
2909 2909                  /*
2910 2910                   * Drop the AS_LOCK so that other threads can grab it
2911 2911                   * in the as_pageunlock path and hopefully get the segment
2912 2912                   * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2913 2913                   * to keep this segment resident.
2914 2914                   */
2915      -                writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
     2915 +                writer = AS_WRITE_HELD(seg->s_as);
2916 2916                  atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2917      -                AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
     2917 +                AS_LOCK_EXIT(seg->s_as);
2918 2918  
2919 2919                  mutex_enter(&sptd->spt_lock);
2920 2920  
2921 2921                  end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2922 2922  
2923 2923                  /*
2924 2924                   * Try to wait for pages to get kicked out of the seg_pcache.
2925 2925                   */
2926 2926                  while (sptd->spt_gen == gen &&
2927 2927                      (sptd->spt_flags & DISM_PPA_CHANGED) &&
2928 2928                      ddi_get_lbolt() < end_lbolt) {
2929 2929                          if (!cv_timedwait_sig(&sptd->spt_cv,
2930 2930                              &sptd->spt_lock, end_lbolt)) {
2931 2931                                  break;
2932 2932                          }
2933 2933                  }
2934 2934  
2935 2935                  mutex_exit(&sptd->spt_lock);
2936 2936  
2937 2937                  /* Regrab the AS_LOCK and release our hold on the segment */
2938      -                AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2939      -                    writer ? RW_WRITER : RW_READER);
     2938 +                AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
2940 2939                  atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2941 2940                  if (shmd->shm_softlockcnt <= 0) {
2942 2941                          if (AS_ISUNMAPWAIT(seg->s_as)) {
2943 2942                                  mutex_enter(&seg->s_as->a_contents);
2944 2943                                  if (AS_ISUNMAPWAIT(seg->s_as)) {
2945 2944                                          AS_CLRUNMAPWAIT(seg->s_as);
2946 2945                                          cv_broadcast(&seg->s_as->a_cv);
2947 2946                                  }
2948 2947                                  mutex_exit(&seg->s_as->a_contents);
2949 2948                          }
↓ open down ↓ 168 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX