1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #include <sys/param.h>
  26 #include <sys/user.h>
  27 #include <sys/mman.h>
  28 #include <sys/kmem.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/systm.h>
  32 #include <sys/tuneable.h>
  33 #include <vm/hat.h>
  34 #include <vm/seg.h>
  35 #include <vm/as.h>
  36 #include <vm/anon.h>
  37 #include <vm/page.h>
  38 #include <sys/buf.h>
  39 #include <sys/swap.h>
  40 #include <sys/atomic.h>
  41 #include <vm/seg_spt.h>
  42 #include <sys/debug.h>
  43 #include <sys/vtrace.h>
  44 #include <sys/shm.h>
  45 #include <sys/shm_impl.h>
  46 #include <sys/lgrp.h>
  47 #include <sys/vmsystm.h>
  48 #include <sys/policy.h>
  49 #include <sys/project.h>
  50 #include <sys/tnf_probe.h>
  51 #include <sys/zone.h>
  52 
  53 #define SEGSPTADDR      (caddr_t)0x0
  54 
  55 /*
  56  * # pages used for spt
  57  */
  58 size_t  spt_used;
  59 
  60 /*
  61  * segspt_minfree is the memory left for the system after ISM
  62  * has locked its pages; it is set to 5% of availrmem in
  63  * sptcreate when ISM is created.  ISM should not use more
  64  * than ~95% of availrmem; if it does, the performance
  65  * of the system may decrease.  Machines with large memories may
  66  * be able to use more memory for ISM, so we set the default
  67  * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
  68  * If somebody wants even more memory for ISM (risking hanging
  69  * the system) they can patch segspt_minfree to a smaller number.
  70  */
  71 pgcnt_t segspt_minfree = 0;
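
/*
 * Worked example of the default sizing (illustrative only; the page
 * count below is an assumption, the formula is the one applied in
 * sptcreate()): on a system where availrmem is 1,000,000 pages,
 *
 *      segspt_minfree = availrmem / 20 = 50,000 pages (5%)
 *
 * so ISM may lock at most 950,000 pages (95% of availrmem).
 */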
  72 
  73 static int segspt_create(struct seg *seg, caddr_t argsp);
  74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
  75 static void segspt_free(struct seg *seg);
  76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
  77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
  78 
  79 static void
  80 segspt_badop()
  81 {
  82         panic("segspt_badop called");
  83         /*NOTREACHED*/
  84 }
  85 
  86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
  87 
  88 struct seg_ops segspt_ops = {
  89         SEGSPT_BADOP(int),              /* dup */
  90         segspt_unmap,
  91         segspt_free,
  92         SEGSPT_BADOP(int),              /* fault */
  93         SEGSPT_BADOP(faultcode_t),      /* faulta */
  94         SEGSPT_BADOP(int),              /* setprot */
  95         SEGSPT_BADOP(int),              /* checkprot */
  96         SEGSPT_BADOP(int),              /* kluster */
  97         SEGSPT_BADOP(int),              /* sync */
  98         SEGSPT_BADOP(size_t),           /* incore */
  99         SEGSPT_BADOP(int),              /* lockop */
 100         SEGSPT_BADOP(int),              /* getprot */
 101         SEGSPT_BADOP(u_offset_t),       /* getoffset */
 102         SEGSPT_BADOP(int),              /* gettype */
 103         SEGSPT_BADOP(int),              /* getvp */
 104         SEGSPT_BADOP(int),              /* advise */
 105         SEGSPT_BADOP(void),             /* dump */
 106         SEGSPT_BADOP(int),              /* pagelock */
 107         SEGSPT_BADOP(int),              /* setpgsz */
 108         SEGSPT_BADOP(int),              /* getmemid */
 109         segspt_getpolicy,               /* getpolicy */
 110         SEGSPT_BADOP(int),              /* capable */
 111 };
 112 
 113 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
 114 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
 115 static void segspt_shmfree(struct seg *seg);
 116 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
 117                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
 118 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
 119 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
 120                         register size_t len, register uint_t prot);
 121 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
 122                         uint_t prot);
 123 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
 124 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
 125                         register char *vec);
 126 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
 127                         int attr, uint_t flags);
 128 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 129                         int attr, int op, ulong_t *lockmap, size_t pos);
 130 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 131                         uint_t *protv);
 132 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 133 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 134 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 135 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 136                         uint_t behav);
 137 static void segspt_shmdump(struct seg *seg);
 138 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 139                         struct page ***, enum lock_type, enum seg_rw);
 140 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
 141 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 142 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 143 static int segspt_shmcapable(struct seg *, segcapability_t);
 144 
 145 struct seg_ops segspt_shmops = {
 146         segspt_shmdup,
 147         segspt_shmunmap,
 148         segspt_shmfree,
 149         segspt_shmfault,
 150         segspt_shmfaulta,
 151         segspt_shmsetprot,
 152         segspt_shmcheckprot,
 153         segspt_shmkluster,
 154         segspt_shmsync,
 155         segspt_shmincore,
 156         segspt_shmlockop,
 157         segspt_shmgetprot,
 158         segspt_shmgetoffset,
 159         segspt_shmgettype,
 160         segspt_shmgetvp,
 161         segspt_shmadvise,       /* advise */
 162         segspt_shmdump,
 163         segspt_shmpagelock,
 164         segspt_shmsetpgsz,
 165         segspt_shmgetmemid,
 166         segspt_shmgetpolicy,
 167         segspt_shmcapable,
 168 };
 169 
 170 static void segspt_purge(struct seg *seg);
 171 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 172                 enum seg_rw, int);
 173 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 174                 page_t **ppa);
 175 
 176 
 177 
 178 /*ARGSUSED*/
 179 int
 180 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 181         uint_t prot, uint_t flags, uint_t share_szc)
 182 {
 183         int     err;
 184         struct  as      *newas;
 185         struct  segspt_crargs sptcargs;
 186 
 187 #ifdef DEBUG
 188         TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
 189                         tnf_ulong, size, size );
 190 #endif
 191         if (segspt_minfree == 0)        /* leave min 5% of availrmem */
 192                 segspt_minfree = availrmem/20;  /* for the system */
 193 
 194         if (!hat_supported(HAT_SHARED_PT, (void *)0))
 195                 return (EINVAL);
 196 
 197         /*
 198          * get a new as for this shared memory segment
 199          */
 200         newas = as_alloc();
 201         newas->a_proc = NULL;
 202         sptcargs.amp = amp;
 203         sptcargs.prot = prot;
 204         sptcargs.flags = flags;
 205         sptcargs.szc = share_szc;
 206         /*
 207          * create a shared page table (spt) segment
 208          */
 209 
 210         if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
 211                 as_free(newas);
 212                 return (err);
 213         }
 214         *sptseg = sptcargs.seg_spt;
 215         return (0);
 216 }
 217 
 218 void
 219 sptdestroy(struct as *as, struct anon_map *amp)
 220 {
 221 
 222 #ifdef DEBUG
 223         TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
 224 #endif
 225         (void) as_unmap(as, SEGSPTADDR, amp->size);
 226         as_free(as);
 227 }
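
/*
 * Illustrative sketch (not part of this driver): one way a hypothetical
 * caller, such as the SysV shared memory layer, might drive the
 * sptcreate()/sptdestroy() pair above for an ISM (non-pageable) segment.
 * The function name and the zero "flags" value are assumptions made for
 * illustration only.
 */
#if 0   /* example only, never compiled */
static int
example_spt_lifecycle(struct anon_map *amp, uint_t prot, uint_t share_szc)
{
        struct seg *sptseg;
        int error;

        /* Build the dummy as and the spt segment covering amp->size bytes. */
        error = sptcreate(amp->size, &sptseg, amp, prot, 0, share_szc);
        if (error != 0)
                return (error);

        /* ... processes attach and detach through segspt_shmops ... */

        /* Unmap and free the dummy as once the last attachment is gone. */
        sptdestroy(sptseg->s_as, amp);
        return (0);
}
#endif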
 228 
 229 /*
 230  * called from seg_free().
 231  * free (i.e., unlock, unmap, return to free list)
 232  *  all the pages in the given seg.
 233  */
 234 void
 235 segspt_free(struct seg  *seg)
 236 {
 237         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 238 
 239         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 240 
 241         if (sptd != NULL) {
 242                 if (sptd->spt_realsize)
 243                         segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 244 
 245                 if (sptd->spt_ppa_lckcnt)
 246                         kmem_free(sptd->spt_ppa_lckcnt,
 247                             sizeof (*sptd->spt_ppa_lckcnt)
 248                             * btopr(sptd->spt_amp->size));
 249                 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
 250                 cv_destroy(&sptd->spt_cv);
 251                 mutex_destroy(&sptd->spt_lock);
 252                 kmem_free(sptd, sizeof (*sptd));
 253         }
 254 }
 255 
 256 /*ARGSUSED*/
 257 static int
 258 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 259         uint_t flags)
 260 {
 261         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 262 
 263         return (0);
 264 }
 265 
 266 /*ARGSUSED*/
 267 static size_t
 268 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 269 {
 270         caddr_t eo_seg;
 271         pgcnt_t npages;
 272         struct shm_data *shmd = (struct shm_data *)seg->s_data;
 273         struct seg      *sptseg;
 274         struct spt_data *sptd;
 275 
 276         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 277 #ifdef lint
 278         seg = seg;
 279 #endif
 280         sptseg = shmd->shm_sptseg;
 281         sptd = sptseg->s_data;
 282 
 283         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 284                 eo_seg = addr + len;
 285                 while (addr < eo_seg) {
 286                         /* page exists, and it's locked. */
 287                         *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
 288                             SEG_PAGE_ANON;
 289                         addr += PAGESIZE;
 290                 }
 291                 return (len);
 292         } else {
 293                 struct  anon_map *amp = shmd->shm_amp;
 294                 struct  anon    *ap;
 295                 page_t          *pp;
 296                 pgcnt_t         anon_index;
 297                 struct vnode    *vp;
 298                 u_offset_t      off;
 299                 ulong_t         i;
 300                 int             ret;
 301                 anon_sync_obj_t cookie;
 302 
 303                 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 304                 anon_index = seg_page(seg, addr);
 305                 npages = btopr(len);
 306                 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
 307                         return (EINVAL);
 308                 }
 309                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 310                 for (i = 0; i < npages; i++, anon_index++) {
 311                         ret = 0;
 312                         anon_array_enter(amp, anon_index, &cookie);
 313                         ap = anon_get_ptr(amp->ahp, anon_index);
 314                         if (ap != NULL) {
 315                                 swap_xlate(ap, &vp, &off);
 316                                 anon_array_exit(&cookie);
 317                                 pp = page_lookup_nowait(vp, off, SE_SHARED);
 318                                 if (pp != NULL) {
 319                                         ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
 320                                         page_unlock(pp);
 321                                 }
 322                         } else {
 323                                 anon_array_exit(&cookie);
 324                         }
 325                         if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
 326                                 ret |= SEG_PAGE_LOCKED;
 327                         }
 328                         *vec++ = (char)ret;
 329                 }
 330                 ANON_LOCK_EXIT(&amp->a_rwlock);
 331                 return (len);
 332         }
 333 }
 334 
 335 static int
 336 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 337 {
 338         size_t share_size;
 339 
 340         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 341 
 342         /*
 343          * seg.s_size may have been rounded up to the largest page size
 344          * in shmat().
 345          * XXX This should be cleaned up. sptdestroy should take a length
 346          * argument which should be the same as sptcreate's. Then
 347          * this rounding would not be needed (or would be done in shm.c)
 348          * and only the check for the full segment would be needed.
 349          *
 350          * XXX -- shouldn't raddr always be 0? These tests don't seem
 351          * to be useful at all.
 352          */
 353         share_size = page_get_pagesize(seg->s_szc);
 354         ssize = P2ROUNDUP(ssize, share_size);
 355 
 356         if (raddr == seg->s_base && ssize == seg->s_size) {
 357                 seg_free(seg);
 358                 return (0);
 359         } else
 360                 return (EINVAL);
 361 }
 362 
 363 int
 364 segspt_create(struct seg *seg, caddr_t argsp)
 365 {
 366         int             err;
 367         caddr_t         addr = seg->s_base;
 368         struct spt_data *sptd;
 369         struct  segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
 370         struct anon_map *amp = sptcargs->amp;
 371         struct kshmid   *sp = amp->a_sp;
 372         struct  cred    *cred = CRED();
 373         ulong_t         i, j, anon_index = 0;
 374         pgcnt_t         npages = btopr(amp->size);
 375         struct vnode    *vp;
 376         page_t          **ppa;
 377         uint_t          hat_flags;
 378         size_t          pgsz;
 379         pgcnt_t         pgcnt;
 380         caddr_t         a;
 381         pgcnt_t         pidx;
 382         size_t          sz;
 383         proc_t          *procp = curproc;
 384         rctl_qty_t      lockedbytes = 0;
 385         kproject_t      *proj;
 386 
 387         /*
 388          * We are holding the a_lock on the underlying dummy as,
 389          * so we can make calls to the HAT layer.
 390          */
 391         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 392         ASSERT(sp != NULL);
 393 
 394 #ifdef DEBUG
 395         TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 396             tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 397 #endif
 398         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 399                 if (err = anon_swap_adjust(npages))
 400                         return (err);
 401         }
 402         err = ENOMEM;
 403 
 404         if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
 405                 goto out1;
 406 
 407         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 408                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
 409                     KM_NOSLEEP)) == NULL)
 410                         goto out2;
 411         }
 412 
 413         mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
 414 
 415         if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
 416                 goto out3;
 417 
 418         seg->s_ops = &segspt_ops;
 419         sptd->spt_vp = vp;
 420         sptd->spt_amp = amp;
 421         sptd->spt_prot = sptcargs->prot;
 422         sptd->spt_flags = sptcargs->flags;
 423         seg->s_data = (caddr_t)sptd;
 424         sptd->spt_ppa = NULL;
 425         sptd->spt_ppa_lckcnt = NULL;
 426         seg->s_szc = sptcargs->szc;
 427         cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
 428         sptd->spt_gen = 0;
 429 
 430         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 431         if (seg->s_szc > amp->a_szc) {
 432                 amp->a_szc = seg->s_szc;
 433         }
 434         ANON_LOCK_EXIT(&amp->a_rwlock);
 435 
 436         /*
 437          * Set policy to affect initial allocation of pages in
 438          * anon_map_createpages()
 439          */
 440         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
 441             NULL, 0, ptob(npages));
 442 
 443         if (sptcargs->flags & SHM_PAGEABLE) {
 444                 size_t  share_sz;
 445                 pgcnt_t new_npgs, more_pgs;
 446                 struct anon_hdr *nahp;
 447                 zone_t *zone;
 448 
 449                 share_sz = page_get_pagesize(seg->s_szc);
 450                 if (!IS_P2ALIGNED(amp->size, share_sz)) {
 451                         /*
 452                          * We round up the size of the anon array
 453                          * to a 4 M boundary because we always create
 454                          * 4 M of pages when locking or faulting pages,
 455                          * and then we don't have to check for all the
 456                          * corner cases, e.g. whether there is enough
 457                          * space to allocate a 4 M page.
 458                          */
 459                         new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
 460                         more_pgs = new_npgs - npages;
 461 
 462                         /*
 463                          * The zone will never be NULL, as a fully created
 464                          * shm always has an owning zone.
 465                          */
 466                         zone = sp->shm_perm.ipc_zone_ref.zref_zone;
 467                         ASSERT(zone != NULL);
 468                         if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
 469                                 err = ENOMEM;
 470                                 goto out4;
 471                         }
 472 
 473                         nahp = anon_create(new_npgs, ANON_SLEEP);
 474                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 475                         (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
 476                             ANON_SLEEP);
 477                         anon_release(amp->ahp, npages);
 478                         amp->ahp = nahp;
 479                         ASSERT(amp->swresv == ptob(npages));
 480                         amp->swresv = amp->size = ptob(new_npgs);
 481                         ANON_LOCK_EXIT(&amp->a_rwlock);
 482                         npages = new_npgs;
 483                 }
 484 
 485                 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
 486                     sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
 487                 sptd->spt_pcachecnt = 0;
 488                 sptd->spt_realsize = ptob(npages);
 489                 sptcargs->seg_spt = seg;
 490                 return (0);
 491         }
 492 
 493         /*
 494          * get array of pages for each anon slot in amp
 495          */
 496         if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
 497             seg, addr, S_CREATE, cred)) != 0)
 498                 goto out4;
 499 
 500         mutex_enter(&sp->shm_mlock);
 501 
 502         /* May be partially locked, so count bytes to charge for locking */
 503         for (i = 0; i < npages; i++)
 504                 if (ppa[i]->p_lckcnt == 0)
 505                         lockedbytes += PAGESIZE;
 506 
 507         proj = sp->shm_perm.ipc_proj;
 508 
 509         if (lockedbytes > 0) {
 510                 mutex_enter(&procp->p_lock);
 511                 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
 512                         mutex_exit(&procp->p_lock);
 513                         mutex_exit(&sp->shm_mlock);
 514                         for (i = 0; i < npages; i++)
 515                                 page_unlock(ppa[i]);
 516                         err = ENOMEM;
 517                         goto out4;
 518                 }
 519                 mutex_exit(&procp->p_lock);
 520         }
 521 
 522         /*
 523          * addr is the initial address of the first page in the ppa list
 524          */
 525         for (i = 0; i < npages; i++) {
 526                 /* attempt to lock all pages */
 527                 if (page_pp_lock(ppa[i], 0, 1) == 0) {
 528                         /*
 529                          * if unable to lock any page, unlock all
 530                          * of them and return error
 531                          */
 532                         for (j = 0; j < i; j++)
 533                                 page_pp_unlock(ppa[j], 0, 1);
 534                         for (i = 0; i < npages; i++)
 535                                 page_unlock(ppa[i]);
 536                         rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
 537                         mutex_exit(&sp->shm_mlock);
 538                         err = ENOMEM;
 539                         goto out4;
 540                 }
 541         }
 542         mutex_exit(&sp->shm_mlock);
 543 
 544         /*
 545          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
 546          * for the entire life of the segment, for example platforms
 547          * that do not support Dynamic Reconfiguration.
 548          */
 549         hat_flags = HAT_LOAD_SHARE;
 550         if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 551                 hat_flags |= HAT_LOAD_LOCK;
 552 
 553         /*
 554          * Load translations one large page at a time
 555          * to make sure we don't create mappings bigger than
 556          * the segment's size code, in case the underlying pages
 557          * are shared with a segvn segment that uses a bigger
 558          * size code than we do.
 559          */
 560         pgsz = page_get_pagesize(seg->s_szc);
 561         pgcnt = page_get_pagecnt(seg->s_szc);
 562         for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
 563                 sz = MIN(pgsz, ptob(npages - pidx));
 564                 hat_memload_array(seg->s_as->a_hat, a, sz,
 565                     &ppa[pidx], sptd->spt_prot, hat_flags);
 566         }
 567 
 568         /*
 569          * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 570          * we will leave the pages locked SE_SHARED for the life
 571          * of the ISM segment. This will prevent any calls to
 572          * hat_pageunload() on this ISM segment for those platforms.
 573          */
 574         if (!(hat_flags & HAT_LOAD_LOCK)) {
 575                 /*
 576                  * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
 577                  * we no longer need to hold the SE_SHARED lock on the pages,
 578                  * since L_PAGELOCK and F_SOFTLOCK calls will grab the
 579                  * SE_SHARED lock on the pages as necessary.
 580                  */
 581                 for (i = 0; i < npages; i++)
 582                         page_unlock(ppa[i]);
 583         }
 584         sptd->spt_pcachecnt = 0;
 585         kmem_free(ppa, ((sizeof (page_t *)) * npages));
 586         sptd->spt_realsize = ptob(npages);
 587         atomic_add_long(&spt_used, npages);
 588         sptcargs->seg_spt = seg;
 589         return (0);
 590 
 591 out4:
 592         seg->s_data = NULL;
 593         kmem_free(vp, sizeof (*vp));
 594         cv_destroy(&sptd->spt_cv);
 595 out3:
 596         mutex_destroy(&sptd->spt_lock);
 597         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 598                 kmem_free(ppa, (sizeof (*ppa) * npages));
 599 out2:
 600         kmem_free(sptd, sizeof (*sptd));
 601 out1:
 602         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 603                 anon_swap_restore(npages);
 604         return (err);
 605 }
 606 
 607 /*ARGSUSED*/
 608 void
 609 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
 610 {
 611         struct page     *pp;
 612         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 613         pgcnt_t         npages;
 614         ulong_t         anon_idx;
 615         struct anon_map *amp;
 616         struct anon     *ap;
 617         struct vnode    *vp;
 618         u_offset_t      off;
 619         uint_t          hat_flags;
 620         int             root = 0;
 621         pgcnt_t         pgs, curnpgs = 0;
 622         page_t          *rootpp;
 623         rctl_qty_t      unlocked_bytes = 0;
 624         kproject_t      *proj;
 625         kshmid_t        *sp;
 626 
 627         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 628 
 629         len = P2ROUNDUP(len, PAGESIZE);
 630 
 631         npages = btop(len);
 632 
 633         hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 634         if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 635             (sptd->spt_flags & SHM_PAGEABLE)) {
 636                 hat_flags = HAT_UNLOAD_UNMAP;
 637         }
 638 
 639         hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
 640 
 641         amp = sptd->spt_amp;
 642         if (sptd->spt_flags & SHM_PAGEABLE)
 643                 npages = btop(amp->size);
 644 
 645         ASSERT(amp != NULL);
 646 
 647         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 648                 sp = amp->a_sp;
 649                 proj = sp->shm_perm.ipc_proj;
 650                 mutex_enter(&sp->shm_mlock);
 651         }
 652         for (anon_idx = 0; anon_idx < npages; anon_idx++) {
 653                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 654                         if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
 655                                 panic("segspt_free_pages: null app");
 656                                 /*NOTREACHED*/
 657                         }
 658                 } else {
 659                         if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
 660                             == NULL)
 661                                 continue;
 662                 }
 663                 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
 664                 swap_xlate(ap, &vp, &off);
 665 
 666                 /*
 667                  * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
 668                  * the pages won't be held SE_SHARED locked at this
 669                  * point.
 670                  *
 671                  * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 672                  * the pages are still held SE_SHARED locked from the
 673                  * original segspt_create().
 674                  *
 675                  * Our goal is to get an SE_EXCL lock on each page, remove
 676                  * the permanent lock on it and invalidate the page.
 677                  */
 678                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 679                         if (hat_flags == HAT_UNLOAD_UNMAP)
 680                                 pp = page_lookup(vp, off, SE_EXCL);
 681                         else {
 682                                 if ((pp = page_find(vp, off)) == NULL) {
 683                                         panic("segspt_free_pages: "
 684                                             "page not locked");
 685                                         /*NOTREACHED*/
 686                                 }
 687                                 if (!page_tryupgrade(pp)) {
 688                                         page_unlock(pp);
 689                                         pp = page_lookup(vp, off, SE_EXCL);
 690                                 }
 691                         }
 692                         if (pp == NULL) {
 693                                 panic("segspt_free_pages: "
 694                                     "page not in the system");
 695                                 /*NOTREACHED*/
 696                         }
 697                         ASSERT(pp->p_lckcnt > 0);
 698                         page_pp_unlock(pp, 0, 1);
 699                         if (pp->p_lckcnt == 0)
 700                                 unlocked_bytes += PAGESIZE;
 701                 } else {
 702                         if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
 703                                 continue;
 704                 }
 705                 /*
 706                  * It's logical to invalidate the pages here as in most cases
 707                  * these were created by segspt.
 708                  */
 709                 if (pp->p_szc != 0) {
 710                         if (root == 0) {
 711                                 ASSERT(curnpgs == 0);
 712                                 root = 1;
 713                                 rootpp = pp;
 714                                 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
 715                                 ASSERT(pgs > 1);
 716                                 ASSERT(IS_P2ALIGNED(pgs, pgs));
 717                                 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
 718                                 curnpgs--;
 719                         } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
 720                                 ASSERT(curnpgs == 1);
 721                                 ASSERT(page_pptonum(pp) ==
 722                                     page_pptonum(rootpp) + (pgs - 1));
 723                                 page_destroy_pages(rootpp);
 724                                 root = 0;
 725                                 curnpgs = 0;
 726                         } else {
 727                                 ASSERT(curnpgs > 1);
 728                                 ASSERT(page_pptonum(pp) ==
 729                                     page_pptonum(rootpp) + (pgs - curnpgs));
 730                                 curnpgs--;
 731                         }
 732                 } else {
 733                         if (root != 0 || curnpgs != 0) {
 734                                 panic("segspt_free_pages: bad large page");
 735                                 /*NOTREACHED*/
 736                         }
 737                         /*
 738                          * Before destroying the pages, we need to take care
 739                          * of the rctl locked memory accounting. For that
 740                          * we need to calculate unlocked_bytes.
 741                          */
 742                         if (pp->p_lckcnt > 0)
 743                                 unlocked_bytes += PAGESIZE;
 744                         /*LINTED: constant in conditional context */
 745                         VN_DISPOSE(pp, B_INVAL, 0, kcred);
 746                 }
 747         }
 748         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 749                 if (unlocked_bytes > 0)
 750                         rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
 751                 mutex_exit(&sp->shm_mlock);
 752         }
 753         if (root != 0 || curnpgs != 0) {
 754                 panic("segspt_free_pages: bad large page");
 755                 /*NOTREACHED*/
 756         }
 757 
 758         /*
 759          * mark that pages have been released
 760          */
 761         sptd->spt_realsize = 0;
 762 
 763         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 764                 atomic_add_long(&spt_used, -npages);
 765                 anon_swap_restore(npages);
 766         }
 767 }
 768 
 769 /*
 770  * Get memory allocation policy info for specified address in given segment
 771  */
 772 static lgrp_mem_policy_info_t *
 773 segspt_getpolicy(struct seg *seg, caddr_t addr)
 774 {
 775         struct anon_map         *amp;
 776         ulong_t                 anon_index;
 777         lgrp_mem_policy_info_t  *policy_info;
 778         struct spt_data         *spt_data;
 779 
 780         ASSERT(seg != NULL);
 781 
 782         /*
 783          * Get anon_map from segspt
 784          *
 785          * Assume that no lock needs to be held on the anon_map, since
 786          * it should be protected by its reference count, which must be
 787          * nonzero for an existing segment.
 788          * We do need to grab the readers lock on the policy tree, though.
 789          */
 790         spt_data = (struct spt_data *)seg->s_data;
 791         if (spt_data == NULL)
 792                 return (NULL);
 793         amp = spt_data->spt_amp;
 794         ASSERT(amp->refcnt != 0);
 795 
 796         /*
 797          * Get policy info
 798          *
 799          * Assume starting anon index of 0
 800          */
 801         anon_index = seg_page(seg, addr);
 802         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
 803 
 804         return (policy_info);
 805 }
 806 
 807 /*
 808  * DISM only.
 809  * Return locked pages over a given range.
 810  *
 811  * We will cache all DISM locked pages and save the pplist for the
 812  * entire segment in the ppa field of the underlying DISM segment structure.
 813  * Later, during a call to segspt_reclaim() we will use this ppa array
 814  * to page_unlock() all of the pages and then we will free this ppa list.
 815  */
 816 /*ARGSUSED*/
 817 static int
 818 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
 819     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 820 {
 821         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
 822         struct  seg     *sptseg = shmd->shm_sptseg;
 823         struct  spt_data *sptd = sptseg->s_data;
 824         pgcnt_t pg_idx, npages, tot_npages, npgs;
 825         struct  page **pplist, **pl, **ppa, *pp;
 826         struct  anon_map *amp;
 827         spgcnt_t        an_idx;
 828         int     ret = ENOTSUP;
 829         uint_t  pl_built = 0;
 830         struct  anon *ap;
 831         struct  vnode *vp;
 832         u_offset_t off;
 833         pgcnt_t claim_availrmem = 0;
 834         uint_t  szc;
 835 
 836         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 837         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 838 
 839         /*
 840          * We want to lock/unlock the entire ISM segment. Therefore,
 841          * we will be using the underlying sptseg and its base address
 842          * and length for the caching arguments.
 843          */
 844         ASSERT(sptseg);
 845         ASSERT(sptd);
 846 
 847         pg_idx = seg_page(seg, addr);
 848         npages = btopr(len);
 849 
 850         /*
 851          * check if the request is larger than the number of pages
 852          * covered by the amp
 853          */
 854         if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
 855                 *ppp = NULL;
 856                 return (ENOTSUP);
 857         }
 858 
 859         if (type == L_PAGEUNLOCK) {
 860                 ASSERT(sptd->spt_ppa != NULL);
 861 
 862                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
 863                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 864 
 865                 /*
 866                  * If someone is blocked while unmapping, we purge
 867                  * segment page cache and thus reclaim pplist synchronously
 868                  * without waiting for seg_pasync_thread. This speeds up
 869                  * unmapping in cases where munmap(2) is called, while
 870                  * raw async i/o is still in progress or where a thread
 871                  * exits on data fault in a multithreaded application.
 872                  */
 873                 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
 874                     (AS_ISUNMAPWAIT(seg->s_as) &&
 875                     shmd->shm_softlockcnt > 0)) {
 876                         segspt_purge(seg);
 877                 }
 878                 return (0);
 879         }
 880 
 881         /* The L_PAGELOCK case ... */
 882 
 883         if (sptd->spt_flags & DISM_PPA_CHANGED) {
 884                 segspt_purge(seg);
 885                 /*
 886                  * for DISM the ppa[] needs to be rebuilt, since the
 887                  * number of locked pages could have changed
 888                  */
 889                 *ppp = NULL;
 890                 return (ENOTSUP);
 891         }
 892 
 893         /*
 894          * First try to find pages in segment page cache, without
 895          * holding the segment lock.
 896          */
 897         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 898             S_WRITE, SEGP_FORCE_WIRED);
 899         if (pplist != NULL) {
 900                 ASSERT(sptd->spt_ppa != NULL);
 901                 ASSERT(sptd->spt_ppa == pplist);
 902                 ppa = sptd->spt_ppa;
 903                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 904                         if (ppa[an_idx] == NULL) {
 905                                 seg_pinactive(seg, NULL, seg->s_base,
 906                                     sptd->spt_amp->size, ppa,
 907                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 908                                 *ppp = NULL;
 909                                 return (ENOTSUP);
 910                         }
 911                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 912                                 npgs = page_get_pagecnt(szc);
 913                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 914                         } else {
 915                                 an_idx++;
 916                         }
 917                 }
 918                 /*
 919                  * Since we cache the entire DISM segment, we want to
 920                  * set ppp to point to the first slot that corresponds
 921                  * to the requested addr, i.e. pg_idx.
 922                  */
 923                 *ppp = &(sptd->spt_ppa[pg_idx]);
 924                 return (0);
 925         }
 926 
 927         mutex_enter(&sptd->spt_lock);
 928         /*
 929          * try to find pages in segment page cache with mutex
 930          */
 931         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 932             S_WRITE, SEGP_FORCE_WIRED);
 933         if (pplist != NULL) {
 934                 ASSERT(sptd->spt_ppa != NULL);
 935                 ASSERT(sptd->spt_ppa == pplist);
 936                 ppa = sptd->spt_ppa;
 937                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 938                         if (ppa[an_idx] == NULL) {
 939                                 mutex_exit(&sptd->spt_lock);
 940                                 seg_pinactive(seg, NULL, seg->s_base,
 941                                     sptd->spt_amp->size, ppa,
 942                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 943                                 *ppp = NULL;
 944                                 return (ENOTSUP);
 945                         }
 946                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 947                                 npgs = page_get_pagecnt(szc);
 948                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 949                         } else {
 950                                 an_idx++;
 951                         }
 952                 }
 953                 /*
 954                  * Since we cache the entire DISM segment, we want to
 955                  * set ppp to point to the first slot that corresponds
 956                  * to the requested addr, i.e. pg_idx.
 957                  */
 958                 mutex_exit(&sptd->spt_lock);
 959                 *ppp = &(sptd->spt_ppa[pg_idx]);
 960                 return (0);
 961         }
 962         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
 963             SEGP_FORCE_WIRED) == SEGP_FAIL) {
 964                 mutex_exit(&sptd->spt_lock);
 965                 *ppp = NULL;
 966                 return (ENOTSUP);
 967         }
 968 
 969         /*
 970          * No need to worry about protections because DISM pages are always rw.
 971          */
 972         pl = pplist = NULL;
 973         amp = sptd->spt_amp;
 974 
 975         /*
 976          * Do we need to build the ppa array?
 977          */
 978         if (sptd->spt_ppa == NULL) {
 979                 pgcnt_t lpg_cnt = 0;
 980 
 981                 pl_built = 1;
 982                 tot_npages = btopr(sptd->spt_amp->size);
 983 
 984                 ASSERT(sptd->spt_pcachecnt == 0);
 985                 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
 986                 pl = pplist;
 987 
 988                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 989                 for (an_idx = 0; an_idx < tot_npages; ) {
 990                         ap = anon_get_ptr(amp->ahp, an_idx);
 991                         /*
 992                          * Cache only mlocked pages. For large pages,
 993                          * if one (constituent) page is mlocked,
 994                          * all pages for that large page
 995                          * are cached as well. This is for quick
 996                          * lookups in the ppa array.
 997                          */
 998                         if ((ap != NULL) && (lpg_cnt != 0 ||
 999                             (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1000 
1001                                 swap_xlate(ap, &vp, &off);
1002                                 pp = page_lookup(vp, off, SE_SHARED);
1003                                 ASSERT(pp != NULL);
1004                                 if (lpg_cnt == 0) {
1005                                         lpg_cnt++;
1006                                         /*
1007                                          * For a small page, we are done --
1008                                          * lpg_cnt is reset to 0 below.
1009                                          *
1010                                          * For a large page, we are guaranteed
1011                                          * to find the anon structures of all
1012                                          * constituent pages and a non-zero
1013                                          * lpg_cnt ensures that we don't test
1014                                          * for mlock for these. We are done
1015                                          * when lpg_cnt reaches (npgs + 1).
1016                                          * If we are not the first constituent
1017                                          * page, restart at the first one.
1018                                          */
1019                                         npgs = page_get_pagecnt(pp->p_szc);
1020                                         if (!IS_P2ALIGNED(an_idx, npgs)) {
1021                                                 an_idx = P2ALIGN(an_idx, npgs);
1022                                                 page_unlock(pp);
1023                                                 continue;
1024                                         }
1025                                 }
1026                                 if (++lpg_cnt > npgs)
1027                                         lpg_cnt = 0;
1028 
1029                                 /*
1030                                  * availrmem is decremented only
1031                                  * for unlocked pages
1032                                  */
1033                                 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1034                                         claim_availrmem++;
1035                                 pplist[an_idx] = pp;
1036                         }
1037                         an_idx++;
1038                 }
1039                 ANON_LOCK_EXIT(&amp->a_rwlock);
1040 
1041                 if (claim_availrmem) {
1042                         mutex_enter(&freemem_lock);
1043                         if (availrmem < tune.t_minarmem + claim_availrmem) {
1044                                 mutex_exit(&freemem_lock);
1045                                 ret = ENOTSUP;
1046                                 claim_availrmem = 0;
1047                                 goto insert_fail;
1048                         } else {
1049                                 availrmem -= claim_availrmem;
1050                         }
1051                         mutex_exit(&freemem_lock);
1052                 }
1053 
1054                 sptd->spt_ppa = pl;
1055         } else {
1056                 /*
1057                  * We already have a valid ppa[].
1058                  */
1059                 pl = sptd->spt_ppa;
1060         }
1061 
1062         ASSERT(pl != NULL);
1063 
1064         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1065             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1066             segspt_reclaim);
1067         if (ret == SEGP_FAIL) {
1068                 /*
1069                  * seg_pinsert failed. We return
1070                  * ENOTSUP, so that the as_pagelock() code will
1071                  * then try the slower F_SOFTLOCK path.
1072                  */
1073                 if (pl_built) {
1074                         /*
1075                          * No one else has referenced the ppa[].
1076                          * We created it and we need to destroy it.
1077                          */
1078                         sptd->spt_ppa = NULL;
1079                 }
1080                 ret = ENOTSUP;
1081                 goto insert_fail;
1082         }
1083 
1084         /*
1085          * In either case, we increment softlockcnt on the 'real' segment.
1086          */
1087         sptd->spt_pcachecnt++;
1088         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1089 
1090         ppa = sptd->spt_ppa;
1091         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1092                 if (ppa[an_idx] == NULL) {
1093                         mutex_exit(&sptd->spt_lock);
1094                         seg_pinactive(seg, NULL, seg->s_base,
1095                             sptd->spt_amp->size,
1096                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1097                         *ppp = NULL;
1098                         return (ENOTSUP);
1099                 }
1100                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1101                         npgs = page_get_pagecnt(szc);
1102                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1103                 } else {
1104                         an_idx++;
1105                 }
1106         }
1107         /*
1108          * We can now drop the sptd->spt_lock since the ppa[]
1109          * exists and we have incremented pcachecnt.
1110          */
1111         mutex_exit(&sptd->spt_lock);
1112 
1113         /*
1114          * Since we cache the entire segment, we want to
1115          * set ppp to point to the first slot that corresponds
1116          * to the requested addr, i.e. pg_idx.
1117          */
1118         *ppp = &(sptd->spt_ppa[pg_idx]);
1119         return (0);
1120 
1121 insert_fail:
1122         /*
1123          * We will only reach this code if we tried and failed.
1124          *
1125          * And we can drop the lock on the dummy seg, once we've failed
1126          * to set up a new ppa[].
1127          */
1128         mutex_exit(&sptd->spt_lock);
1129 
1130         if (pl_built) {
1131                 if (claim_availrmem) {
1132                         mutex_enter(&freemem_lock);
1133                         availrmem += claim_availrmem;
1134                         mutex_exit(&freemem_lock);
1135                 }
1136 
1137                 /*
1138                  * We created pl and we need to destroy it.
1139                  */
1140                 pplist = pl;
1141                 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1142                         if (pplist[an_idx] != NULL)
1143                                 page_unlock(pplist[an_idx]);
1144                 }
1145                 kmem_free(pl, sizeof (page_t *) * tot_npages);
1146         }
1147 
1148         if (shmd->shm_softlockcnt <= 0) {
1149                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1150                         mutex_enter(&seg->s_as->a_contents);
1151                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1152                                 AS_CLRUNMAPWAIT(seg->s_as);
1153                                 cv_broadcast(&seg->s_as->a_cv);
1154                         }
1155                         mutex_exit(&seg->s_as->a_contents);
1156                 }
1157         }
1158         *ppp = NULL;
1159         return (ret);
1160 }
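
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * L_PAGELOCK / L_PAGEUNLOCK pairing that segspt_dismpagelock() above is
 * written to serve, issued through the generic segment operation macro.
 * The wrapper function name is hypothetical.
 */
#if 0   /* example only, never compiled */
static void
example_dism_pagelock_usage(struct seg *seg, caddr_t addr, size_t len,
    enum seg_rw rw)
{
        struct page **pplist;

        if (SEGOP_PAGELOCK(seg, addr, len, &pplist, L_PAGELOCK, rw) == 0) {
                /* pplist[0 .. btopr(len) - 1] are held; safe to do the I/O */
                (void) SEGOP_PAGELOCK(seg, addr, len, &pplist,
                    L_PAGEUNLOCK, rw);
        } else {
                /* ENOTSUP: the caller falls back to the slower F_SOFTLOCK */
        }
}
#endif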
1161 
1162 
1163 
1164 /*
1165  * Return locked pages over a given range.
1166  *
1167  * We will cache the entire ISM segment and save the pplist for the
1168  * entire segment in the ppa field of the underlying ISM segment structure.
1169  * Later, during a call to segspt_reclaim() we will use this ppa array
1170  * to page_unlock() all of the pages and then we will free this ppa list.
1171  */
1172 /*ARGSUSED*/
1173 static int
1174 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1175     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1176 {
1177         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1178         struct seg      *sptseg = shmd->shm_sptseg;
1179         struct spt_data *sptd = sptseg->s_data;
1180         pgcnt_t np, page_index, npages;
1181         caddr_t a, spt_base;
1182         struct page **pplist, **pl, *pp;
1183         struct anon_map *amp;
1184         ulong_t anon_index;
1185         int ret = ENOTSUP;
1186         uint_t  pl_built = 0;
1187         struct anon *ap;
1188         struct vnode *vp;
1189         u_offset_t off;
1190 
1191         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1192         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1193 
1194 
1195         /*
1196          * We want to lock/unlock the entire ISM segment. Therefore,
1197          * we will be using the underlying sptseg and its base address
1198          * and length for the caching arguments.
1199          */
1200         ASSERT(sptseg);
1201         ASSERT(sptd);
1202 
1203         if (sptd->spt_flags & SHM_PAGEABLE) {
1204                 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1205         }
1206 
1207         page_index = seg_page(seg, addr);
1208         npages = btopr(len);
1209 
1210         /*
1211          * check if the request is larger than the number of pages
1212          * covered by the amp
1213          */
1214         if (page_index + npages > btopr(sptd->spt_amp->size)) {
1215                 *ppp = NULL;
1216                 return (ENOTSUP);
1217         }
1218 
1219         if (type == L_PAGEUNLOCK) {
1220 
1221                 ASSERT(sptd->spt_ppa != NULL);
1222 
1223                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1224                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1225 
1226                 /*
1227                  * If someone is blocked while unmapping, we purge
1228                  * segment page cache and thus reclaim pplist synchronously
1229                  * without waiting for seg_pasync_thread. This speeds up
1230                  * unmapping in cases where munmap(2) is called, while
1231                  * raw async i/o is still in progress or where a thread
1232                  * exits on data fault in a multithreaded application.
1233                  */
1234                 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1235                         segspt_purge(seg);
1236                 }
1237                 return (0);
1238         }
1239 
1240         /* The L_PAGELOCK case... */
1241 
1242         /*
1243          * First try to find pages in segment page cache, without
1244          * holding the segment lock.
1245          */
1246         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1247             S_WRITE, SEGP_FORCE_WIRED);
1248         if (pplist != NULL) {
1249                 ASSERT(sptd->spt_ppa == pplist);
1250                 ASSERT(sptd->spt_ppa[page_index]);
1251                 /*
1252                  * Since we cache the entire ISM segment, we want to
1253                  * set ppp to point to the first slot that corresponds
1254                  * to the requested addr, i.e. page_index.
1255                  */
1256                 *ppp = &(sptd->spt_ppa[page_index]);
1257                 return (0);
1258         }
1259 
1260         mutex_enter(&sptd->spt_lock);
1261 
1262         /*
1263          * try to find pages in segment page cache
1264          */
1265         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1266             S_WRITE, SEGP_FORCE_WIRED);
1267         if (pplist != NULL) {
1268                 ASSERT(sptd->spt_ppa == pplist);
1269                 /*
1270                  * Since we cache the entire segment, we want to
1271                  * set ppp to point to the first slot that corresponds
1272                  * to the requested addr, i.e. page_index.
1273                  */
1274                 mutex_exit(&sptd->spt_lock);
1275                 *ppp = &(sptd->spt_ppa[page_index]);
1276                 return (0);
1277         }
1278 
1279         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1280             SEGP_FORCE_WIRED) == SEGP_FAIL) {
1281                 mutex_exit(&sptd->spt_lock);
1282                 *ppp = NULL;
1283                 return (ENOTSUP);
1284         }
1285 
1286         /*
1287          * No need to worry about protections because ISM pages
1288          * are always rw.
1289          */
1290         pl = pplist = NULL;
1291 
1292         /*
1293          * Do we need to build the ppa array?
1294          */
1295         if (sptd->spt_ppa == NULL) {
1296                 ASSERT(sptd->spt_ppa == pplist);
1297 
1298                 spt_base = sptseg->s_base;
1299                 pl_built = 1;
1300 
1301                 /*
1302                  * availrmem is decremented once during anon_swap_adjust()
1303                  * and is incremented during the anon_unresv(), which is
1304                  * called from shm_rm_amp() when the segment is destroyed.
1305                  */
1306                 amp = sptd->spt_amp;
1307                 ASSERT(amp != NULL);
1308 
1309                 /* pcachecnt is protected by sptd->spt_lock */
1310                 ASSERT(sptd->spt_pcachecnt == 0);
1311                 pplist = kmem_zalloc(sizeof (page_t *)
1312                     * btopr(sptd->spt_amp->size), KM_SLEEP);
1313                 pl = pplist;
1314 
1315                 anon_index = seg_page(sptseg, spt_base);
1316 
1317                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1318                 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1319                     a += PAGESIZE, anon_index++, pplist++) {
1320                         ap = anon_get_ptr(amp->ahp, anon_index);
1321                         ASSERT(ap != NULL);
1322                         swap_xlate(ap, &vp, &off);
1323                         pp = page_lookup(vp, off, SE_SHARED);
1324                         ASSERT(pp != NULL);
1325                         *pplist = pp;
1326                 }
1327                 ANON_LOCK_EXIT(&amp->a_rwlock);
1328 
1329                 if (a < (spt_base + sptd->spt_amp->size)) {
1330                         ret = ENOTSUP;
1331                         goto insert_fail;
1332                 }
1333                 sptd->spt_ppa = pl;
1334         } else {
1335                 /*
1336                  * We already have a valid ppa[].
1337                  */
1338                 pl = sptd->spt_ppa;
1339         }
1340 
1341         ASSERT(pl != NULL);
1342 
1343         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1344             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1345             segspt_reclaim);
1346         if (ret == SEGP_FAIL) {
1347                 /*
1348                  * seg_pinsert failed. We return
1349                  * ENOTSUP, so that the as_pagelock() code will
1350                  * then try the slower F_SOFTLOCK path.
1351                  */
1352                 if (pl_built) {
1353                         /*
1354                          * No one else has referenced the ppa[].
1355                          * We created it and we need to destroy it.
1356                          */
1357                         sptd->spt_ppa = NULL;
1358                 }
1359                 ret = ENOTSUP;
1360                 goto insert_fail;
1361         }
1362 
1363         /*
1364          * In either case, we increment softlockcnt on the 'real' segment.
1365          */
1366         sptd->spt_pcachecnt++;
1367         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1368 
1369         /*
1370          * We can now drop the sptd->spt_lock since the ppa[]
1371          * exists and we have incremented pcachecnt.
1372          */
1373         mutex_exit(&sptd->spt_lock);
1374 
1375         /*
1376          * Since we cache the entire segment, we want to
1377          * set ppp to point to the first slot that corresponds
1378          * to the requested addr, i.e. page_index.
1379          */
1380         *ppp = &(sptd->spt_ppa[page_index]);
1381         return (0);
1382 
1383 insert_fail:
1384         /*
1385          * We only reach this code if we tried and failed.
1386          *
1387          * We can now drop the lock on the dummy seg, since we've failed
1388          * to set up a new ppa[].
1389          */
1390         mutex_exit(&sptd->spt_lock);
1391 
1392         if (pl_built) {
1393                 /*
1394                  * We created pl and we need to destroy it.
1395                  */
1396                 pplist = pl;
1397                 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1398                 while (np) {
1399                         page_unlock(*pplist);
1400                         np--;
1401                         pplist++;
1402                 }
1403                 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1404         }
1405         if (shmd->shm_softlockcnt <= 0) {
1406                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1407                         mutex_enter(&seg->s_as->a_contents);
1408                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1409                                 AS_CLRUNMAPWAIT(seg->s_as);
1410                                 cv_broadcast(&seg->s_as->a_cv);
1411                         }
1412                         mutex_exit(&seg->s_as->a_contents);
1413                 }
1414         }
1415         *ppp = NULL;
1416         return (ret);
1417 }
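/*
 * Summary of the pagelock-cache flow above (a descriptive sketch only;
 * the function begins earlier in this file):
 *
 *	1. Look up the whole ISM segment in the seg pcache with
 *	   seg_plookup(); on a hit, return &spt_ppa[page_index].
 *	2. On a miss, take sptd->spt_lock, retry the lookup, and if it
 *	   still misses, build spt_ppa[] by walking the anon map and
 *	   doing a page_lookup(..., SE_SHARED) for every page.
 *	3. Insert the array with seg_pinsert(); on failure, return
 *	   ENOTSUP so that as_pagelock() falls back to the slower
 *	   F_SOFTLOCK path.
 *	4. segspt_reclaim() below is the pcache callback that undoes
 *	   step 2 once the last cached reference goes away.
 */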
1418 
1419 /*
1420  * purge any cached pages in the I/O page cache
1421  */
1422 static void
1423 segspt_purge(struct seg *seg)
1424 {
1425         seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1426 }
1427 
1428 static int
1429 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1430         enum seg_rw rw, int async)
1431 {
1432         struct seg *seg = (struct seg *)ptag;
1433         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
1434         struct  seg     *sptseg;
1435         struct  spt_data *sptd;
1436         pgcnt_t npages, i, free_availrmem = 0;
1437         int     done = 0;
1438 
1439 #ifdef lint
1440         addr = addr;
1441 #endif
1442         sptseg = shmd->shm_sptseg;
1443         sptd = sptseg->s_data;
1444         npages = (len >> PAGESHIFT);
1445         ASSERT(npages);
1446         ASSERT(sptd->spt_pcachecnt != 0);
1447         ASSERT(sptd->spt_ppa == pplist);
1448         ASSERT(npages == btopr(sptd->spt_amp->size));
1449         ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1450 
1451         /*
1452          * Acquire the lock on the dummy seg and destroy the
1453          * ppa array IF this is the last pcachecnt.
1454          */
1455         mutex_enter(&sptd->spt_lock);
1456         if (--sptd->spt_pcachecnt == 0) {
1457                 for (i = 0; i < npages; i++) {
1458                         if (pplist[i] == NULL) {
1459                                 continue;
1460                         }
1461                         if (rw == S_WRITE) {
1462                                 hat_setrefmod(pplist[i]);
1463                         } else {
1464                                 hat_setref(pplist[i]);
1465                         }
1466                         if ((sptd->spt_flags & SHM_PAGEABLE) &&
1467                             (sptd->spt_ppa_lckcnt[i] == 0))
1468                                 free_availrmem++;
1469                         page_unlock(pplist[i]);
1470                 }
1471                 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1472                         mutex_enter(&freemem_lock);
1473                         availrmem += free_availrmem;
1474                         mutex_exit(&freemem_lock);
1475                 }
1476                 /*
1477                  * Since we want to cache/uncache the entire ISM segment,
1478                  * we track the pplist in a segspt-specific field, ppa, which
1479                  * is initialized at the time we add an entry to
1480                  * the cache.
1481                  */
1482                 ASSERT(sptd->spt_pcachecnt == 0);
1483                 kmem_free(pplist, sizeof (page_t *) * npages);
1484                 sptd->spt_ppa = NULL;
1485                 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1486                 sptd->spt_gen++;
1487                 cv_broadcast(&sptd->spt_cv);
1488                 done = 1;
1489         }
1490         mutex_exit(&sptd->spt_lock);
1491 
1492         /*
1493          * If we are the pcache async thread, or were called via
1494          * seg_ppurge_wiredpp(), we may not hold the AS lock (in this case
1495          * the async argument is not 0). This means that if softlockcnt
1496          * drops to 0 after the decrement below, the address space may get
1497          * freed. We can't allow that, since after softlockcnt drops to 0
1498          * we still need to access the as structure for a possible wakeup
1499          * of unmap waiters. To prevent the as from disappearing we take
1500          * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1501          * this mutex as a barrier to make sure this routine completes
1502          * before the segment is freed.
1503          *
1504          * The second complication in the async case is the possibility of
1505          * a missed wakeup of an unmap wait thread: without the as lock we
1506          * may take the a_contents lock before an unmap wait thread that
1507          * was first to see that softlockcnt was still not 0, and so fail
1508          * to wake it up. To avoid this race, if async is not 0 we set the
1509          * nounmapwait flag in the as structure when we drop softlockcnt
1510          * to 0; the unmapwait thread will not block if this flag is set.
1511          */
1512         if (async)
1513                 mutex_enter(&shmd->shm_segfree_syncmtx);
1514 
1515         /*
1516          * Now decrement softlockcnt.
1517          */
1518         ASSERT(shmd->shm_softlockcnt > 0);
1519         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
1520 
1521         if (shmd->shm_softlockcnt <= 0) {
1522                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1523                         mutex_enter(&seg->s_as->a_contents);
1524                         if (async)
1525                                 AS_SETNOUNMAPWAIT(seg->s_as);
1526                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1527                                 AS_CLRUNMAPWAIT(seg->s_as);
1528                                 cv_broadcast(&seg->s_as->a_cv);
1529                         }
1530                         mutex_exit(&seg->s_as->a_contents);
1531                 }
1532         }
1533 
1534         if (async)
1535                 mutex_exit(&shmd->shm_segfree_syncmtx);
1536 
1537         return (done);
1538 }
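/*
 * Illustrative ordering of the shm_segfree_syncmtx barrier described
 * above (one possible interleaving, not a new mechanism):
 *
 *	async segspt_reclaim():
 *		mutex_enter(&shmd->shm_segfree_syncmtx);
 *		atomic decrement of shmd->shm_softlockcnt
 *		AS_SETNOUNMAPWAIT() / wake unmap waiters
 *		mutex_exit(&shmd->shm_segfree_syncmtx);
 *	segspt_shmfree():
 *		mutex_enter(&shmd->shm_segfree_syncmtx);
 *		mutex_destroy(&shmd->shm_segfree_syncmtx);
 *		kmem_free(shmd, sizeof (*shmd));
 *
 * Because segspt_shmfree() acquires the mutex before tearing shmd down,
 * it cannot free the segment data while an async reclaim is still
 * between its softlockcnt decrement and the unmap-waiter wakeup.
 */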
1539 
1540 /*
1541  * Do a F_SOFTUNLOCK call over the range requested.
1542  * The range must have already been F_SOFTLOCK'ed.
1543  *
1544  * The calls to acquire and release the anon map lock mutex were
1545  * removed in order to avoid a deadly embrace during a DR
1546  * memory delete operation.  (E.g., DR blocks while waiting for an
1547  * exclusive lock on a page that is being used for kaio; the
1548  * thread that will complete the kaio and call segspt_softunlock
1549  * blocks on the anon map lock; another thread holding the anon
1550  * map lock blocks on another page lock via the segspt_shmfault
1551  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1552  *
1553  * The appropriateness of the removal is based upon the following:
1554  * 1. If we are holding a segment's reader lock and the page is held
1555  * shared, then the corresponding element in anonmap which points to
1556  * anon struct cannot change and there is no need to acquire the
1557  * anonymous map lock.
1558  * 2. Threads in segspt_softunlock have a reader lock on the segment
1559  * and already have the shared page lock, so we are guaranteed that
1560  * the anon map slot cannot change and therefore can call anon_get_ptr()
1561  * without grabbing the anonymous map lock.
1562  * 3. Threads that softlock a shared page break copy-on-write, even if
1563  * it's a read.  Thus cow faults can be ignored with respect to soft
1564  * unlocking, since the breaking of cow means that the anon slot(s) will
1565  * not be shared.
1566  */
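/*
 * Illustrative lock-wait cycle that the removal above avoids (the
 * thread names are for this example only):
 *
 *	DR delete thread:	waits for an exclusive page lock held by
 *				the kaio page user
 *	kaio completion:	would wait for the anon map lock in
 *				segspt_softunlock()
 *	anon map holder:	waits on another page lock via
 *				segspt_shmfault -> page_lookup ->
 *				page_lookup_create -> page_lock_es
 *
 * Not taking the anon map lock here breaks the cycle.
 */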
1567 static void
1568 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1569         size_t len, enum seg_rw rw)
1570 {
1571         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1572         struct seg      *sptseg;
1573         struct spt_data *sptd;
1574         page_t *pp;
1575         caddr_t adr;
1576         struct vnode *vp;
1577         u_offset_t offset;
1578         ulong_t anon_index;
1579         struct anon_map *amp;           /* XXX - for locknest */
1580         struct anon *ap = NULL;
1581         pgcnt_t npages;
1582 
1583         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1584 
1585         sptseg = shmd->shm_sptseg;
1586         sptd = sptseg->s_data;
1587 
1588         /*
1589          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1590          * and therefore their pages are SE_SHARED locked
1591          * for the entire life of the segment.
1592          */
1593         if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1594             ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1595                 goto softlock_decrement;
1596         }
1597 
1598         /*
1599          * Any thread is free to do a page_find and
1600          * page_unlock() on the pages within this seg.
1601          *
1602          * We are already holding the as->a_lock on the user's
1603          * real segment, but we need to hold the a_lock on the
1604          * underlying dummy as. This is mostly to satisfy the
1605          * underlying HAT layer.
1606          */
1607         AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1608         hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1609         AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1610 
1611         amp = sptd->spt_amp;
1612         ASSERT(amp != NULL);
1613         anon_index = seg_page(sptseg, sptseg_addr);
1614 
1615         for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1616                 ap = anon_get_ptr(amp->ahp, anon_index++);
1617                 ASSERT(ap != NULL);
1618                 swap_xlate(ap, &vp, &offset);
1619 
1620                 /*
1621                  * Use page_find() instead of page_lookup() to
1622                  * find the page since we know that it has a
1623                  * "shared" lock.
1624                  */
1625                 pp = page_find(vp, offset);
1626                 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1627                 if (pp == NULL) {
1628                         panic("segspt_softunlock: "
1629                             "addr %p, ap %p, vp %p, off %llx",
1630                             (void *)adr, (void *)ap, (void *)vp, offset);
1631                         /*NOTREACHED*/
1632                 }
1633 
1634                 if (rw == S_WRITE) {
1635                         hat_setrefmod(pp);
1636                 } else if (rw != S_OTHER) {
1637                         hat_setref(pp);
1638                 }
1639                 page_unlock(pp);
1640         }
1641 
1642 softlock_decrement:
1643         npages = btopr(len);
1644         ASSERT(shmd->shm_softlockcnt >= npages);
1645         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1646         if (shmd->shm_softlockcnt == 0) {
1647                 /*
1648                  * All SOFTLOCKS are gone. Wake up any waiting
1649                  * unmappers so they can try again to unmap.
1650                  * Check for waiters first without the mutex
1651                  * held so we don't always grab the mutex on
1652                  * softunlocks.
1653                  */
1654                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1655                         mutex_enter(&seg->s_as->a_contents);
1656                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1657                                 AS_CLRUNMAPWAIT(seg->s_as);
1658                                 cv_broadcast(&seg->s_as->a_cv);
1659                         }
1660                         mutex_exit(&seg->s_as->a_contents);
1661                 }
1662         }
1663 }
1664 
1665 int
1666 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1667 {
1668         struct shm_data *shmd_arg = (struct shm_data *)argsp;
1669         struct shm_data *shmd;
1670         struct anon_map *shm_amp = shmd_arg->shm_amp;
1671         struct spt_data *sptd;
1672         int error = 0;
1673 
1674         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1675 
1676         shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1677         if (shmd == NULL)
1678                 return (ENOMEM);
1679 
1680         shmd->shm_sptas = shmd_arg->shm_sptas;
1681         shmd->shm_amp = shm_amp;
1682         shmd->shm_sptseg = shmd_arg->shm_sptseg;
1683 
1684         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1685             NULL, 0, seg->s_size);
1686 
1687         mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1688 
1689         seg->s_data = (void *)shmd;
1690         seg->s_ops = &segspt_shmops;
1691         seg->s_szc = shmd->shm_sptseg->s_szc;
1692         sptd = shmd->shm_sptseg->s_data;
1693 
1694         if (sptd->spt_flags & SHM_PAGEABLE) {
1695                 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1696                     KM_NOSLEEP)) == NULL) {
1697                         seg->s_data = (void *)NULL;
1698                         kmem_free(shmd, (sizeof (*shmd)));
1699                         return (ENOMEM);
1700                 }
1701                 shmd->shm_lckpgs = 0;
1702                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1703                         if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1704                             shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1705                             seg->s_size, seg->s_szc)) != 0) {
1706                                 kmem_free(shmd->shm_vpage,
1707                                     btopr(shm_amp->size));
1708                         }
1709                 }
1710         } else {
1711                 error = hat_share(seg->s_as->a_hat, seg->s_base,
1712                     shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1713                     seg->s_size, seg->s_szc);
1714         }
1715         if (error) {
1716                 seg->s_szc = 0;
1717                 seg->s_data = (void *)NULL;
1718                 kmem_free(shmd, (sizeof (*shmd)));
1719         } else {
1720                 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1721                 shm_amp->refcnt++;
1722                 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1723         }
1724         return (error);
1725 }
1726 
1727 int
1728 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1729 {
1730         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1731         int reclaim = 1;
1732 
1733         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1734 retry:
1735         if (shmd->shm_softlockcnt > 0) {
1736                 if (reclaim == 1) {
1737                         segspt_purge(seg);
1738                         reclaim = 0;
1739                         goto retry;
1740                 }
1741                 return (EAGAIN);
1742         }
1743 
1744         if (ssize != seg->s_size) {
1745 #ifdef DEBUG
1746                 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1747                     ssize, seg->s_size);
1748 #endif
1749                 return (EINVAL);
1750         }
1751 
1752         (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1753             NULL, 0);
1754         hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1755 
1756         seg_free(seg);
1757 
1758         return (0);
1759 }
1760 
1761 void
1762 segspt_shmfree(struct seg *seg)
1763 {
1764         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1765         struct anon_map *shm_amp = shmd->shm_amp;
1766 
1767         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1768 
1769         (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1770             MC_UNLOCK, NULL, 0);
1771 
1772         /*
1773          * Need to increment refcnt when attaching
1774          * and decrement when detaching because of dup().
1775          */
1776         ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1777         shm_amp->refcnt--;
1778         ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1779 
1780         if (shmd->shm_vpage) {       /* only for DISM */
1781                 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1782                 shmd->shm_vpage = NULL;
1783         }
1784 
1785         /*
1786          * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1787          * still working with this segment without holding as lock.
1788          */
1789         ASSERT(shmd->shm_softlockcnt == 0);
1790         mutex_enter(&shmd->shm_segfree_syncmtx);
1791         mutex_destroy(&shmd->shm_segfree_syncmtx);
1792 
1793         kmem_free(shmd, sizeof (*shmd));
1794 }
1795 
1796 /*ARGSUSED*/
1797 int
1798 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1799 {
1800         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1801 
1802         /*
1803          * A shared page table is more than a shared mapping.
1804          *  Individual processes sharing page tables can't change prot
1805          *  because there is only one set of page tables.
1806          *  This will be allowed once private page tables are
1807          *  supported.
1808          */
1809 /* need to return correct status error? */
1810         return (0);
1811 }
1812 
1813 
1814 faultcode_t
1815 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1816     size_t len, enum fault_type type, enum seg_rw rw)
1817 {
1818         struct  shm_data        *shmd = (struct shm_data *)seg->s_data;
1819         struct  seg             *sptseg = shmd->shm_sptseg;
1820         struct  as              *curspt = shmd->shm_sptas;
1821         struct  spt_data        *sptd = sptseg->s_data;
1822         pgcnt_t npages;
1823         size_t  size;
1824         caddr_t segspt_addr, shm_addr;
1825         page_t  **ppa;
1826         int     i;
1827         ulong_t an_idx = 0;
1828         int     err = 0;
1829         int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1830         size_t  pgsz;
1831         pgcnt_t pgcnt;
1832         caddr_t a;
1833         pgcnt_t pidx;
1834 
1835 #ifdef lint
1836         hat = hat;
1837 #endif
1838         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1839 
1840         /*
1841          * Because of the way spt is implemented
1842          * the realsize of the segment does not have to be
1843          * equal to the segment size itself. The segment size is
1844          * often in multiples of a page size larger than PAGESIZE.
1845          * The realsize is rounded up to the nearest PAGESIZE
1846          * based on what the user requested. This is a bit of
1847          * ugliness that is historical but not easily fixed
1848          * without re-designing the higher levels of ISM.
1849          */
1850         ASSERT(addr >= seg->s_base);
1851         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1852                 return (FC_NOMAP);
1853         /*
1854          * For all of the following cases except F_PROT, we need to
1855          * make any necessary adjustments to addr and len
1856          * and get all of the necessary page_t's into an array called ppa[].
1857          *
1858          * The code in shmat() forces base addr and len of ISM segment
1859          * to be aligned to largest page size supported. Therefore,
1860          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1861          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1862          * in large pagesize chunks, or else we will screw up the HAT
1863          * layer by calling hat_memload_array() with differing page sizes
1864          * over a given virtual range.
1865          */
1866         pgsz = page_get_pagesize(sptseg->s_szc);
1867         pgcnt = page_get_pagecnt(sptseg->s_szc);
1868         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1869         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1870         npages = btopr(size);
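        /*
         * Illustrative example (the numbers are hypothetical and assume
         * seg->s_base is itself large-page aligned, as shmat() arranges):
         * with a 4M large page (pgsz == 0x400000), addr == seg->s_base +
         * 0x401000 and len == 0x2000, shm_addr rounds down to seg->s_base
         * + 0x400000 and size rounds up to 0x400000, so the whole large
         * page is handled in one hat_memload_array() call.
         */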
1871 
1872         /*
1873          * Now we need to convert from addr in segshm to addr in segspt.
1874          */
1875         an_idx = seg_page(seg, shm_addr);
1876         segspt_addr = sptseg->s_base + ptob(an_idx);
1877 
1878         ASSERT((segspt_addr + ptob(npages)) <=
1879             (sptseg->s_base + sptd->spt_realsize));
1880         ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1881 
1882         switch (type) {
1883 
1884         case F_SOFTLOCK:
1885 
1886                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1887                 /*
1888                  * Fall through to the F_INVAL case to load up the hat layer
1889                  * entries with the HAT_LOAD_LOCK flag.
1890                  */
1891                 /* FALLTHRU */
1892         case F_INVAL:
1893 
1894                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1895                         return (FC_NOMAP);
1896 
1897                 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1898 
1899                 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1900                 if (err != 0) {
1901                         if (type == F_SOFTLOCK) {
1902                                 atomic_add_long((ulong_t *)(
1903                                     &(shmd->shm_softlockcnt)), -npages);
1904                         }
1905                         goto dism_err;
1906                 }
1907                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1908                 a = segspt_addr;
1909                 pidx = 0;
1910                 if (type == F_SOFTLOCK) {
1911 
1912                         /*
1913                          * Load up the translation keeping it
1914                          * locked and don't unlock the page.
1915                          */
1916                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1917                                 hat_memload_array(sptseg->s_as->a_hat,
1918                                     a, pgsz, &ppa[pidx], sptd->spt_prot,
1919                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1920                         }
1921                 } else {
1922                         if (hat == seg->s_as->a_hat) {
1923 
1924                                 /*
1925                                  * Migrate pages marked for migration
1926                                  */
1927                                 if (lgrp_optimizations())
1928                                         page_migrate(seg, shm_addr, ppa,
1929                                             npages);
1930 
1931                                 /* CPU HAT */
1932                                 for (; pidx < npages;
1933                                     a += pgsz, pidx += pgcnt) {
1934                                         hat_memload_array(sptseg->s_as->a_hat,
1935                                             a, pgsz, &ppa[pidx],
1936                                             sptd->spt_prot,
1937                                             HAT_LOAD_SHARE);
1938                                 }
1939                         } else {
1940                                 /* XHAT. Pass real address */
1941                                 hat_memload_array(hat, shm_addr,
1942                                     size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1943                         }
1944 
1945                         /*
1946                          * And now drop the SE_SHARED lock(s).
1947                          */
1948                         if (dyn_ism_unmap) {
1949                                 for (i = 0; i < npages; i++) {
1950                                         page_unlock(ppa[i]);
1951                                 }
1952                         }
1953                 }
1954 
1955                 if (!dyn_ism_unmap) {
1956                         if (hat_share(seg->s_as->a_hat, shm_addr,
1957                             curspt->a_hat, segspt_addr, ptob(npages),
1958                             seg->s_szc) != 0) {
1959                                 panic("hat_share err in DISM fault");
1960                                 /* NOTREACHED */
1961                         }
1962                         if (type == F_INVAL) {
1963                                 for (i = 0; i < npages; i++) {
1964                                         page_unlock(ppa[i]);
1965                                 }
1966                         }
1967                 }
1968                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1969 dism_err:
1970                 kmem_free(ppa, npages * sizeof (page_t *));
1971                 return (err);
1972 
1973         case F_SOFTUNLOCK:
1974 
1975                 /*
1976                  * This is a bit ugly: we pass in the real seg pointer,
1977                  * but the segspt_addr is the virtual address within the
1978                  * dummy seg.
1979                  */
1980                 segspt_softunlock(seg, segspt_addr, size, rw);
1981                 return (0);
1982 
1983         case F_PROT:
1984 
1985                 /*
1986                  * This takes care of the unusual case where a user
1987                  * allocates a stack in shared memory and a register
1988                  * window overflow is written to that stack page before
1989                  * it is otherwise modified.
1990                  *
1991                  * We can get away with this because ISM segments are
1992                  * always rw. Other than this unusual case, there
1993                  * should be no instances of protection violations.
1994                  */
1995                 return (0);
1996 
1997         default:
1998 #ifdef DEBUG
1999                 panic("segspt_dismfault default type?");
2000 #else
2001                 return (FC_NOMAP);
2002 #endif
2003         }
2004 }
2005 
2006 
2007 faultcode_t
2008 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2009     size_t len, enum fault_type type, enum seg_rw rw)
2010 {
2011         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2012         struct seg              *sptseg = shmd->shm_sptseg;
2013         struct as               *curspt = shmd->shm_sptas;
2014         struct spt_data         *sptd   = sptseg->s_data;
2015         pgcnt_t npages;
2016         size_t size;
2017         caddr_t sptseg_addr, shm_addr;
2018         page_t *pp, **ppa;
2019         int     i;
2020         u_offset_t offset;
2021         ulong_t anon_index = 0;
2022         struct vnode *vp;
2023         struct anon_map *amp;           /* XXX - for locknest */
2024         struct anon *ap = NULL;
2025         size_t          pgsz;
2026         pgcnt_t         pgcnt;
2027         caddr_t         a;
2028         pgcnt_t         pidx;
2029         size_t          sz;
2030 
2031 #ifdef lint
2032         hat = hat;
2033 #endif
2034 
2035         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2036 
2037         if (sptd->spt_flags & SHM_PAGEABLE) {
2038                 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2039         }
2040 
2041         /*
2042          * Because of the way spt is implemented
2043          * the realsize of the segment does not have to be
2044          * equal to the segment size itself. The segment size is
2045          * often in multiples of a page size larger than PAGESIZE.
2046          * The realsize is rounded up to the nearest PAGESIZE
2047          * based on what the user requested. This is a bit of
2048          * ugliness that is historical but not easily fixed
2049          * without re-designing the higher levels of ISM.
2050          */
2051         ASSERT(addr >= seg->s_base);
2052         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2053                 return (FC_NOMAP);
2054         /*
2055          * For all of the following cases except F_PROT, we need to
2056          * make any necessary adjustments to addr and len
2057          * and get all of the necessary page_t's into an array called ppa[].
2058          *
2059          * The code in shmat() forces base addr and len of ISM segment
2060          * to be aligned to largest page size supported. Therefore,
2061          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2062          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2063          * in large pagesize chunks, or else we will screw up the HAT
2064          * layer by calling hat_memload_array() with differing page sizes
2065          * over a given virtual range.
2066          */
2067         pgsz = page_get_pagesize(sptseg->s_szc);
2068         pgcnt = page_get_pagecnt(sptseg->s_szc);
2069         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2070         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2071         npages = btopr(size);
2072 
2073         /*
2074          * Now we need to convert from addr in segshm to addr in segspt.
2075          */
2076         anon_index = seg_page(seg, shm_addr);
2077         sptseg_addr = sptseg->s_base + ptob(anon_index);
2078 
2079         /*
2080          * And now we may have to adjust npages downward if we have
2081          * exceeded the realsize of the segment or initial anon
2082          * allocations.
2083          */
2084         if ((sptseg_addr + ptob(npages)) >
2085             (sptseg->s_base + sptd->spt_realsize))
2086                 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2087 
2088         npages = btopr(size);
2089 
2090         ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2091         ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2092 
2093         switch (type) {
2094 
2095         case F_SOFTLOCK:
2096 
2097                 /*
2098                  * availrmem is decremented once during anon_swap_adjust()
2099                  * and is incremented during the anon_unresv(), which is
2100                  * called from shm_rm_amp() when the segment is destroyed.
2101                  */
2102                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2103                 /*
2104                  * Some platforms assume that ISM pages are SE_SHARED
2105                  * locked for the entire life of the segment.
2106                  */
2107                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2108                         return (0);
2109                 /*
2110                  * Fall through to the F_INVAL case to load up the hat layer
2111                  * entries with the HAT_LOAD_LOCK flag.
2112                  */
2113 
2114                 /* FALLTHRU */
2115         case F_INVAL:
2116 
2117                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2118                         return (FC_NOMAP);
2119 
2120                 /*
2121                  * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2122                  * may still rely on this call to hat_share(). That
2123                  * would imply that those HATs can fault on a
2124                  * HAT_LOAD_LOCK translation, which would seem
2125                  * contradictory.
2126                  */
2127                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2128                         if (hat_share(seg->s_as->a_hat, seg->s_base,
2129                             curspt->a_hat, sptseg->s_base,
2130                             sptseg->s_size, sptseg->s_szc) != 0) {
2131                                 panic("hat_share error in ISM fault");
2132                                 /*NOTREACHED*/
2133                         }
2134                         return (0);
2135                 }
2136                 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2137 
2138                 /*
2139                  * I see no need to lock the real seg
2140                  * here, because all of our work will be on the underlying
2141                  * dummy seg.
2142                  *
2143                  * sptseg_addr and npages now account for large pages.
2144                  */
2145                 amp = sptd->spt_amp;
2146                 ASSERT(amp != NULL);
2147                 anon_index = seg_page(sptseg, sptseg_addr);
2148 
2149                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2150                 for (i = 0; i < npages; i++) {
2151                         ap = anon_get_ptr(amp->ahp, anon_index++);
2152                         ASSERT(ap != NULL);
2153                         swap_xlate(ap, &vp, &offset);
2154                         pp = page_lookup(vp, offset, SE_SHARED);
2155                         ASSERT(pp != NULL);
2156                         ppa[i] = pp;
2157                 }
2158                 ANON_LOCK_EXIT(&amp->a_rwlock);
2159                 ASSERT(i == npages);
2160 
2161                 /*
2162                  * We are already holding the as->a_lock on the user's
2163                  * real segment, but we need to hold the a_lock on the
2164                  * underlying dummy as. This is mostly to satisfy the
2165                  * underlying HAT layer.
2166                  */
2167                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2168                 a = sptseg_addr;
2169                 pidx = 0;
2170                 if (type == F_SOFTLOCK) {
2171                         /*
2172                          * Load up the translation keeping it
2173                          * locked and don't unlock the page.
2174                          */
2175                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2176                                 sz = MIN(pgsz, ptob(npages - pidx));
2177                                 hat_memload_array(sptseg->s_as->a_hat, a,
2178                                     sz, &ppa[pidx], sptd->spt_prot,
2179                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2180                         }
2181                 } else {
2182                         if (hat == seg->s_as->a_hat) {
2183 
2184                                 /*
2185                                  * Migrate pages marked for migration.
2186                                  */
2187                                 if (lgrp_optimizations())
2188                                         page_migrate(seg, shm_addr, ppa,
2189                                             npages);
2190 
2191                                 /* CPU HAT */
2192                                 for (; pidx < npages;
2193                                     a += pgsz, pidx += pgcnt) {
2194                                         sz = MIN(pgsz, ptob(npages - pidx));
2195                                         hat_memload_array(sptseg->s_as->a_hat,
2196                                             a, sz, &ppa[pidx],
2197                                             sptd->spt_prot, HAT_LOAD_SHARE);
2198                                 }
2199                         } else {
2200                                 /* XHAT. Pass real address */
2201                                 hat_memload_array(hat, shm_addr,
2202                                     ptob(npages), ppa, sptd->spt_prot,
2203                                     HAT_LOAD_SHARE);
2204                         }
2205 
2206                         /*
2207                          * And now drop the SE_SHARED lock(s).
2208                          */
2209                         for (i = 0; i < npages; i++)
2210                                 page_unlock(ppa[i]);
2211                 }
2212                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2213 
2214                 kmem_free(ppa, sizeof (page_t *) * npages);
2215                 return (0);
2216         case F_SOFTUNLOCK:
2217 
2218                 /*
2219                  * This is a bit ugly: we pass in the real seg pointer,
2220                  * but the sptseg_addr is the virtual address within the
2221                  * dummy seg.
2222                  */
2223                 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2224                 return (0);
2225 
2226         case F_PROT:
2227 
2228                 /*
2229                  * This takes care of the unusual case where a user
2230                  * allocates a stack in shared memory and a register
2231                  * window overflow is written to that stack page before
2232                  * it is otherwise modified.
2233                  *
2234                  * We can get away with this because ISM segments are
2235                  * always rw. Other than this unusual case, there
2236                  * should be no instances of protection violations.
2237                  */
2238                 return (0);
2239 
2240         default:
2241 #ifdef DEBUG
2242                 cmn_err(CE_WARN, "segspt_shmfault default type?");
2243 #endif
2244                 return (FC_NOMAP);
2245         }
2246 }
2247 
2248 /*ARGSUSED*/
2249 static faultcode_t
2250 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2251 {
2252         return (0);
2253 }
2254 
2255 /*ARGSUSED*/
2256 static int
2257 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2258 {
2259         return (0);
2260 }
2261 
2262 /*
2263  * duplicate the shared page tables
2264  */
2265 int
2266 segspt_shmdup(struct seg *seg, struct seg *newseg)
2267 {
2268         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2269         struct anon_map         *amp = shmd->shm_amp;
2270         struct shm_data         *shmd_new;
2271         struct seg              *spt_seg = shmd->shm_sptseg;
2272         struct spt_data         *sptd = spt_seg->s_data;
2273         int                     error = 0;
2274 
2275         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2276 
2277         shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2278         newseg->s_data = (void *)shmd_new;
2279         shmd_new->shm_sptas = shmd->shm_sptas;
2280         shmd_new->shm_amp = amp;
2281         shmd_new->shm_sptseg = shmd->shm_sptseg;
2282         newseg->s_ops = &segspt_shmops;
2283         newseg->s_szc = seg->s_szc;
2284         ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2285 
2286         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2287         amp->refcnt++;
2288         ANON_LOCK_EXIT(&amp->a_rwlock);
2289 
2290         if (sptd->spt_flags & SHM_PAGEABLE) {
2291                 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2292                 shmd_new->shm_lckpgs = 0;
2293                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2294                         if ((error = hat_share(newseg->s_as->a_hat,
2295                             newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2296                             seg->s_size, seg->s_szc)) != 0) {
2297                                 kmem_free(shmd_new->shm_vpage,
2298                                     btopr(amp->size));
2299                         }
2300                 }
2301                 return (error);
2302         } else {
2303                 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2304                     shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2305                     seg->s_szc));
2306 
2307         }
2308 }
2309 
2310 /*ARGSUSED*/
2311 int
2312 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2313 {
2314         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2315         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2316 
2317         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2318 
2319         /*
2320          * ISM segment is always rw.
2321          */
2322         return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2323 }
2324 
2325 /*
2326  * Return an array of locked large pages; for empty slots, allocate
2327  * private zero-filled anon pages.
2328  */
2329 static int
2330 spt_anon_getpages(
2331         struct seg *sptseg,
2332         caddr_t sptaddr,
2333         size_t len,
2334         page_t *ppa[])
2335 {
2336         struct  spt_data *sptd = sptseg->s_data;
2337         struct  anon_map *amp = sptd->spt_amp;
2338         enum    seg_rw rw = sptd->spt_prot;
2339         uint_t  szc = sptseg->s_szc;
2340         size_t  pg_sz, share_sz = page_get_pagesize(szc);
2341         pgcnt_t lp_npgs;
2342         caddr_t lp_addr, e_sptaddr;
2343         uint_t  vpprot, ppa_szc = 0;
2344         struct  vpage *vpage = NULL;
2345         ulong_t j, ppa_idx;
2346         int     err, ierr = 0;
2347         pgcnt_t an_idx;
2348         anon_sync_obj_t cookie;
2349         int anon_locked = 0;
2350         pgcnt_t amp_pgs;
2351 
2352 
2353         ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2354         ASSERT(len != 0);
2355 
2356         pg_sz = share_sz;
2357         lp_npgs = btop(pg_sz);
2358         lp_addr = sptaddr;
2359         e_sptaddr = sptaddr + len;
2360         an_idx = seg_page(sptseg, sptaddr);
2361         ppa_idx = 0;
2362 
2363         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2364 
2365         amp_pgs = page_get_pagecnt(amp->a_szc);
2366 
2367         /*CONSTCOND*/
2368         while (1) {
2369                 for (; lp_addr < e_sptaddr;
2370                     an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2371 
2372                         /*
2373                          * If we're currently locked and we cross into a new
2374                          * anon chunk, unlock our current anon chunk.
2375                          */
2376                         if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2377                                 anon_array_exit(&cookie);
2378                                 anon_locked = 0;
2379                         }
2380                         if (!anon_locked) {
2381                                 anon_array_enter(amp, an_idx, &cookie);
2382                                 anon_locked = 1;
2383                         }
2384                         ppa_szc = (uint_t)-1;
2385                         ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2386                             lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2387                             &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2388 
2389                         if (ierr != 0) {
2390                                 if (ierr > 0) {
2391                                         err = FC_MAKE_ERR(ierr);
2392                                         goto lpgs_err;
2393                                 }
2394                                 break;
2395                         }
2396                 }
2397                 if (lp_addr == e_sptaddr) {
2398                         break;
2399                 }
2400                 ASSERT(lp_addr < e_sptaddr);
2401 
2402                 /*
2403                  * ierr == -1 means we failed to allocate a large page,
2404                  * so do a size down operation.
2405                  *
2406                  * ierr == -2 means some other process that privately shares
2407                  * pages with this process has allocated a larger page and we
2408                  * need to retry with larger pages. So do a size up
2409                  * operation. This relies on the fact that large pages are
2410                  * never partially shared i.e. if we share any constituent
2411                  * page of a large page with another process we must share the
2412                  * entire large page. Note this cannot happen for SOFTLOCK
2413                  * case, unless the current address (lp_addr) is at the start
2414                  * of the next page size boundary because the other process
2415                  * couldn't have relocated locked pages.
2416                  */
2417                 ASSERT(ierr == -1 || ierr == -2);
2418                 if (segvn_anypgsz) {
2419                         ASSERT(ierr == -2 || szc != 0);
2420                         ASSERT(ierr == -1 || szc < sptseg->s_szc);
2421                         szc = (ierr == -1) ? szc - 1 : szc + 1;
2422                 } else {
2423                         /*
2424                          * For faults and segvn_anypgsz == 0
2425                          * we need to be careful not to loop forever
2426                          * if an existing page is found with a szc other
2427                          * than 0 or sptseg->s_szc. This could be due
2428                          * to page relocations on behalf of DR or
2429                          * more likely large page creation. For this
2430                          * case simply re-size to existing page's szc
2431                          * if returned by anon_map_getpages().
2432                          */
2433                         if (ppa_szc == (uint_t)-1) {
2434                                 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2435                         } else {
2436                                 ASSERT(ppa_szc <= sptseg->s_szc);
2437                                 ASSERT(ierr == -2 || ppa_szc < szc);
2438                                 ASSERT(ierr == -1 || ppa_szc > szc);
2439                                 szc = ppa_szc;
2440                         }
2441                 }
2442                 pg_sz = page_get_pagesize(szc);
2443                 lp_npgs = btop(pg_sz);
2444                 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2445         }
2446         if (anon_locked) {
2447                 anon_array_exit(&cookie);
2448         }
2449         ANON_LOCK_EXIT(&amp->a_rwlock);
2450         return (0);
2451 
2452 lpgs_err:
2453         if (anon_locked) {
2454                 anon_array_exit(&cookie);
2455         }
2456         ANON_LOCK_EXIT(&amp->a_rwlock);
2457         for (j = 0; j < ppa_idx; j++)
2458                 page_unlock(ppa[j]);
2459         return (err);
2460 }
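/*
 * Example of the size-down/size-up retry in spt_anon_getpages() above
 * (the page sizes are illustrative and platform dependent): if
 * sptseg->s_szc corresponds to 4M pages and anon_map_getpages() returns
 * -1 because a 4M page cannot be allocated, the remaining range is
 * retried with a smaller szc; a return of -2 instead retries with a
 * larger szc, because another process has already promoted the
 * underlying anon pages to a larger page size.
 */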
2461 
2462 /*
2463  * count the number of bytes in a set of spt pages that are currently not
2464  * locked
2465  */
2466 static rctl_qty_t
2467 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2468 {
2469         ulong_t i;
2470         rctl_qty_t unlocked = 0;
2471 
2472         for (i = 0; i < npages; i++) {
2473                 if (ppa[i]->p_lckcnt == 0)
2474                         unlocked += PAGESIZE;
2475         }
2476         return (unlocked);
2477 }
2478 
2479 extern  u_longlong_t randtick(void);
2480 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2481 #define NLCK    (NCPU_P2)
2482 /* Random number with a range [0, n-1]; n must be a power of two */
2483 #define RAND_P2(n)      \
2484         ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
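/*
 * For example, if NCPU_P2 were 64, each reservation taken in
 * spt_lockpages() below would cover NLCK + RAND_P2(NLCK) == 64..127
 * pages, so competing threads refresh their availrmem/pages_locked
 * reservations at different intervals rather than in lock step
 * (64 is an illustrative value only).
 */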
2485 
2486 int
2487 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2488     page_t **ppa, ulong_t *lockmap, size_t pos,
2489     rctl_qty_t *locked)
2490 {
2491         struct  shm_data *shmd = seg->s_data;
2492         struct  spt_data *sptd = shmd->shm_sptseg->s_data;
2493         ulong_t i;
2494         int     kernel;
2495         pgcnt_t nlck = 0;
2496         int     rv = 0;
2497         int     use_reserved = 1;
2498 
2499         /* return the number of bytes actually locked */
2500         *locked = 0;
2501 
2502         /*
2503          * To avoid contention on freemem_lock, availrmem and pages_locked
2504          * global counters are updated only every nlck locked pages instead of
2505          * every time.  Reserve nlck locks up front and deduct from this
2506          * reservation for each page that requires a lock.  When the reservation
2507          * is consumed, reserve again.  nlck is randomized, so the competing
2508          * threads do not fall into a cyclic lock contention pattern. When
2509          * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2510          * is used to lock pages.
2511          */
2512         for (i = 0; i < npages; anon_index++, pos++, i++) {
2513                 if (nlck == 0 && use_reserved == 1) {
2514                         nlck = NLCK + RAND_P2(NLCK);
2515                         /* if fewer loops left, decrease nlck */
2516                         nlck = MIN(nlck, npages - i);
2517                         /*
2518                          * Reserve nlck locks up front and deduct from this
2519                          * reservation for each page that requires a lock.  When
2520                          * the reservation is consumed, reserve again.
2521                          */
2522                         mutex_enter(&freemem_lock);
2523                         if ((availrmem - nlck) < pages_pp_maximum) {
2524                                 /* Do not do advance memory reserves */
2525                                 use_reserved = 0;
2526                         } else {
2527                                 availrmem       -= nlck;
2528                                 pages_locked    += nlck;
2529                         }
2530                         mutex_exit(&freemem_lock);
2531                 }
2532                 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2533                         if (sptd->spt_ppa_lckcnt[anon_index] <
2534                             (ushort_t)DISM_LOCK_MAX) {
2535                                 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2536                                     (ushort_t)DISM_LOCK_MAX) {
2537                                         cmn_err(CE_WARN,
2538                                             "DISM page lock limit "
2539                                             "reached on DISM offset 0x%lx\n",
2540                                             anon_index << PAGESHIFT);
2541                                 }
2542                                 kernel = (sptd->spt_ppa &&
2543                                     sptd->spt_ppa[anon_index]);
2544                                 if (!page_pp_lock(ppa[i], 0, kernel ||
2545                                     use_reserved)) {
2546                                         sptd->spt_ppa_lckcnt[anon_index]--;
2547                                         rv = EAGAIN;
2548                                         break;
2549                                 }
2550                                 /* if this is a newly locked page, count it */
2551                                 if (ppa[i]->p_lckcnt == 1) {
2552                                         if (kernel == 0 && use_reserved == 1)
2553                                                 nlck--;
2554                                         *locked += PAGESIZE;
2555                                 }
2556                                 shmd->shm_lckpgs++;
2557                                 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2558                                 if (lockmap != NULL)
2559                                         BT_SET(lockmap, pos);
2560                         }
2561                 }
2562         }
2563         /* Return unused lock reservation */
2564         if (nlck != 0 && use_reserved == 1) {
2565                 mutex_enter(&freemem_lock);
2566                 availrmem       += nlck;
2567                 pages_locked    -= nlck;
2568                 mutex_exit(&freemem_lock);
2569         }
2570 
2571         return (rv);
2572 }
2573 
2574 int
2575 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2576     rctl_qty_t *unlocked)
2577 {
2578         struct shm_data *shmd = seg->s_data;
2579         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2580         struct anon_map *amp = sptd->spt_amp;
2581         struct anon     *ap;
2582         struct vnode    *vp;
2583         u_offset_t      off;
2584         struct page     *pp;
2585         int             kernel;
2586         anon_sync_obj_t cookie;
2587         ulong_t         i;
2588         pgcnt_t         nlck = 0;
2589         pgcnt_t         nlck_limit = NLCK;
2590 
2591         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2592         for (i = 0; i < npages; i++, anon_index++) {
2593                 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2594                         anon_array_enter(amp, anon_index, &cookie);
2595                         ap = anon_get_ptr(amp->ahp, anon_index);
2596                         ASSERT(ap);
2597 
2598                         swap_xlate(ap, &vp, &off);
2599                         anon_array_exit(&cookie);
2600                         pp = page_lookup(vp, off, SE_SHARED);
2601                         ASSERT(pp);
2602                         /*
2603                          * availrmem is decremented only for pages which are not
2604                          * in seg pcache; for pages in seg pcache, availrmem was
2605                          * decremented in _dismpagelock().
2606                          */
2607                         kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2608                         ASSERT(pp->p_lckcnt > 0);
2609 
2610                         /*
2611                          * Unlock the page but do not change availrmem; we
2612                          * update it ourselves every nlck loops.
2613                          */
2614                         page_pp_unlock(pp, 0, 1);
2615                         if (pp->p_lckcnt == 0) {
2616                                 if (kernel == 0)
2617                                         nlck++;
2618                                 *unlocked += PAGESIZE;
2619                         }
2620                         page_unlock(pp);
2621                         shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2622                         sptd->spt_ppa_lckcnt[anon_index]--;
2623                         shmd->shm_lckpgs--;
2624                 }
2625 
2626                 /*
2627                  * To reduce freemem_lock contention, do not update availrmem
2628                  * until at least NLCK pages have been unlocked.
2629                  * 1. No need to update if nlck is zero
2630                  * 2. Always update on the last iteration
2631                  */
2632                 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2633                         mutex_enter(&freemem_lock);
2634                         availrmem       += nlck;
2635                         pages_locked    -= nlck;
2636                         mutex_exit(&freemem_lock);
2637                         nlck = 0;
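                             /*
                              * Randomize the next batch size; presumably this
                              * staggers freemem_lock updates so concurrent
                              * unlockers do not all flush at the same stride.
                              */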
2638                         nlck_limit = NLCK + RAND_P2(NLCK);
2639                 }
2640         }
2641         ANON_LOCK_EXIT(&amp->a_rwlock);
2642 
2643         return (0);
2644 }
2645 
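     /*
      * Handle MC_LOCK and MC_UNLOCK requests for shared memory segments.
      * Only DISM (SHM_PAGEABLE) segments are affected; ISM pages are already
      * locked, so the request is a no-op for them.  Locked bytes are charged
      * against the locked-memory rctl of the shm's project.
      */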
2646 /*ARGSUSED*/
2647 static int
2648 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2649     int attr, int op, ulong_t *lockmap, size_t pos)
2650 {
2651         struct shm_data *shmd = seg->s_data;
2652         struct seg      *sptseg = shmd->shm_sptseg;
2653         struct spt_data *sptd = sptseg->s_data;
2654         struct kshmid   *sp = sptd->spt_amp->a_sp;
2655         pgcnt_t         npages, a_npages;
2656         page_t          **ppa;
2657         pgcnt_t         an_idx, a_an_idx, ppa_idx;
2658         caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2659         size_t          a_len;                  /* aligned len */
2660         size_t          share_sz;
2661         ulong_t         i;
2662         int             sts = 0;
2663         rctl_qty_t      unlocked = 0;
2664         rctl_qty_t      locked = 0;
2665         struct proc     *p = curproc;
2666         kproject_t      *proj;
2667 
2668         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2669         ASSERT(sp != NULL);
2670 
2671         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2672                 return (0);
2673         }
2674 
2675         addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2676         an_idx = seg_page(seg, addr);
2677         npages = btopr(len);
2678 
2679         if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2680                 return (ENOMEM);
2681         }
2682 
2683         /*
2684          * A shm's project never changes, so no lock needed.
2685          * The shm has a hold on the project, so it will not go away.
2686          * Since we have a mapping to shm within this zone, we know
2687          * that the zone will not go away.
2688          */
2689         proj = sp->shm_perm.ipc_proj;
2690 
2691         if (op == MC_LOCK) {
2692 
2693                 /*
2694                  * Align the address and size of the request if they are
2695                  * not already aligned so we can always allocate large
2696                  * page(s); however, we only lock what was initially requested.
2697                  */
2698                 share_sz = page_get_pagesize(sptseg->s_szc);
2699                 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2700                 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2701                     share_sz);
2702                 a_npages = btop(a_len);
2703                 a_an_idx = seg_page(seg, a_addr);
2704                 spt_addr = sptseg->s_base + ptob(a_an_idx);
2705                 ppa_idx = an_idx - a_an_idx;
2706 
2707                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2708                     KM_NOSLEEP)) == NULL) {
2709                         return (ENOMEM);
2710                 }
2711 
2712                 /*
2713                  * Don't cache any new pages for IO and
2714                  * flush any cached pages.
2715                  */
2716                 mutex_enter(&sptd->spt_lock);
2717                 if (sptd->spt_ppa != NULL)
2718                         sptd->spt_flags |= DISM_PPA_CHANGED;
2719 
2720                 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2721                 if (sts != 0) {
2722                         mutex_exit(&sptd->spt_lock);
2723                         kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2724                         return (sts);
2725                 }
2726 
2727                 mutex_enter(&sp->shm_mlock);
2728                 /* enforce locked memory rctl */
2729                 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2730 
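                     /*
                      * Charge the project's locked-memory rctl for the bytes
                      * that are not already locked; fail with EAGAIN if the
                      * limit would be exceeded.
                      */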
2731                 mutex_enter(&p->p_lock);
2732                 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2733                         mutex_exit(&p->p_lock);
2734                         sts = EAGAIN;
2735                 } else {
2736                         mutex_exit(&p->p_lock);
2737                         sts = spt_lockpages(seg, an_idx, npages,
2738                             &ppa[ppa_idx], lockmap, pos, &locked);
2739 
2740                         /*
2741                          * correct locked count if not all pages could be
2742                          * locked
2743                          */
2744                         if ((unlocked - locked) > 0) {
2745                                 rctl_decr_locked_mem(NULL, proj,
2746                                     (unlocked - locked), 0);
2747                         }
2748                 }
2749                 /*
2750                  * Drop the page locks acquired by spt_anon_getpages().
2751                  */
2752                 for (i = 0; i < a_npages; i++)
2753                         page_unlock(ppa[i]);
2754                 if (sptd->spt_ppa != NULL)
2755                         sptd->spt_flags |= DISM_PPA_CHANGED;
2756                 mutex_exit(&sp->shm_mlock);
2757                 mutex_exit(&sptd->spt_lock);
2758 
2759                 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2760 
2761         } else if (op == MC_UNLOCK) { /* unlock */
2762                 page_t          **ppa;
2763 
2764                 mutex_enter(&sptd->spt_lock);
2765                 if (shmd->shm_lckpgs == 0) {
2766                         mutex_exit(&sptd->spt_lock);
2767                         return (0);
2768                 }
2769                 /*
2770                  * Don't cache new IO pages.
2771                  */
2772                 if (sptd->spt_ppa != NULL)
2773                         sptd->spt_flags |= DISM_PPA_CHANGED;
2774 
2775                 mutex_enter(&sp->shm_mlock);
2776                 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2777                 if ((ppa = sptd->spt_ppa) != NULL)
2778                         sptd->spt_flags |= DISM_PPA_CHANGED;
2779                 mutex_exit(&sptd->spt_lock);
2780 
2781                 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2782                 mutex_exit(&sp->shm_mlock);
2783 
2784                 if (ppa != NULL)
2785                         seg_ppurge_wiredpp(ppa);
2786         }
2787         return (sts);
2788 }
2789 
2790 /*ARGSUSED*/
2791 int
2792 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2793 {
2794         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2795         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2796         spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2797 
2798         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2799 
2800         /*
2801          * All pages share the segment protections (ISM is always rw).
2802          */
2803         while (--pgno >= 0)
2804                 *protv++ = sptd->spt_prot;
2805         return (0);
2806 }
2807 
2808 /*ARGSUSED*/
2809 u_offset_t
2810 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2811 {
2812         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2813 
2814         /* Offset does not matter in ISM memory */
2815 
2816         return ((u_offset_t)0);
2817 }
2818 
2819 /* ARGSUSED */
2820 int
2821 segspt_shmgettype(struct seg *seg, caddr_t addr)
2822 {
2823         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2824         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2825 
2826         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2827 
2828         /*
2829          * The shared memory mapping is always MAP_SHARED; swap is
2830          * reserved only for DISM.
2831          */
2832         return (MAP_SHARED |
2833             ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2834 }
2835 
2836 /*ARGSUSED*/
2837 int
2838 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2839 {
2840         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2841         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2842 
2843         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2844 
2845         *vpp = sptd->spt_vp;
2846         return (0);
2847 }
2848 
2849 /*
2850  * We need to wait for pending IO to complete to a DISM segment in order for
2851  * pages to get kicked out of the seg_pcache.  120 seconds should be more
2852  * than enough time to wait.
2853  */
2854 static clock_t spt_pcache_wait = 120;
2855 
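     /*
      * Advise handler for SPT segments.  MADV_FREE on a DISM segment purges
      * any cached page list, waits (up to spt_pcache_wait seconds) for the
      * pages to leave the seg_pcache and then discards the anon pages via
      * anon_disclaim().  The MADV_ACCESS_* values set the lgroup memory
      * allocation policy for the range and mark existing pages for migration.
      */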
2856 /*ARGSUSED*/
2857 static int
2858 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2859 {
2860         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2861         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2862         struct anon_map *amp;
2863         pgcnt_t pg_idx;
2864         ushort_t gen;
2865         clock_t end_lbolt;
2866         int writer;
2867         page_t **ppa;
2868 
2869         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2870 
2871         if (behav == MADV_FREE) {
2872                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2873                         return (0);
2874 
2875                 amp = sptd->spt_amp;
2876                 pg_idx = seg_page(seg, addr);
2877 
2878                 mutex_enter(&sptd->spt_lock);
2879                 if ((ppa = sptd->spt_ppa) == NULL) {
2880                         mutex_exit(&sptd->spt_lock);
2881                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2882                         anon_disclaim(amp, pg_idx, len);
2883                         ANON_LOCK_EXIT(&amp->a_rwlock);
2884                         return (0);
2885                 }
2886 
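                     /*
                      * Flag the cached page list as stale and note the current
                      * generation so the wait loop below can detect when the
                      * seg_pcache entry has actually been invalidated.
                      */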
2887                 sptd->spt_flags |= DISM_PPA_CHANGED;
2888                 gen = sptd->spt_gen;
2889 
2890                 mutex_exit(&sptd->spt_lock);
2891 
2892                 /*
2893                  * Purge all DISM cached pages
2894                  */
2895                 seg_ppurge_wiredpp(ppa);
2896 
2897                 /*
2898                  * Drop the AS_LOCK so that other threads can grab it
2899                  * in the as_pageunlock path and hopefully get the segment
2900                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2901                  * to keep this segment resident.
2902                  */
2903                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2904                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
2905                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2906 
2907                 mutex_enter(&sptd->spt_lock);
2908 
2909                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2910 
2911                 /*
2912                  * Try to wait for pages to get kicked out of the seg_pcache.
2913                  */
2914                 while (sptd->spt_gen == gen &&
2915                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2916                     ddi_get_lbolt() < end_lbolt) {
2917                         if (!cv_timedwait_sig(&sptd->spt_cv,
2918                             &sptd->spt_lock, end_lbolt)) {
2919                                 break;
2920                         }
2921                 }
2922 
2923                 mutex_exit(&sptd->spt_lock);
2924 
2925                 /* Regrab the AS_LOCK and release our hold on the segment */
2926                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2927                     writer ? RW_WRITER : RW_READER);
2928                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
2929                 if (shmd->shm_softlockcnt <= 0) {
2930                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2931                                 mutex_enter(&seg->s_as->a_contents);
2932                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2933                                         AS_CLRUNMAPWAIT(seg->s_as);
2934                                         cv_broadcast(&seg->s_as->a_cv);
2935                                 }
2936                                 mutex_exit(&seg->s_as->a_contents);
2937                         }
2938                 }
2939 
2940                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2941                 anon_disclaim(amp, pg_idx, len);
2942                 ANON_LOCK_EXIT(&amp->a_rwlock);
2943         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2944             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2945                 int                     already_set;
2946                 ulong_t                 anon_index;
2947                 lgrp_mem_policy_t       policy;
2948                 caddr_t                 shm_addr;
2949                 size_t                  share_size;
2950                 size_t                  size;
2951                 struct seg              *sptseg = shmd->shm_sptseg;
2952                 caddr_t                 sptseg_addr;
2953 
2954                 /*
2955                  * Align address and length to page size of underlying segment
2956                  */
2957                 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2958                 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2959                 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2960                     share_size);
2961 
2962                 amp = shmd->shm_amp;
2963                 anon_index = seg_page(seg, shm_addr);
2964 
2965                 /*
2966                  * Now we may have to adjust the size downward if it exceeds
2967                  * the realsize of the segment or the initial anon
2968                  * allocation.
2969                  */
2970                 sptseg_addr = sptseg->s_base + ptob(anon_index);
2971                 if ((sptseg_addr + size) >
2972                     (sptseg->s_base + sptd->spt_realsize))
2973                         size = (sptseg->s_base + sptd->spt_realsize) -
2974                             sptseg_addr;
2975 
2976                 /*
2977                  * Set memory allocation policy for this segment
2978                  */
2979                 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2980                 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2981                     NULL, 0, len);
2982 
2983                 /*
2984                  * If this memory allocation policy is already set and is
2985                  * not reapplicable, don't bother reapplying it.
2986                  */
2987                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2988                         return (0);
2989 
2990                 /*
2991                  * Mark any existing pages in the given range for
2992                  * migration, flushing the I/O page cache and using the
2993                  * underlying segment to calculate the anon index and to
2994                  * obtain the anon map and vnode pointer.
2995                  */
2996                 if (shmd->shm_softlockcnt > 0)
2997                         segspt_purge(seg);
2998 
2999                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3000         }
3001 
3002         return (0);
3003 }
3004 
3005 /*ARGSUSED*/
3006 void
3007 segspt_shmdump(struct seg *seg)
3008 {
3009         /* no-op for ISM segment */
3010 }
3011 
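     /*
      * Changing the page size of an ISM/DISM segment is not supported.
      */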
3012 /*ARGSUSED*/
3013 static faultcode_t
3014 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3015 {
3016         return (ENOTSUP);
3017 }
3018 
3019 /*
3020  * get a memory ID for an addr in a given segment
3021  */
3022 static int
3023 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3024 {
3025         struct shm_data *shmd = (struct shm_data *)seg->s_data;
3026         struct anon     *ap;
3027         size_t          anon_index;
3028         struct anon_map *amp = shmd->shm_amp;
3029         struct spt_data *sptd = shmd->shm_sptseg->s_data;
3030         struct seg      *sptseg = shmd->shm_sptseg;
3031         anon_sync_obj_t cookie;
3032 
3033         anon_index = seg_page(seg, addr);
3034 
3035         if (addr > (seg->s_base + sptd->spt_realsize)) {
3036                 return (EFAULT);
3037         }
3038 
3039         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3040         anon_array_enter(amp, anon_index, &cookie);
3041         ap = anon_get_ptr(amp->ahp, anon_index);
3042         if (ap == NULL) {
3043                 struct page *pp;
3044                 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3045 
3046                 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3047                 if (pp == NULL) {
3048                         anon_array_exit(&cookie);
3049                         ANON_LOCK_EXIT(&amp->a_rwlock);
3050                         return (ENOMEM);
3051                 }
3052                 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3053                 page_unlock(pp);
3054         }
3055         anon_array_exit(&cookie);
3056         ANON_LOCK_EXIT(&amp->a_rwlock);
3057         memidp->val[0] = (uintptr_t)ap;
3058         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3059         return (0);
3060 }
3061 
3062 /*
3063  * Get memory allocation policy info for specified address in given segment
3064  */
3065 static lgrp_mem_policy_info_t *
3066 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3067 {
3068         struct anon_map         *amp;
3069         ulong_t                 anon_index;
3070         lgrp_mem_policy_info_t  *policy_info;
3071         struct shm_data         *shm_data;
3072 
3073         ASSERT(seg != NULL);
3074 
3075         /*
3076          * Get the anon_map from segshm.
3077          *
3078          * Assume that no lock needs to be held on the anon_map, since
3079          * it is protected by its reference count, which must be
3080          * nonzero for an existing segment.  The readers lock on the
3081          * policy tree still needs to be taken, though.
3082          */
3083         shm_data = (struct shm_data *)seg->s_data;
3084         if (shm_data == NULL)
3085                 return (NULL);
3086         amp = shm_data->shm_amp;
3087         ASSERT(amp->refcnt != 0);
3088 
3089         /*
3090          * Get policy info
3091          *
3092          * Assume starting anon index of 0
3093          */
3094         anon_index = seg_page(seg, addr);
3095         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3096 
3097         return (policy_info);
3098 }
3099 
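     /*
      * No optional segment capabilities are supported for SPT segments.
      */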
3100 /*ARGSUSED*/
3101 static int
3102 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3103 {
3104         return (0);
3105 }