patch as-lock-macro-simplification

Simplify address-space lock macro call sites: AS_LOCK_ENTER, AS_LOCK_EXIT,
AS_LOCK_HELD, and AS_WRITE_HELD no longer take an explicit &as->a_lock
argument, so each caller in seg_spt.c passes only the as pointer.
@@ -239,11 +239,11 @@
void
segspt_free(struct seg *seg)
{
struct spt_data *sptd = (struct spt_data *)seg->s_data;
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
if (sptd != NULL) {
if (sptd->spt_realsize)
segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
@@ -261,11 +261,11 @@
/*ARGSUSED*/
static int
segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
uint_t flags)
{
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
return (0);
}
/*ARGSUSED*/
@@ -276,11 +276,11 @@
pgcnt_t npages;
struct shm_data *shmd = (struct shm_data *)seg->s_data;
struct seg *sptseg;
struct spt_data *sptd;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
#ifdef lint
seg = seg;
#endif
sptseg = shmd->shm_sptseg;
sptd = sptseg->s_data;
@@ -340,11 +340,11 @@
static int
segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
size_t share_size;
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
/*
* seg.s_size may have been rounded up to the largest page size
* in shmat().
* XXX This should be cleanedup. sptdestroy should take a length
@@ -391,11 +391,11 @@
/*
* We are holding the a_lock on the underlying dummy as,
* so we can make calls to the HAT layer.
*/
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
ASSERT(sp != NULL);
#ifdef DEBUG
TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
@@ -627,11 +627,11 @@
page_t *rootpp;
rctl_qty_t unlocked_bytes = 0;
kproject_t *proj;
kshmid_t *sp;
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
len = P2ROUNDUP(len, PAGESIZE);
npages = btop(len);
@@ -836,11 +836,11 @@
struct vnode *vp;
u_offset_t off;
pgcnt_t claim_availrmem = 0;
uint_t szc;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
/*
* We want to lock/unlock the entire ISM segment. Therefore,
* we will be using the underlying sptseg and it's base address
@@ -1191,11 +1191,11 @@
uint_t pl_built = 0;
struct anon *ap;
struct vnode *vp;
u_offset_t off;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
/*
* We want to lock/unlock the entire ISM segment. Therefore,
@@ -1449,11 +1449,11 @@
npages = (len >> PAGESHIFT);
ASSERT(npages);
ASSERT(sptd->spt_pcachecnt != 0);
ASSERT(sptd->spt_ppa == pplist);
ASSERT(npages == btopr(sptd->spt_amp->size));
- ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(async || AS_LOCK_HELD(seg->s_as));
/*
* Acquire the lock on the dummy seg and destroy the
* ppa array IF this is the last pcachecnt.
*/
@@ -1583,11 +1583,11 @@
ulong_t anon_index;
struct anon_map *amp; /* XXX - for locknest */
struct anon *ap = NULL;
pgcnt_t npages;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
sptseg = shmd->shm_sptseg;
sptd = sptseg->s_data;
/*
@@ -1607,13 +1607,13 @@
* We are already holding the as->a_lock on the user's
* real segment, but we need to hold the a_lock on the
* underlying dummy as. This is mostly to satisfy the
* underlying HAT layer.
*/
- AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
+ AS_LOCK_ENTER(sptseg->s_as, RW_READER);
hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
- AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
+ AS_LOCK_EXIT(sptseg->s_as);
amp = sptd->spt_amp;
ASSERT(amp != NULL);
anon_index = seg_page(sptseg, sptseg_addr);
@@ -1674,11 +1674,11 @@
struct shm_data *shmd;
struct anon_map *shm_amp = shmd_arg->shm_amp;
struct spt_data *sptd;
int error = 0;
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
if (shmd == NULL)
return (ENOMEM);
@@ -1733,11 +1733,11 @@
segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
{
struct shm_data *shmd = (struct shm_data *)seg->s_data;
int reclaim = 1;
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
retry:
if (shmd->shm_softlockcnt > 0) {
if (reclaim == 1) {
segspt_purge(seg);
reclaim = 0;
@@ -1767,11 +1767,11 @@
segspt_shmfree(struct seg *seg)
{
struct shm_data *shmd = (struct shm_data *)seg->s_data;
struct anon_map *shm_amp = shmd->shm_amp;
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
(void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
MC_UNLOCK, NULL, 0);
/*
@@ -1800,11 +1800,11 @@
/*ARGSUSED*/
int
segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/*
* Shared page table is more than shared mapping.
* Individual process sharing page tables can't change prot
* because there is only one set of page tables.
@@ -1838,11 +1838,11 @@
pgcnt_t pidx;
#ifdef lint
hat = hat;
#endif
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/*
* Because of the way spt is implemented
* the realsize of the segment does not have to be
* equal to the segment size itself. The segment size is
@@ -1907,11 +1907,11 @@
atomic_add_long((ulong_t *)(
&(shmd->shm_softlockcnt)), -npages);
}
goto dism_err;
}
- AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
+ AS_LOCK_ENTER(sptseg->s_as, RW_READER);
a = segspt_addr;
pidx = 0;
if (type == F_SOFTLOCK) {
/*
@@ -1968,11 +1968,11 @@
for (i = 0; i < npages; i++) {
page_unlock(ppa[i]);
}
}
}
- AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
+ AS_LOCK_EXIT(sptseg->s_as);
dism_err:
kmem_free(ppa, npages * sizeof (page_t *));
return (err);
case F_SOFTUNLOCK:
@@ -2035,11 +2035,11 @@
#ifdef lint
hat = hat;
#endif
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
if (sptd->spt_flags & SHM_PAGEABLE) {
return (segspt_dismfault(hat, seg, addr, len, type, rw));
}
@@ -2167,11 +2167,11 @@
* We are already holding the as->a_lock on the user's
* real segment, but we need to hold the a_lock on the
* underlying dummy as. This is mostly to satisfy the
* underlying HAT layer.
*/
- AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
+ AS_LOCK_ENTER(sptseg->s_as, RW_READER);
a = sptseg_addr;
pidx = 0;
if (type == F_SOFTLOCK) {
/*
* Load up the translation keeping it
@@ -2212,11 +2212,11 @@
* And now drop the SE_SHARED lock(s).
*/
for (i = 0; i < npages; i++)
page_unlock(ppa[i]);
}
- AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
+ AS_LOCK_EXIT(sptseg->s_as);
kmem_free(ppa, sizeof (page_t *) * npages);
return (0);
case F_SOFTUNLOCK:
@@ -2282,11 +2282,11 @@
struct shm_data *shmd_new;
struct seg *spt_seg = shmd->shm_sptseg;
struct spt_data *sptd = spt_seg->s_data;
int error = 0;
- ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
newseg->s_data = (void *)shmd_new;
shmd_new->shm_sptas = shmd->shm_sptas;
shmd_new->shm_amp = amp;
@@ -2324,11 +2324,11 @@
segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
struct shm_data *shmd = (struct shm_data *)seg->s_data;
struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/*
* ISM segment is always rw.
*/
return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
@@ -2675,11 +2675,11 @@
rctl_qty_t unlocked = 0;
rctl_qty_t locked = 0;
struct proc *p = curproc;
kproject_t *proj;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
ASSERT(sp != NULL);
if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
return (0);
}
@@ -2805,11 +2805,11 @@
{
struct shm_data *shmd = (struct shm_data *)seg->s_data;
struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/*
* ISM segment is always rw.
*/
while (--pgno >= 0)
@@ -2819,11 +2819,11 @@
/*ARGSUSED*/
u_offset_t
segspt_shmgetoffset(struct seg *seg, caddr_t addr)
{
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/* Offset does not matter in ISM memory */
return ((u_offset_t)0);
}
@@ -2833,11 +2833,11 @@
segspt_shmgettype(struct seg *seg, caddr_t addr)
{
struct shm_data *shmd = (struct shm_data *)seg->s_data;
struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/*
* The shared memory mapping is always MAP_SHARED, SWAP is only
* reserved for DISM
*/
@@ -2850,11 +2850,11 @@
segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
struct shm_data *shmd = (struct shm_data *)seg->s_data;
struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
*vpp = sptd->spt_vp;
return (0);
}
@@ -2876,11 +2876,11 @@
ushort_t gen;
clock_t end_lbolt;
int writer;
page_t **ppa;
- ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
+ ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
if (behav == MADV_FREE) {
if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
return (0);
@@ -2910,13 +2910,13 @@
* Drop the AS_LOCK so that other threads can grab it
* in the as_pageunlock path and hopefully get the segment
* kicked out of the seg_pcache. We bump the shm_softlockcnt
* to keep this segment resident.
*/
- writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
+ writer = AS_WRITE_HELD(seg->s_as);
atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
- AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
+ AS_LOCK_EXIT(seg->s_as);
mutex_enter(&sptd->spt_lock);
end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
@@ -2933,12 +2933,11 @@
}
mutex_exit(&sptd->spt_lock);
/* Regrab the AS_LOCK and release our hold on the segment */
- AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
- writer ? RW_WRITER : RW_READER);
+ AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
if (shmd->shm_softlockcnt <= 0) {
if (AS_ISUNMAPWAIT(seg->s_as)) {
mutex_enter(&seg->s_as->a_contents);
if (AS_ISUNMAPWAIT(seg->s_as)) {