patch as-lock-macro-simplification
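
Every hunk below makes the same mechanical change: the AS_* lock macros no
longer take an explicit lock argument, because the lock they operate on is
always the a_lock embedded in the struct as itself. For context, a minimal
before/after sketch of the macro definitions (assumed from illumos's
uts/common/vm/as.h; the definitions themselves are not part of the hunks
shown on this page):

	/* Before (assumed): callers pass the as and its lock redundantly. */
	#define	AS_LOCK_ENTER(as, lock, type)	rw_enter((lock), (type))
	#define	AS_LOCK_EXIT(as, lock)		rw_exit((lock))
	#define	AS_LOCK_HELD(as, lock)		RW_LOCK_HELD((lock))
	#define	AS_WRITE_HELD(as, lock)		RW_WRITE_HELD((lock))

	/* After (assumed): the macros derive the lock from the as. */
	#define	AS_LOCK_ENTER(as, type)		rw_enter(&(as)->a_lock, (type))
	#define	AS_LOCK_EXIT(as)		rw_exit(&(as)->a_lock)
	#define	AS_LOCK_HELD(as)		RW_LOCK_HELD(&(as)->a_lock)
	#define	AS_WRITE_HELD(as)		RW_WRITE_HELD(&(as)->a_lock)

The call-site rewrites in hat_i86.c below follow directly, e.g.
ASSERT(AS_WRITE_HELD(as, &as->a_lock)) becomes ASSERT(AS_WRITE_HELD(as)).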

***************
*** 258,268 ****
  	 * the htable_steal() code.
  	 */
  	if (can_steal_post_boot == 0)
  		can_steal_post_boot = 1;
  
! 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
  	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
  	hat->hat_as = as;
  	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
  	ASSERT(hat->hat_flags == 0);
  
--- 258,268 ----
  	 * the htable_steal() code.
  	 */
  	if (can_steal_post_boot == 0)
  		can_steal_post_boot = 1;
  
! 	ASSERT(AS_WRITE_HELD(as));
  	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
  	hat->hat_as = as;
  	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
  	ASSERT(hat->hat_flags == 0);
  
***************
*** 391,401 ****
   */
  /*ARGSUSED*/
  void
  hat_free_start(hat_t *hat)
  {
! 	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
  
  	/*
  	 * If the hat is currently a stealing victim, wait for the stealing
  	 * to finish. Once we mark it as HAT_FREEING, htable_steal()
  	 * won't look at its pagetables anymore.
--- 391,401 ----
   */
  /*ARGSUSED*/
  void
  hat_free_start(hat_t *hat)
  {
! 	ASSERT(AS_WRITE_HELD(hat->hat_as));
  
  	/*
  	 * If the hat is currently a stealing victim, wait for the stealing
  	 * to finish. Once we mark it as HAT_FREEING, htable_steal()
  	 * won't look at its pagetables anymore.
***************
*** 724,739 ****
  	}
  
  	/*
  	 * Set up the kernel's hat
  	 */
! 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
  	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
  	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
  	kas.a_hat->hat_as = &kas;
  	kas.a_hat->hat_flags = 0;
! 	AS_LOCK_EXIT(&kas, &kas.a_lock);
  
  	CPUSET_ZERO(khat_cpuset);
  	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
  
  	/*
--- 724,739 ----
  	}
  
  	/*
  	 * Set up the kernel's hat
  	 */
! 	AS_LOCK_ENTER(&kas, RW_WRITER);
  	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
  	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
  	kas.a_hat->hat_as = &kas;
  	kas.a_hat->hat_flags = 0;
! 	AS_LOCK_EXIT(&kas);
  
  	CPUSET_ZERO(khat_cpuset);
  	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
  
  	/*
***************
*** 1155,1165 ****
  	 * Instead we'll walk through all the address space and unload
  	 * any mappings which we are sure are not shared, not locked.
  	 */
  	ASSERT(IS_PAGEALIGNED(vaddr));
  	ASSERT(IS_PAGEALIGNED(eaddr));
! 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
  		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
  
  	while (vaddr < eaddr) {
  		(void) htable_walk(hat, &ht, &vaddr, eaddr);
--- 1155,1165 ----
  	 * Instead we'll walk through all the address space and unload
  	 * any mappings which we are sure are not shared, not locked.
  	 */
  	ASSERT(IS_PAGEALIGNED(vaddr));
  	ASSERT(IS_PAGEALIGNED(eaddr));
! 	ASSERT(AS_LOCK_HELD(hat->hat_as));
  	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
  		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
  
  	while (vaddr < eaddr) {
  		(void) htable_walk(hat, &ht, &vaddr, eaddr);
***************
*** 1436,1447 ****
  	 * early before we blow out the kernel stack.
  	 */
  	++curthread->t_hatdepth;
  	ASSERT(curthread->t_hatdepth < 16);
  
! 	ASSERT(hat == kas.a_hat ||
! 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  
  	if (flags & HAT_LOAD_SHARE)
  		hat->hat_flags |= HAT_SHARED;
  
  	/*
--- 1436,1446 ----
  	 * early before we blow out the kernel stack.
  	 */
  	++curthread->t_hatdepth;
  	ASSERT(curthread->t_hatdepth < 16);
  
! 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
  
  	if (flags & HAT_LOAD_SHARE)
  		hat->hat_flags |= HAT_SHARED;
  
  	/*
***************
*** 1585,1596 ****
  	pfn_t		pfn = page_pptonum(pp);
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(va));
  	ASSERT(hat == kas.a_hat || va < _userlimit);
! 	ASSERT(hat == kas.a_hat ||
! 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  	ASSERT((flags & supported_memload_flags) == flags);
  
  	ASSERT(!IN_VA_HOLE(va));
  	ASSERT(!PP_ISFREE(pp));
  
--- 1584,1594 ----
  	pfn_t		pfn = page_pptonum(pp);
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(va));
  	ASSERT(hat == kas.a_hat || va < _userlimit);
! 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
  	ASSERT((flags & supported_memload_flags) == flags);
  
  	ASSERT(!IN_VA_HOLE(va));
  	ASSERT(!PP_ISFREE(pp));
  
***************
*** 1643,1654 ****
  	pgcnt_t		i;
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(va));
  	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
! 	ASSERT(hat == kas.a_hat ||
! 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  	ASSERT((flags & supported_memload_flags) == flags);
  
  	/*
  	 * memload is used for memory with full caching enabled, so
  	 * set HAT_STORECACHING_OK.
--- 1641,1651 ----
  	pgcnt_t		i;
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(va));
  	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
! 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
  	ASSERT((flags & supported_memload_flags) == flags);
  
  	/*
  	 * memload is used for memory with full caching enabled, so
  	 * set HAT_STORECACHING_OK.
***************
*** 1779,1790 ****
  	uint_t		a;	/* per PTE copy of attr */
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(va));
  	ASSERT(hat == kas.a_hat || eva <= _userlimit);
! 	ASSERT(hat == kas.a_hat ||
! 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  	ASSERT((flags & supported_devload_flags) == flags);
  
  	/*
  	 * handle all pages
  	 */
--- 1776,1786 ----
  	uint_t		a;	/* per PTE copy of attr */
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(va));
  	ASSERT(hat == kas.a_hat || eva <= _userlimit);
! 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
  	ASSERT((flags & supported_devload_flags) == flags);
  
  	/*
  	 * handle all pages
  	 */
***************
*** 1888,1898 ****
  		return;
  	if (eaddr > _userlimit)
  		panic("hat_unlock() address out of range - above _userlimit");
  
  	XPV_DISALLOW_MIGRATE();
! 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  	while (vaddr < eaddr) {
  		(void) htable_walk(hat, &ht, &vaddr, eaddr);
  		if (ht == NULL)
  			break;
  
--- 1884,1894 ----
  		return;
  	if (eaddr > _userlimit)
  		panic("hat_unlock() address out of range - above _userlimit");
  
  	XPV_DISALLOW_MIGRATE();
! 	ASSERT(AS_LOCK_HELD(hat->hat_as));
  	while (vaddr < eaddr) {
  		(void) htable_walk(hat, &ht, &vaddr, eaddr);
  		if (ht == NULL)
  			break;
  
***************
*** 2643,2654 ****
  	page_t		*pp;
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(vaddr));
  	ASSERT(IS_PAGEALIGNED(eaddr));
! 	ASSERT(hat == kas.a_hat ||
! 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
  try_again:
  		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
  		if (ht == NULL)
  			break;
--- 2639,2649 ----
  	page_t		*pp;
  
  	XPV_DISALLOW_MIGRATE();
  	ASSERT(IS_PAGEALIGNED(vaddr));
  	ASSERT(IS_PAGEALIGNED(eaddr));
! 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
  	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
  try_again:
  		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
  		if (ht == NULL)
  			break;
***************
*** 2854,2865 ****
  	uint_t		entry;
  	htable_t	*ht;
  	pgcnt_t		pg_off;
  
  	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
! 	ASSERT(hat == kas.a_hat ||
! 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
  	if (IN_VA_HOLE(vaddr))
  		return (0);
  
  	/*
  	 * Most common use of hat_probe is from segmap. We special case it
--- 2849,2859 ----
  	uint_t		entry;
  	htable_t	*ht;
  	pgcnt_t		pg_off;
  
  	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
! 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
  	if (IN_VA_HOLE(vaddr))
  		return (0);
  
  	/*
  	 * Most common use of hat_probe is from segmap. We special case it