5042 stop using deprecated atomic functions
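
The hunks below show each affected site before the change (using the deprecated cas32()) and after it (using atomic_cas_32() from the supported atomic interfaces). As a minimal sketch of the idiom being updated — the helper set_mask_bit() and its arguments are illustrative only and not part of this change; userland code gets atomic_cas_32() from <atomic.h>, kernel code from <sys/atomic.h> — the retry loop depends only on the CAS routine returning the previous contents of the target word:

/*
 * Illustrative sketch (not from this webrev): atomically set a bit in a
 * 32-bit mask using a compare-and-swap retry loop.
 */
#include <sys/types.h>
#include <atomic.h>

static void
set_mask_bit(volatile uint32_t *maskp, uint_t bit)
{
	uint32_t oldmask, newmask;

	do {
		oldmask = *maskp;
		newmask = oldmask | (1U << bit);
		/* Before the change this line used cas32() instead. */
		newmask = atomic_cas_32(maskp, oldmask, newmask);
	} while (newmask != oldmask);	/* lost a race; retry */
}

int
main(void)
{
	volatile uint32_t mask = 0;

	set_mask_bit(&mask, 3);
	return (mask == 0x8 ? 0 : 1);
}

Both cas32() and atomic_cas_32() return the value that was in *maskp before the operation, so the loop simply retries whenever another thread modified the mask between the read and the CAS; the rename does not change the behavior of the loops below.
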


3661                  * if this is an unused hblk it was just allocated or could
3662                  * potentially be a previous large page hblk so we need to
3663                  * set the shadow bit.
3664                  */
3665                 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3666                 hmeblkp->hblk_shw_bit = 1;
3667         } else if (hmeblkp->hblk_shw_bit == 0) {
3668                 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3669                     (void *)hmeblkp);
3670         }
3671         ASSERT(hmeblkp->hblk_shw_bit == 1);
3672         ASSERT(!hmeblkp->hblk_shared);
3673         vshift = vaddr_to_vshift(hblktag, vaddr, size);
3674         ASSERT(vshift < 8);
3675         /*
3676          * Atomically set shw mask bit
3677          */
3678         do {
3679                 shw_mask = hmeblkp->hblk_shw_mask;
3680                 newshw_mask = shw_mask | (1 << vshift);
3681                 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
3682                     newshw_mask);
3683         } while (newshw_mask != shw_mask);
3684 
3685         SFMMU_HASH_UNLOCK(hmebp);
3686 
3687         return (hmeblkp);
3688 }
3689 
3690 /*
3691  * This routine cleans up a previous shadow hmeblk and changes it to
3692  * a regular hblk.  This happens rarely but it is possible
3693  * when a process wants to use large pages and there are hblks still
3694  * lying around from a previous as (address space) that used these
3695  * hmeblks.  The alternative was to clean up the shadow hblks at
3696  * unload time, but since so few user processes actually use large
3697  * pages, it is better to be lazy and clean up at this time.
3698  */
3699 static void
3700 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3701         struct hmehash_bucket *hmebp)


11661         ASSERT(hmeblkp->hblk_lckcnt == 0);
11662         ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11663 
11664         sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11665         hmeblkp->hblk_nextpa = hblkpa;
11666 
11667         shw_hblkp = hmeblkp->hblk_shadow;
11668         if (shw_hblkp) {
11669                 ASSERT(!hmeblkp->hblk_shared);
11670                 shw_size = get_hblk_ttesz(shw_hblkp);
11671                 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11672                 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11673                 ASSERT(vshift < 8);
11674                 /*
11675                  * Atomically clear shadow mask bit
11676                  */
11677                 do {
11678                         shw_mask = shw_hblkp->hblk_shw_mask;
11679                         ASSERT(shw_mask & (1 << vshift));
11680                         newshw_mask = shw_mask & ~(1 << vshift);
11681                         newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
11682                             shw_mask, newshw_mask);
11683                 } while (newshw_mask != shw_mask);
11684                 hmeblkp->hblk_shadow = NULL;
11685         }
11686 
11687         /*
11688          * Remove the shadow bit if we are stealing an unused shadow hmeblk.
11689          * sfmmu_hblk_alloc needs it that way and will set the shadow bit
11690          * later if we are indeed allocating a shadow hmeblk.
11691          */
11692         hmeblkp->hblk_shw_bit = 0;
11693 
11694         if (hmeblkp->hblk_shared) {
11695                 sf_srd_t        *srdp;
11696                 sf_region_t     *rgnp;
11697                 uint_t          rid;
11698 
11699                 srdp = hblktosrd(hmeblkp);
11700                 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11701                 rid = hmeblkp->hblk_tag.htag_rid;


15740                 ASSERT(!hmeblkp->hblk_shared);
15741 #ifdef  DEBUG
15742                 if (mmu_page_sizes == max_mmu_page_sizes) {
15743                         ASSERT(size < TTE256M);
15744                 } else {
15745                         ASSERT(size < TTE4M);
15746                 }
15747 #endif /* DEBUG */
15748 
15749                 shw_size = get_hblk_ttesz(shw_hblkp);
15750                 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15751                 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15752                 ASSERT(vshift < 8);
15753                 /*
15754                  * Atomically clear shadow mask bit
15755                  */
15756                 do {
15757                         shw_mask = shw_hblkp->hblk_shw_mask;
15758                         ASSERT(shw_mask & (1 << vshift));
15759                         newshw_mask = shw_mask & ~(1 << vshift);
15760                         newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
15761                             shw_mask, newshw_mask);
15762                 } while (newshw_mask != shw_mask);
15763                 hmeblkp->hblk_shadow = NULL;
15764         }
15765         hmeblkp->hblk_shw_bit = 0;
15766 
15767         if (hmeblkp->hblk_shared) {
15768 #ifdef  DEBUG
15769                 sf_srd_t        *srdp;
15770                 sf_region_t     *rgnp;
15771                 uint_t          rid;
15772 
15773                 srdp = hblktosrd(hmeblkp);
15774                 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15775                 rid = hmeblkp->hblk_tag.htag_rid;
15776                 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15777                 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15778                 rgnp = srdp->srd_hmergnp[rid];
15779                 ASSERT(rgnp != NULL);
15780                 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);




3661                  * if this is an unused hblk it was just allocated or could
3662                  * potentially be a previous large page hblk so we need to
3663                  * set the shadow bit.
3664                  */
3665                 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3666                 hmeblkp->hblk_shw_bit = 1;
3667         } else if (hmeblkp->hblk_shw_bit == 0) {
3668                 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3669                     (void *)hmeblkp);
3670         }
3671         ASSERT(hmeblkp->hblk_shw_bit == 1);
3672         ASSERT(!hmeblkp->hblk_shared);
3673         vshift = vaddr_to_vshift(hblktag, vaddr, size);
3674         ASSERT(vshift < 8);
3675         /*
3676          * Atomically set shw mask bit
3677          */
3678         do {
3679                 shw_mask = hmeblkp->hblk_shw_mask;
3680                 newshw_mask = shw_mask | (1 << vshift);
3681                 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3682                     newshw_mask);
3683         } while (newshw_mask != shw_mask);
3684 
3685         SFMMU_HASH_UNLOCK(hmebp);
3686 
3687         return (hmeblkp);
3688 }
3689 
3690 /*
3691  * This routine cleans up a previous shadow hmeblk and changes it to
3692  * a regular hblk.  This happens rarely but it is possible
3693  * when a process wants to use large pages and there are hblks still
3694  * lying around from a previous as (address space) that used these
3695  * hmeblks.  The alternative was to clean up the shadow hblks at
3696  * unload time, but since so few user processes actually use large
3697  * pages, it is better to be lazy and clean up at this time.
3698  */
3699 static void
3700 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3701         struct hmehash_bucket *hmebp)


11661         ASSERT(hmeblkp->hblk_lckcnt == 0);
11662         ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11663 
11664         sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11665         hmeblkp->hblk_nextpa = hblkpa;
11666 
11667         shw_hblkp = hmeblkp->hblk_shadow;
11668         if (shw_hblkp) {
11669                 ASSERT(!hmeblkp->hblk_shared);
11670                 shw_size = get_hblk_ttesz(shw_hblkp);
11671                 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11672                 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11673                 ASSERT(vshift < 8);
11674                 /*
11675                  * Atomically clear shadow mask bit
11676                  */
11677                 do {
11678                         shw_mask = shw_hblkp->hblk_shw_mask;
11679                         ASSERT(shw_mask & (1 << vshift));
11680                         newshw_mask = shw_mask & ~(1 << vshift);
11681                         newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11682                             shw_mask, newshw_mask);
11683                 } while (newshw_mask != shw_mask);
11684                 hmeblkp->hblk_shadow = NULL;
11685         }
11686 
11687         /*
11688          * Remove the shadow bit if we are stealing an unused shadow hmeblk.
11689          * sfmmu_hblk_alloc needs it that way and will set the shadow bit
11690          * later if we are indeed allocating a shadow hmeblk.
11691          */
11692         hmeblkp->hblk_shw_bit = 0;
11693 
11694         if (hmeblkp->hblk_shared) {
11695                 sf_srd_t        *srdp;
11696                 sf_region_t     *rgnp;
11697                 uint_t          rid;
11698 
11699                 srdp = hblktosrd(hmeblkp);
11700                 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11701                 rid = hmeblkp->hblk_tag.htag_rid;


15740                 ASSERT(!hmeblkp->hblk_shared);
15741 #ifdef  DEBUG
15742                 if (mmu_page_sizes == max_mmu_page_sizes) {
15743                         ASSERT(size < TTE256M);
15744                 } else {
15745                         ASSERT(size < TTE4M);
15746                 }
15747 #endif /* DEBUG */
15748 
15749                 shw_size = get_hblk_ttesz(shw_hblkp);
15750                 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15751                 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15752                 ASSERT(vshift < 8);
15753                 /*
15754                  * Atomically clear shadow mask bit
15755                  */
15756                 do {
15757                         shw_mask = shw_hblkp->hblk_shw_mask;
15758                         ASSERT(shw_mask & (1 << vshift));
15759                         newshw_mask = shw_mask & ~(1 << vshift);
15760                         newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15761                             shw_mask, newshw_mask);
15762                 } while (newshw_mask != shw_mask);
15763                 hmeblkp->hblk_shadow = NULL;
15764         }
15765         hmeblkp->hblk_shw_bit = 0;
15766 
15767         if (hmeblkp->hblk_shared) {
15768 #ifdef  DEBUG
15769                 sf_srd_t        *srdp;
15770                 sf_region_t     *rgnp;
15771                 uint_t          rid;
15772 
15773                 srdp = hblktosrd(hmeblkp);
15774                 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15775                 rid = hmeblkp->hblk_tag.htag_rid;
15776                 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15777                 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15778                 rgnp = srdp->srd_hmergnp[rid];
15779                 ASSERT(rgnp != NULL);
15780                 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);