patch vm-cleanup


1959                 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1960                         if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1961                                 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1962                                     SFMMU_L2_HMERLINKS_SIZE);
1963                                 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1964                         }
1965                 }
1966         }
1967         sfmmu_free_sfmmu(sfmmup);
1968 
1969 #ifdef DEBUG
1970         for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1971                 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1972         }
1973 #endif
1974 
1975         kmem_cache_free(sfmmuid_cache, sfmmup);
1976 }
1977 
1978 /*
1979  * Set up any translation structures, for the specified address space,
1980  * that are needed or preferred when the process is being swapped in.
1981  */
1982 /* ARGSUSED */
1983 void
1984 hat_swapin(struct hat *hat)
1985 {
1986         ASSERT(hat->sfmmu_xhat_provider == NULL);
1987 }
1988 
1989 /*
1990  * Free all of the translation resources, for the specified address space,
1991  * that can be freed while the process is swapped out. Called from as_swapout.
1992  * Also, free up the ctx that this process was using.
1993  */
1994 void
1995 hat_swapout(struct hat *sfmmup)
1996 {
1997         struct hmehash_bucket *hmebp;
1998         struct hme_blk *hmeblkp;
1999         struct hme_blk *pr_hblk = NULL;
2000         struct hme_blk *nx_hblk;
2001         int i;
2002         struct hme_blk *list = NULL;
2003         hatlock_t *hatlockp;
2004         struct tsb_info *tsbinfop;
2005         struct free_tsb {
2006                 struct free_tsb *next;
2007                 struct tsb_info *tsbinfop;
2008         };                      /* free list of TSBs */
2009         struct free_tsb *freelist, *last, *next;
2010 
2011         ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2012         SFMMU_STAT(sf_swapout);
2013 
2014         /*
2015          * There is no way to go from an as to all its translations in sfmmu.
2016          * Here is one of the times when we take the big hit and traverse
2017          * the hash looking for hme_blks to free up.  Not only do we free up
2018          * this as's hme_blks, but also any others that are already free.  We are
2019          * obviously swapping because we need memory, so let's free up as much
2020          * as we can.
2021          *
2022          * Note that we don't flush TLB/TSB here -- it's not necessary
2023          * because:
2024          *  1) we free the ctx we're using and throw away the TSB(s);
2025          *  2) processes aren't runnable while being swapped out.
2026          */
2027         ASSERT(sfmmup != KHATID);
2028         for (i = 0; i <= UHMEHASH_SZ; i++) {
2029                 hmebp = &uhme_hash[i];
2030                 SFMMU_HASH_LOCK(hmebp);
2031                 hmeblkp = hmebp->hmeblkp;
2032                 pr_hblk = NULL;
2033                 while (hmeblkp) {
2034 
2035                         ASSERT(!hmeblkp->hblk_xhat_bit);
2036 
2037                         if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2038                             !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2039                                 ASSERT(!hmeblkp->hblk_shared);
2040                                 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2041                                     (caddr_t)get_hblk_base(hmeblkp),
2042                                     get_hblk_endaddr(hmeblkp),
2043                                     NULL, HAT_UNLOAD);
2044                         }
2045                         nx_hblk = hmeblkp->hblk_next;
2046                         if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2047                                 ASSERT(!hmeblkp->hblk_lckcnt);
2048                                 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2049                                     &list, 0);
2050                         } else {
2051                                 pr_hblk = hmeblkp;
2052                         }
2053                         hmeblkp = nx_hblk;
2054                 }
2055                 SFMMU_HASH_UNLOCK(hmebp);
2056         }
2057 
2058         sfmmu_hblks_list_purge(&list, 0);
2059 
2060         /*
2061          * Now free up the ctx so that others can reuse it.
2062          */
2063         hatlockp = sfmmu_hat_enter(sfmmup);
2064 
2065         sfmmu_invalidate_ctx(sfmmup);
2066 
2067         /*
2068          * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2069          * If TSBs were never swapped in, just return.
2070          * This implies that we don't support partial swapping
2071          * of TSBs -- either all are swapped out, or none are.
2072          *
2073          * We must hold the HAT lock here to prevent racing with another
2074          * thread trying to unmap TTEs from the TSB or running the post-
2075          * relocator after relocating the TSB's memory.  Unfortunately, we
2076          * can't free memory while holding the HAT lock or we could
2077          * deadlock, so we build a list of TSBs to be freed after marking
2078          * the tsbinfos as swapped out and free them after dropping the
2079          * lock.
2080          */
2081         if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2082                 sfmmu_hat_exit(hatlockp);
2083                 return;
2084         }
2085 
2086         SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2087         last = freelist = NULL;
2088         for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2089             tsbinfop = tsbinfop->tsb_next) {
2090                 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2091 
2092                 /*
2093                  * Cast the TSB into a struct free_tsb and put it on the free
2094                  * list.
2095                  */
2096                 if (freelist == NULL) {
2097                         last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2098                 } else {
2099                         last->next = (struct free_tsb *)tsbinfop->tsb_va;
2100                         last = last->next;
2101                 }
2102                 last->next = NULL;
2103                 last->tsbinfop = tsbinfop;
2104                 tsbinfop->tsb_flags |= TSB_SWAPPED;
2105                 /*
2106                  * Zero out the TTE to clear the valid bit.
2107                  * Note we can't use a value like 0xbad because we want to
2108                  * ensure diagnostic bits are NEVER set on TTEs that might
2109                  * be loaded.  The intent is to catch any invalid access
2110                  * to the swapped TSB, such as a thread running with a valid
2111                  * context without first calling sfmmu_tsb_swapin() to
2112                  * allocate TSB memory.
2113                  */
2114                 tsbinfop->tsb_tte.ll = 0;
2115         }
2116 
2117         /* Now we can drop the lock and free the TSB memory. */
2118         sfmmu_hat_exit(hatlockp);
2119         for (; freelist != NULL; freelist = next) {
2120                 next = freelist->next;
2121                 sfmmu_tsb_free(freelist->tsbinfop);
2122         }
2123 }
2124 
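The free_tsb trick in hat_swapout() above deserves a note: sfmmu_tsb_free() cannot be called with the HAT lock held, so the function overlays a small list node on each TSB's own memory, chains the TSBs together while the lock is held, and only walks that chain to free them after dropping the lock. Below is a minimal, self-contained sketch of the same pattern in plain C; defer_free(), free_node, and state_lock are illustrative names, not part of hat_sfmmu.c, and each buffer is assumed to be heap-allocated and at least sizeof (struct free_node) bytes.

#include <stdlib.h>
#include <pthread.h>

/*
 * Sketch of the deferred-free pattern used by hat_swapout() above:
 * the list node is overlaid on the buffer that is about to be freed,
 * so building the free list needs no allocation while the lock is
 * held, and the actual free() calls happen only after the lock drops.
 * All names here are hypothetical; only the technique mirrors the code.
 */
struct free_node {
        struct free_node *next;         /* overlaid on the buffer itself */
};

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void
defer_free(void **bufs, int nbuf)
{
        struct free_node *head = NULL, *last = NULL, *next;
        int i;

        (void) pthread_mutex_lock(&state_lock);
        for (i = 0; i < nbuf; i++) {
                struct free_node *node = (struct free_node *)bufs[i];

                if (head == NULL)
                        head = node;
                else
                        last->next = node;
                last = node;
                last->next = NULL;
                /* ...mark the buffer as released in shared state here... */
        }
        (void) pthread_mutex_unlock(&state_lock);

        /* The lock is dropped; now it is safe to free the memory. */
        for (; head != NULL; head = next) {
                next = head->next;
                free(head);
        }
}

The key design point, as in hat_swapout(), is that the bookkeeping needed to defer the free lives inside the memory being freed, so nothing can fail to allocate or deadlock while the lock is held.
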
2125 /*
2126  * Duplicate the translations of an as into another newas
2127  */
2128 /* ARGSUSED */
2129 int
2130 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131         uint_t flag)
2132 {
2133         sf_srd_t *srdp;
2134         sf_scd_t *scdp;
2135         int i;
2136         extern uint_t get_color_start(struct as *);
2137 
2138         ASSERT(hat->sfmmu_xhat_provider == NULL);
2139         ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2140             (flag == HAT_DUP_SRD));
2141         ASSERT(hat != ksfmmup);
2142         ASSERT(newhat != ksfmmup);
2143         ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2144 
2145         if (flag == HAT_DUP_COW) {


9979                         curcnum = sfmmu_getctx_sec();
9980                         if (curcnum == cnum)
9981                                 sfmmu_load_mmustate(sfmmup);
9982                         sfmmu_enable_intrs(pstate_save);
9983                         ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9984                 }
9985         } else {
9986                 /*
9987                  * multi-thread
9988                  * or when sfmmup is not the same as the curproc.
9989                  */
9990                 sfmmu_invalidate_ctx(sfmmup);
9991         }
9992 
9993         kpreempt_enable();
9994 }
9995 
9996 
9997 /*
9998  * Replace the specified TSB with a new TSB.  This function gets called when
9999  * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
10000  * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
10001  * (8K).
10002  *
10003  * Caller must hold the HAT lock, but should assume any tsb_info
10004  * pointers it has are no longer valid after calling this function.
10005  *
10006  * Return values:
10007  *      TSB_ALLOCFAIL   Failed to allocate a TSB, due to memory constraints
10008  *      TSB_LOSTRACE    HAT is busy, i.e. another thread is already doing
10009  *                      something to this tsbinfo/TSB
10010  *      TSB_SUCCESS     Operation succeeded
10011  */
10012 static tsb_replace_rc_t
10013 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
10014     hatlock_t *hatlockp, uint_t flags)
10015 {
10016         struct tsb_info *new_tsbinfo = NULL;
10017         struct tsb_info *curtsb, *prevtsb;
10018         uint_t tte_sz_mask;
10019         int i;
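
The contract spelled out in the block comment above (caller holds the HAT lock, every tsb_info pointer it held beforehand is stale on return, three possible return codes) implies a caller shaped roughly like the sketch below. This is an illustrative sketch, not code from the patch: grow_tsb_example() and the flags value of 0 are assumptions; the lock helpers, the sfmmu_tsb list head, and the return codes are the ones that appear elsewhere in this listing.

/*
 * Hypothetical caller of sfmmu_replace_tsb(), for illustration only.
 * It holds the HAT lock across the call, treats old_tsbinfo as stale
 * afterward, and handles each documented return code explicitly.
 */
static void
grow_tsb_example(sfmmu_t *sfmmup, uint_t new_szc)
{
        hatlock_t *hatlockp;
        struct tsb_info *old_tsbinfo;

        hatlockp = sfmmu_hat_enter(sfmmup);
        old_tsbinfo = sfmmup->sfmmu_tsb;

        switch (sfmmu_replace_tsb(sfmmup, old_tsbinfo, new_szc, hatlockp, 0)) {
        case TSB_SUCCESS:
                /*
                 * old_tsbinfo is stale now; re-read sfmmup->sfmmu_tsb
                 * before touching the replacement TSB.
                 */
                break;
        case TSB_LOSTRACE:
                /* Another thread is already working on this tsbinfo. */
                break;
        case TSB_ALLOCFAIL:
                /* Memory pressure; keep whatever TSB is still in place. */
                break;
        }
        sfmmu_hat_exit(hatlockp);
}

Note that old_tsbinfo is deliberately not dereferenced after the call; on TSB_SUCCESS the replacement is found by re-reading sfmmup->sfmmu_tsb while the same HAT lock is still held.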




1959                 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1960                         if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1961                                 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1962                                     SFMMU_L2_HMERLINKS_SIZE);
1963                                 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1964                         }
1965                 }
1966         }
1967         sfmmu_free_sfmmu(sfmmup);
1968 
1969 #ifdef DEBUG
1970         for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1971                 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1972         }
1973 #endif
1974 
1975         kmem_cache_free(sfmmuid_cache, sfmmup);
1976 }
1977 
1978 /*
1979  * Duplicate the translations of an as into another newas
1980  */
1981 /* ARGSUSED */
1982 int
1983 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
1984         uint_t flag)
1985 {
1986         sf_srd_t *srdp;
1987         sf_scd_t *scdp;
1988         int i;
1989         extern uint_t get_color_start(struct as *);
1990 
1991         ASSERT(hat->sfmmu_xhat_provider == NULL);
1992         ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
1993             (flag == HAT_DUP_SRD));
1994         ASSERT(hat != ksfmmup);
1995         ASSERT(newhat != ksfmmup);
1996         ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
1997 
1998         if (flag == HAT_DUP_COW) {


9832                         curcnum = sfmmu_getctx_sec();
9833                         if (curcnum == cnum)
9834                                 sfmmu_load_mmustate(sfmmup);
9835                         sfmmu_enable_intrs(pstate_save);
9836                         ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9837                 }
9838         } else {
9839                 /*
9840                  * multi-thread
9841                  * or when sfmmup is not the same as the curproc.
9842                  */
9843                 sfmmu_invalidate_ctx(sfmmup);
9844         }
9845 
9846         kpreempt_enable();
9847 }
9848 
9849 
9850 /*
9851  * Replace the specified TSB with a new TSB.  This function gets called when
9852  * we grow or shrink a TSB.  When swapping in a TSB (TSB_SWAPIN), the
9853  * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9854  * (8K).
9855  *
9856  * Caller must hold the HAT lock, but should assume any tsb_info
9857  * pointers it has are no longer valid after calling this function.
9858  *
9859  * Return values:
9860  *      TSB_ALLOCFAIL   Failed to allocate a TSB, due to memory constraints
9861  *      TSB_LOSTRACE    HAT is busy, i.e. another thread is already doing
9862  *                      something to this tsbinfo/TSB
9863  *      TSB_SUCCESS     Operation succeeded
9864  */
9865 static tsb_replace_rc_t
9866 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9867     hatlock_t *hatlockp, uint_t flags)
9868 {
9869         struct tsb_info *new_tsbinfo = NULL;
9870         struct tsb_info *curtsb, *prevtsb;
9871         uint_t tte_sz_mask;
9872         int i;