6154 const-ify segment ops structures
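The change converts the segment ops vectors (struct seg_ops) and the pointers that cache them to const, so the function-pointer tables become compile-time immutable and can be placed in read-only data. Below is a minimal userland sketch of the idiom; the two-entry ops structure, the segfoo_* names, and the signatures are hypothetical stand-ins, not the real illumos declarations.

#include <stddef.h>

struct seg;				/* opaque in this sketch */

/* Reduced, illustrative ops vector; the real seg_ops has many more entries. */
struct seg_ops {
	int	(*fault)(struct seg *);
	size_t	(*swapout)(struct seg *);
};

static int
segfoo_fault(struct seg *seg)
{
	(void) seg;
	return (0);
}

static size_t
segfoo_swapout(struct seg *seg)
{
	(void) seg;
	return (0);
}

/*
 * The const-ified table: stray stores are rejected by the compiler
 * and the object can live in .rodata instead of writable data.
 */
static const struct seg_ops segfoo_ops = {
	.fault		= segfoo_fault,
	.swapout	= segfoo_swapout,
};

int
main(void)
{
	return ((int)segfoo_ops.swapout(NULL));
}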

Old version of the affected hunks in usr/src/uts/common/vm/vm_as.c:

 457         seg = avl_find(&as->a_segtree, &addr, &where);
 458 
 459         if (seg == NULL)
 460                 seg = avl_nearest(&as->a_segtree, where, AVL_AFTER);
 461 
 462         if (seg == NULL)
 463                 seg = avl_last(&as->a_segtree);
 464 
 465         if (seg != NULL) {
 466                 caddr_t base = seg->s_base;
 467 
 468                 /*
 469                  * If top of seg is below the requested address, then
 470                  * the insertion point is at the end of the linked list,
 471                  * and seg points to the tail of the list.  Otherwise,
 472                  * the insertion point is immediately before seg.
 473                  */
 474                 if (base + seg->s_size > addr) {
 475                         if (addr >= base || eaddr > base) {
 476 #ifdef __sparc
 477                                 extern struct seg_ops segnf_ops;
 478 
 479                                 /*
 480                                  * no-fault segs must disappear if overlaid.
 481                                  * XXX need new segment type so
 482                                  * we don't have to check s_ops
 483                                  */
 484                                 if (seg->s_ops == &segnf_ops) {
 485                                         seg_unmap(seg);
 486                                         goto again;
 487                                 }
 488 #endif
 489                                 return (-1);    /* overlapping segment */
 490                         }
 491                 }
 492         }
 493         as->a_seglast = newseg;
 494         avl_insert(&as->a_segtree, newseg, where);
 495 
 496 #ifdef VERIFY_SEGLIST
 497         as_verify(as);

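The avl_find()/avl_nearest()/avl_insert() sequence in the hunk above is the stock libavl find-or-insert idiom: a failed avl_find() deposits an insertion cookie in where, avl_nearest() peeks at the would-be neighbor so the overlap checks can run, and avl_insert() then reuses the cookie without a second tree walk. A self-contained userland sketch, assuming illumos libavl (<sys/avl.h>, link with -lavl); range_t and range_add() are invented for illustration:

#include <sys/avl.h>
#include <stddef.h>
#include <stdint.h>

typedef struct range {
	avl_node_t	r_node;
	uintptr_t	r_base;		/* stands in for seg->s_base */
} range_t;

static int
range_cmp(const void *a, const void *b)
{
	const range_t *l = a;
	const range_t *r = b;

	if (l->r_base < r->r_base)
		return (-1);
	return (l->r_base > r->r_base);
}

static int
range_add(avl_tree_t *tree, range_t *new)
{
	avl_index_t where;
	range_t *nbr;

	/* An exact hit means an entry with the same base already exists. */
	if (avl_find(tree, new, &where) != NULL)
		return (-1);

	/*
	 * 'where' now encodes the insertion point; peek at the entry
	 * that would follow the new one (or at the tree's last entry),
	 * as as_addseg() does before its overlap checks.
	 */
	nbr = avl_nearest(tree, where, AVL_AFTER);
	if (nbr == NULL)
		nbr = avl_last(tree);
	(void) nbr;		/* overlap checks against nbr go here */

	avl_insert(tree, new, where);
	return (0);
}

int
main(void)
{
	avl_tree_t tree;
	range_t a = { .r_base = 0x1000 };

	avl_create(&tree, range_cmp, sizeof (range_t),
	    offsetof(range_t, r_node));
	return (range_add(&tree, &a) != 0);
}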

 865 }
 866 
 867 /*
 868  * Handle a ``fault'' at addr for size bytes.
 869  */
 870 faultcode_t
 871 as_fault(struct hat *hat, struct as *as, caddr_t addr, size_t size,
 872         enum fault_type type, enum seg_rw rw)
 873 {
 874         struct seg *seg;
 875         caddr_t raddr;                  /* rounded down addr */
 876         size_t rsize;                   /* rounded up size */
 877         size_t ssize;
 878         faultcode_t res = 0;
 879         caddr_t addrsav;
 880         struct seg *segsav;
 881         int as_lock_held;
 882         klwp_t *lwp = ttolwp(curthread);
 883         int is_xhat = 0;
 884         int holding_wpage = 0;
 885         extern struct seg_ops   segdev_ops;
 886 
 887 
 888 
 889         if (as->a_hat != hat) {
 890                 /* This must be an XHAT then */
 891                 is_xhat = 1;
 892 
 893                 if ((type != F_INVAL) || (as == &kas))
 894                         return (FC_NOSUPPORT);
 895         }
 896 
 897 retry:
 898         if (!is_xhat) {
 899                 /*
 900                  * Indicate that the lwp is not to be stopped while waiting
 901                  * for a pagefault.  This is to avoid deadlock while debugging
 902                  * a process via /proc over NFS (in particular).
 903                  */
 904                 if (lwp != NULL)
 905                         lwp->lwp_nostop++;
 906 
 907                 /*

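The lwp_nostop bump above implements a hold-off counter: while it is nonzero, the thread declines /proc stop requests, so a debugger (possibly coming in over NFS) cannot park the thread in the middle of resolving its own pagefault. A generic userland sketch of the bracket pattern, with an invented toy_lwp type standing in for klwp_t:

typedef struct toy_lwp {
	int	lwp_nostop;	/* >0: ignore stop requests */
} toy_lwp_t;

static int
can_stop(const toy_lwp_t *lwp)
{
	return (lwp == NULL || lwp->lwp_nostop == 0);
}

static void
fault_region(toy_lwp_t *lwp)
{
	/* Enter the no-stop bracket for the duration of the wait. */
	if (lwp != NULL)
		lwp->lwp_nostop++;

	/* ... blocking pagefault resolution would happen here ... */

	if (lwp != NULL)
		lwp->lwp_nostop--;
}

int
main(void)
{
	toy_lwp_t lwp = { 0 };

	fault_region(&lwp);
	return (!can_stop(&lwp));
}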

2071  * -1 is returned.
2072  *
2073  * NOTE: This routine is not correct when base+len overflows caddr_t.
2074  */
2075 int
2076 as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
2077     caddr_t addr)
2078 {
2079 
2080         return (as_gap_aligned(as, minlen, basep, lenp, flags, addr, 0, 0, 0));
2081 }
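The three trailing zeros pass no extra constraints to as_gap_aligned(); assuming its tail parameters are the alignment, redzone, and offset controls (an inference, since that function is not shown in these hunks), as_gap() is simply the unconstrained form of the aligned gap search.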
2082 
2083 /*
2084  * Return the next range within [base, base + len) that is backed
2085  * with "real memory".  Skip holes and non-seg_vn segments.
2086  * We're lazy and only return one segment at a time.
2087  */
2088 int
2089 as_memory(struct as *as, caddr_t *basep, size_t *lenp)
2090 {
2091         extern struct seg_ops segspt_shmops;    /* needs a header file */
2092         struct seg *seg;
2093         caddr_t addr, eaddr;
2094         caddr_t segend;
2095 
2096         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2097 
2098         addr = *basep;
2099         eaddr = addr + *lenp;
2100 
2101         seg = as_findseg(as, addr, 0);
2102         if (seg != NULL)
2103                 addr = MAX(seg->s_base, addr);
2104 
2105         for (;;) {
2106                 if (seg == NULL || addr >= eaddr || eaddr <= seg->s_base) {
2107                         AS_LOCK_EXIT(as, &as->a_lock);
2108                         return (EINVAL);
2109                 }
2110 
2111                 if (seg->s_ops == &segvn_ops) {

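The contract stated in the comment above -- narrow the range to the next backed piece, one segment per call -- implies a simple consumer loop that advances past each returned piece until the routine fails. A hedged userland sketch against a stub with the same contract; memory_next(), the segs table, and every name here are invented:

#include <stdio.h>
#include <errno.h>
#include <stddef.h>

/* Toy backed ranges standing in for seg_vn segments, sorted by base. */
static const struct { size_t base, len; } segs[] = {
	{ 0x1000, 0x2000 },
	{ 0x8000, 0x1000 },
};

/*
 * as_memory()-style contract: narrow [*basep, *basep + *lenp) to the
 * next backed subrange, or return EINVAL if nothing intersects it.
 */
static int
memory_next(size_t *basep, size_t *lenp)
{
	size_t addr = *basep, eaddr = addr + *lenp;
	size_t i;

	for (i = 0; i < sizeof (segs) / sizeof (segs[0]); i++) {
		size_t sbase = segs[i].base;
		size_t send = sbase + segs[i].len;

		if (send <= addr || sbase >= eaddr)
			continue;
		*basep = (addr > sbase) ? addr : sbase;
		*lenp = ((eaddr < send) ? eaddr : send) - *basep;
		return (0);
	}
	return (EINVAL);
}

int
main(void)
{
	size_t base = 0, eaddr = 0x10000, len;

	for (;;) {
		len = eaddr - base;
		if (memory_next(&base, &len) != 0)
			break;
		(void) printf("backed: %#zx + %#zx\n", base, len);
		base += len;	/* skip past the piece just returned */
	}
	return (0);
}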

2175         /*
2176          * Free all mapping resources associated with the address
2177          * space.  The segment-level swapout routines capitalize
2178  * on this unmapping by scavenging pages that have become
2179          * unmapped here.
2180          */
2181         hat_swapout(as->a_hat);
2182         if (as->a_xhat != NULL)
2183                 xhat_swapout_all(as);
2184 
2185         mutex_enter(&as->a_contents);
2186         AS_CLRBUSY(as);
2187         mutex_exit(&as->a_contents);
2188 
2189         /*
2190          * Call the swapout routines of all segments in the address
2191          * space to do the actual work, accumulating the amount of
2192          * space reclaimed.
2193          */
2194         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
2195                 struct seg_ops *ov = seg->s_ops;
2196 
2197                 /*
2198                  * We have to check to see if the seg has
2199                  * an ops vector because the seg may have
2200                  * been in the middle of being set up when
2201                  * the process was picked for swapout.
2202                  */
2203                 if ((ov != NULL) && (ov->swapout != NULL))
2204                         swpcnt += segop_swapout(seg);
2205         }
2206         AS_LOCK_EXIT(as, &as->a_lock);
2207         return (swpcnt);
2208 }
2209 
2210 /*
2211  * Determine whether data from the mappings in interval [addr, addr + size)
2212  * are in the primary memory (core) cache.
2213  */
2214 int
2215 as_incore(struct as *as, caddr_t addr,


2615  * as expected by the caller.  Save pointers to per segment shadow lists at
2616  * the tail of plist so that they can be used during as_pageunlock().
2617  */
2618 static int
2619 as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp,
2620     caddr_t addr, size_t size, enum seg_rw rw)
2621 {
2622         caddr_t sv_addr = addr;
2623         size_t sv_size = size;
2624         struct seg *sv_seg = seg;
2625         ulong_t segcnt = 1;
2626         ulong_t cnt;
2627         size_t ssize;
2628         pgcnt_t npages = btop(size);
2629         page_t **plist;
2630         page_t **pl;
2631         int error;
2632         caddr_t eaddr;
2633         faultcode_t fault_err = 0;
2634         pgcnt_t pl_off;
2635         extern struct seg_ops segspt_shmops;
2636 
2637         ASSERT(AS_LOCK_HELD(as, &as->a_lock));
2638         ASSERT(seg != NULL);
2639         ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2640         ASSERT(addr + size > seg->s_base + seg->s_size);
2641         ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2642         ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2643 
2644         /*
2645          * Count the number of segments covered by the range we are about to
2646          * lock. The segment count is used to size the shadow list we return
2647          * back to the caller.
2648          */
2649         for (; size != 0; size -= ssize, addr += ssize) {
2650                 if (addr >= seg->s_base + seg->s_size) {
2651 
2652                         seg = AS_SEGNEXT(as, seg);
2653                         if (seg == NULL || addr != seg->s_base) {
2654                                 AS_LOCK_EXIT(as, &as->a_lock);
2655                                 return (EFAULT);

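Per the comment at the top of as_pagelock_segs(), the shadow-list pointers for each covered segment ride at the tail of the returned plist, so the allocation is sized as npages plus one slot per segment. A tiny, hedged sketch of that layout; plist_alloc() and the opaque page_t here are invented stand-ins:

#include <stdlib.h>

typedef struct page page_t;		/* opaque stand-in */

/*
 * Shadow-list layout inferred from the comment above: npages page
 * pointers up front, one per-segment shadow slot at the tail.
 */
static page_t **
plist_alloc(size_t npages, size_t segcnt)
{
	return (calloc(npages + segcnt, sizeof (page_t *)));
}

int
main(void)
{
	size_t npages = 6, segcnt = 2;
	page_t **plist = plist_alloc(npages, segcnt);
	int rc = (plist == NULL);

	/* plist[npages] .. plist[npages + segcnt - 1]: per-seg shadows. */
	free(plist);
	return (rc);
}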

New version of the same hunks, with the seg_ops structures and cached pointers const-ified:

 457         seg = avl_find(&as->a_segtree, &addr, &where);
 458 
 459         if (seg == NULL)
 460                 seg = avl_nearest(&as->a_segtree, where, AVL_AFTER);
 461 
 462         if (seg == NULL)
 463                 seg = avl_last(&as->a_segtree);
 464 
 465         if (seg != NULL) {
 466                 caddr_t base = seg->s_base;
 467 
 468                 /*
 469                  * If top of seg is below the requested address, then
 470                  * the insertion point is at the end of the linked list,
 471                  * and seg points to the tail of the list.  Otherwise,
 472                  * the insertion point is immediately before seg.
 473                  */
 474                 if (base + seg->s_size > addr) {
 475                         if (addr >= base || eaddr > base) {
 476 #ifdef __sparc
 477                                 extern const struct seg_ops segnf_ops;
 478 
 479                                 /*
 480                                  * no-fault segs must disappear if overlaid.
 481                                  * XXX need new segment type so
 482                                  * we don't have to check s_ops
 483                                  */
 484                                 if (seg->s_ops == &segnf_ops) {
 485                                         seg_unmap(seg);
 486                                         goto again;
 487                                 }
 488 #endif
 489                                 return (-1);    /* overlapping segment */
 490                         }
 491                 }
 492         }
 493         as->a_seglast = newseg;
 494         avl_insert(&as->a_segtree, newseg, where);
 495 
 496 #ifdef VERIFY_SEGLIST
 497         as_verify(as);


 865 }
 866 
 867 /*
 868  * Handle a ``fault'' at addr for size bytes.
 869  */
 870 faultcode_t
 871 as_fault(struct hat *hat, struct as *as, caddr_t addr, size_t size,
 872         enum fault_type type, enum seg_rw rw)
 873 {
 874         struct seg *seg;
 875         caddr_t raddr;                  /* rounded down addr */
 876         size_t rsize;                   /* rounded up size */
 877         size_t ssize;
 878         faultcode_t res = 0;
 879         caddr_t addrsav;
 880         struct seg *segsav;
 881         int as_lock_held;
 882         klwp_t *lwp = ttolwp(curthread);
 883         int is_xhat = 0;
 884         int holding_wpage = 0;



 885 
 886         if (as->a_hat != hat) {
 887                 /* This must be an XHAT then */
 888                 is_xhat = 1;
 889 
 890                 if ((type != F_INVAL) || (as == &kas))
 891                         return (FC_NOSUPPORT);
 892         }
 893 
 894 retry:
 895         if (!is_xhat) {
 896                 /*
 897                  * Indicate that the lwp is not to be stopped while waiting
 898                  * for a pagefault.  This is to avoid deadlock while debugging
 899                  * a process via /proc over NFS (in particular).
 900                  */
 901                 if (lwp != NULL)
 902                         lwp->lwp_nostop++;
 903 
 904                 /*


2068  * -1 is returned.
2069  *
2070  * NOTE: This routine is not correct when base+len overflows caddr_t.
2071  */
2072 int
2073 as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
2074     caddr_t addr)
2075 {
2076 
2077         return (as_gap_aligned(as, minlen, basep, lenp, flags, addr, 0, 0, 0));
2078 }
2079 
2080 /*
2081  * Return the next range within [base, base + len) that is backed
2082  * with "real memory".  Skip holes and non-seg_vn segments.
2083  * We're lazy and only return one segment at a time.
2084  */
2085 int
2086 as_memory(struct as *as, caddr_t *basep, size_t *lenp)
2087 {
2088         extern const struct seg_ops segspt_shmops; /* needs a header file */
2089         struct seg *seg;
2090         caddr_t addr, eaddr;
2091         caddr_t segend;
2092 
2093         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2094 
2095         addr = *basep;
2096         eaddr = addr + *lenp;
2097 
2098         seg = as_findseg(as, addr, 0);
2099         if (seg != NULL)
2100                 addr = MAX(seg->s_base, addr);
2101 
2102         for (;;) {
2103                 if (seg == NULL || addr >= eaddr || eaddr <= seg->s_base) {
2104                         AS_LOCK_EXIT(as, &as->a_lock);
2105                         return (EINVAL);
2106                 }
2107 
2108                 if (seg->s_ops == &segvn_ops) {


2172         /*
2173          * Free all mapping resources associated with the address
2174          * space.  The segment-level swapout routines capitalize
2175  * on this unmapping by scavenging pages that have become
2176          * unmapped here.
2177          */
2178         hat_swapout(as->a_hat);
2179         if (as->a_xhat != NULL)
2180                 xhat_swapout_all(as);
2181 
2182         mutex_enter(&as->a_contents);
2183         AS_CLRBUSY(as);
2184         mutex_exit(&as->a_contents);
2185 
2186         /*
2187          * Call the swapout routines of all segments in the address
2188          * space to do the actual work, accumulating the amount of
2189          * space reclaimed.
2190          */
2191         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
2192                 const struct seg_ops *ov = seg->s_ops;
2193 
2194                 /*
2195                  * We have to check to see if the seg has
2196                  * an ops vector because the seg may have
2197                  * been in the middle of being set up when
2198                  * the process was picked for swapout.
2199                  */
2200                 if ((ov != NULL) && (ov->swapout != NULL))
2201                         swpcnt += segop_swapout(seg);
2202         }
2203         AS_LOCK_EXIT(as, &as->a_lock);
2204         return (swpcnt);
2205 }
2206 
2207 /*
2208  * Determine whether data from the mappings in interval [addr, addr + size)
2209  * are in the primary memory (core) cache.
2210  */
2211 int
2212 as_incore(struct as *as, caddr_t addr,

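Note the knock-on effect visible in the swapout loop above: once seg->s_ops points to const, any local caching it (ov) must itself be const-qualified or the assignment no longer compiles. A two-function sketch of the rule, with an illustrative subset of seg_ops:

#include <stddef.h>

struct seg;

struct seg_ops {
	size_t	(*swapout)(struct seg *);	/* illustrative subset */
};

static size_t
consumer_swap(const struct seg_ops *s_ops, struct seg *seg)
{
	/*
	 * 'struct seg_ops *ov = s_ops;' would now be rejected for
	 * discarding const; the cached pointer is const as well.
	 */
	const struct seg_ops *ov = s_ops;

	/* Guard both the vector and the entry, as the loop above does. */
	if (ov != NULL && ov->swapout != NULL)
		return (ov->swapout(seg));
	return (0);
}

int
main(void)
{
	static const struct seg_ops nil_ops = { NULL };

	return ((int)consumer_swap(&nil_ops, NULL));
}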

2612  * as expected by the caller.  Save pointers to per segment shadow lists at
2613  * the tail of plist so that they can be used during as_pageunlock().
2614  */
2615 static int
2616 as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp,
2617     caddr_t addr, size_t size, enum seg_rw rw)
2618 {
2619         caddr_t sv_addr = addr;
2620         size_t sv_size = size;
2621         struct seg *sv_seg = seg;
2622         ulong_t segcnt = 1;
2623         ulong_t cnt;
2624         size_t ssize;
2625         pgcnt_t npages = btop(size);
2626         page_t **plist;
2627         page_t **pl;
2628         int error;
2629         caddr_t eaddr;
2630         faultcode_t fault_err = 0;
2631         pgcnt_t pl_off;
2632         extern const struct seg_ops segspt_shmops;
2633 
2634         ASSERT(AS_LOCK_HELD(as, &as->a_lock));
2635         ASSERT(seg != NULL);
2636         ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2637         ASSERT(addr + size > seg->s_base + seg->s_size);
2638         ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2639         ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2640 
2641         /*
2642          * Count the number of segments covered by the range we are about to
2643          * lock. The segment count is used to size the shadow list we return
2644          * back to the caller.
2645          */
2646         for (; size != 0; size -= ssize, addr += ssize) {
2647                 if (addr >= seg->s_base + seg->s_size) {
2648 
2649                         seg = AS_SEGNEXT(as, seg);
2650                         if (seg == NULL || addr != seg->s_base) {
2651                                 AS_LOCK_EXIT(as, &as->a_lock);
2652                                 return (EFAULT);