6147 segop_getpolicy already checks for a NULL op
Reviewed by: Garrett D'Amore <garrett@damore.org>
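
The change is safe because the generic dispatcher already tolerates a missing op: segop_getpolicy() returns NULL on the caller's behalf whenever a segment driver leaves the hook unset. A minimal sketch of that pattern, assuming the usual shape of the illumos wrapper (the body below is a reconstruction, not quoted from the source):

    lgrp_mem_policy_info_t *
    segop_getpolicy(struct seg *seg, caddr_t addr)
    {
            /* Drivers that don't implement the op leave it NULL. */
            if (seg->s_ops->getpolicy == NULL)
                    return (NULL);

            return (seg->s_ops->getpolicy(seg, addr));
    }

With the NULL check centralized there, segdev_getpolicy() is dead code: it only ever returned NULL itself. The diff below therefore drops its prototype, its entry in segdev_ops, and its definition. Simply omitting the .getpolicy member from the designated initializer is enough, since C zero-fills unnamed members of the initializer, leaving the slot NULL.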

Before the change:
 166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
 167                     enum fault_type, enum seg_rw);
 168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
 169 static int      segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
 170 static int      segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
 171 static void     segdev_badop(void);
 172 static int      segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
 173 static size_t   segdev_incore(struct seg *, caddr_t, size_t, char *);
 174 static int      segdev_lockop(struct seg *, caddr_t, size_t, int, int,
 175                     ulong_t *, size_t);
 176 static int      segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
 177 static u_offset_t       segdev_getoffset(struct seg *, caddr_t);
 178 static int      segdev_gettype(struct seg *, caddr_t);
 179 static int      segdev_getvp(struct seg *, caddr_t, struct vnode **);
 180 static int      segdev_advise(struct seg *, caddr_t, size_t, uint_t);
 181 static void     segdev_dump(struct seg *);
 182 static int      segdev_pagelock(struct seg *, caddr_t, size_t,
 183                     struct page ***, enum lock_type, enum seg_rw);
 184 static int      segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
 185 static int      segdev_getmemid(struct seg *, caddr_t, memid_t *);
 186 static lgrp_mem_policy_info_t   *segdev_getpolicy(struct seg *, caddr_t);
 187 static int      segdev_capable(struct seg *, segcapability_t);
 188 
 189 /*
 190  * XXX  this struct is used by rootnex_map_fault to identify
 191  *      the segment it has been passed. So if you make it
 192  *      "static" you'll need to fix rootnex_map_fault.
 193  */
 194 struct seg_ops segdev_ops = {
 195         .dup            = segdev_dup,
 196         .unmap          = segdev_unmap,
 197         .free           = segdev_free,
 198         .fault          = segdev_fault,
 199         .faulta         = segdev_faulta,
 200         .setprot        = segdev_setprot,
 201         .checkprot      = segdev_checkprot,
 202         .kluster        = (int (*)())segdev_badop,
 203         .sync           = segdev_sync,
 204         .incore         = segdev_incore,
 205         .lockop         = segdev_lockop,
 206         .getprot        = segdev_getprot,
 207         .getoffset      = segdev_getoffset,
 208         .gettype        = segdev_gettype,
 209         .getvp          = segdev_getvp,
 210         .advise         = segdev_advise,
 211         .dump           = segdev_dump,
 212         .pagelock       = segdev_pagelock,
 213         .setpagesize    = segdev_setpagesize,
 214         .getmemid       = segdev_getmemid,
 215         .getpolicy      = segdev_getpolicy,
 216         .capable        = segdev_capable,
 217 };
 218 
 219 /*
 220  * Private segdev support routines
 221  */
 222 static struct segdev_data *sdp_alloc(void);
 223 
 224 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
 225     size_t, enum seg_rw);
 226 
 227 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
 228     struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
 229 
 230 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
 231     size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
 232 
 233 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
 234 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
 235 static void devmap_softlock_rele(devmap_handle_t *);


4011                     cp->type);
4012         }
4013 
4014         kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4015 }
4016 
4017 
4018 static int
4019 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4020 {
4021         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4022 
4023         /*
4024          * It looks as if it is always mapped shared
4025          */
4026         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4027             "segdev_getmemid:start");
4028         memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4029         memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4030         return (0);
4031 }
4032 
4033 /*ARGSUSED*/
4034 static lgrp_mem_policy_info_t *
4035 segdev_getpolicy(struct seg *seg, caddr_t addr)
4036 {
4037         return (NULL);
4038 }
4039 
4040 /*ARGSUSED*/
4041 static int
4042 segdev_capable(struct seg *seg, segcapability_t capability)
4043 {
4044         return (0);
4045 }
4046 
4047 /*
4048  * ddi_umem_alloc() non-pageable quantum cache max size.
4049  * This is just a SWAG.
4050  */
4051 #define DEVMAP_UMEM_QUANTUM     (8*PAGESIZE)
4052 
4053 /*
4054  * Initialize seg_dev from boot. This routine sets up the trash page
4055  * and creates the umem_np_arena used to back non-pageable memory
4056  * requests.
4057  */

After the change (segdev_getpolicy removed; subsequent lines renumbered):

 166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
 167                     enum fault_type, enum seg_rw);
 168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
 169 static int      segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
 170 static int      segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
 171 static void     segdev_badop(void);
 172 static int      segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
 173 static size_t   segdev_incore(struct seg *, caddr_t, size_t, char *);
 174 static int      segdev_lockop(struct seg *, caddr_t, size_t, int, int,
 175                     ulong_t *, size_t);
 176 static int      segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
 177 static u_offset_t       segdev_getoffset(struct seg *, caddr_t);
 178 static int      segdev_gettype(struct seg *, caddr_t);
 179 static int      segdev_getvp(struct seg *, caddr_t, struct vnode **);
 180 static int      segdev_advise(struct seg *, caddr_t, size_t, uint_t);
 181 static void     segdev_dump(struct seg *);
 182 static int      segdev_pagelock(struct seg *, caddr_t, size_t,
 183                     struct page ***, enum lock_type, enum seg_rw);
 184 static int      segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
 185 static int      segdev_getmemid(struct seg *, caddr_t, memid_t *);
 186 static int      segdev_capable(struct seg *, segcapability_t);
 187 
 188 /*
 189  * XXX  this struct is used by rootnex_map_fault to identify
 190  *      the segment it has been passed. So if you make it
 191  *      "static" you'll need to fix rootnex_map_fault.
 192  */
 193 struct seg_ops segdev_ops = {
 194         .dup            = segdev_dup,
 195         .unmap          = segdev_unmap,
 196         .free           = segdev_free,
 197         .fault          = segdev_fault,
 198         .faulta         = segdev_faulta,
 199         .setprot        = segdev_setprot,
 200         .checkprot      = segdev_checkprot,
 201         .kluster        = (int (*)())segdev_badop,
 202         .sync           = segdev_sync,
 203         .incore         = segdev_incore,
 204         .lockop         = segdev_lockop,
 205         .getprot        = segdev_getprot,
 206         .getoffset      = segdev_getoffset,
 207         .gettype        = segdev_gettype,
 208         .getvp          = segdev_getvp,
 209         .advise         = segdev_advise,
 210         .dump           = segdev_dump,
 211         .pagelock       = segdev_pagelock,
 212         .setpagesize    = segdev_setpagesize,
 213         .getmemid       = segdev_getmemid,
 214         .capable        = segdev_capable,
 215 };
 216 
 217 /*
 218  * Private segdev support routines
 219  */
 220 static struct segdev_data *sdp_alloc(void);
 221 
 222 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
 223     size_t, enum seg_rw);
 224 
 225 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
 226     struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
 227 
 228 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
 229     size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
 230 
 231 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
 232 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
 233 static void devmap_softlock_rele(devmap_handle_t *);


4009                     cp->type);
4010         }
4011 
4012         kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4013 }
4014 
4015 
4016 static int
4017 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4018 {
4019         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4020 
4021         /*
4022          * It looks as if it is always mapped shared
4023          */
4024         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4025             "segdev_getmemid:start");
4026         memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4027         memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4028         return (0);
4029 }
4030 
4031 /*ARGSUSED*/
4032 static int
4033 segdev_capable(struct seg *seg, segcapability_t capability)
4034 {
4035         return (0);
4036 }
4037 
4038 /*
4039  * ddi_umem_alloc() non-pageable quantum cache max size.
4040  * This is just a SWAG.
4041  */
4042 #define DEVMAP_UMEM_QUANTUM     (8*PAGESIZE)
4043 
4044 /*
4045  * Initialize seg_dev from boot. This routine sets up the trash page
4046  * and creates the umem_np_arena used to back non-pageable memory
4047  * requests.
4048  */
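
For context on the initializer that comment describes, here is a hedged sketch of how DEVMAP_UMEM_QUANTUM is presumably wired in: the boot-time routine creates umem_np_arena with the quantum-cache ceiling set to DEVMAP_UMEM_QUANTUM, so non-pageable ddi_umem_alloc() requests of up to eight pages are satisfied from cached magazines. The call shape follows the stock vmem_create() signature (name, base, size, quantum, afunc, ffunc, source, qcache_max, vmflag); the backing-function names are assumptions, not quoted from the file:

    /* Assumed reconstruction of the arena setup in segdev_init(). */
    umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
        devmap_alloc_pages, devmap_free_pages, heap_arena,
        DEVMAP_UMEM_QUANTUM, VM_SLEEP);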