6149 use NULL capable segop as a shorthand for no-capabilities
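The change below relies on the generic segop dispatch treating a NULL .capable pointer as "no capabilities", so trivial always-return-0 implementations can be dropped. A minimal sketch of that idea follows; the name segop_capable_sketch() and its exact placement are illustrative assumptions, not the actual illumos wrapper (assumes <vm/seg.h> for struct seg and segcapability_t):

        static int
        segop_capable_sketch(struct seg *seg, segcapability_t capability)
        {
                /* A NULL capable op is shorthand for "no capabilities". */
                if (seg->s_ops->capable == NULL)
                        return (0);
                return (seg->s_ops->capable(seg, capability));
        }

With a dispatcher along these lines, segdev_capable() can be deleted outright and the .capable initializer omitted from segdev_ops, as the diff below does.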

          --- old/usr/src/uts/common/vm/seg_dev.c
          +++ new/usr/src/uts/common/vm/seg_dev.c
[ 175 lines elided ]
 176  176  static int      segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
 177  177  static u_offset_t       segdev_getoffset(struct seg *, caddr_t);
 178  178  static int      segdev_gettype(struct seg *, caddr_t);
 179  179  static int      segdev_getvp(struct seg *, caddr_t, struct vnode **);
 180  180  static int      segdev_advise(struct seg *, caddr_t, size_t, uint_t);
 181  181  static void     segdev_dump(struct seg *);
 182  182  static int      segdev_pagelock(struct seg *, caddr_t, size_t,
 183  183                      struct page ***, enum lock_type, enum seg_rw);
 184  184  static int      segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
 185  185  static int      segdev_getmemid(struct seg *, caddr_t, memid_t *);
 186      -static int      segdev_capable(struct seg *, segcapability_t);
 187  186  
 188  187  /*
 189  188   * XXX  this struct is used by rootnex_map_fault to identify
 190  189   *      the segment it has been passed. So if you make it
 191  190   *      "static" you'll need to fix rootnex_map_fault.
 192  191   */
 193  192  struct seg_ops segdev_ops = {
 194  193          .dup            = segdev_dup,
 195  194          .unmap          = segdev_unmap,
 196  195          .free           = segdev_free,
[ 7 lines elided ]
 204  203          .lockop         = segdev_lockop,
 205  204          .getprot        = segdev_getprot,
 206  205          .getoffset      = segdev_getoffset,
 207  206          .gettype        = segdev_gettype,
 208  207          .getvp          = segdev_getvp,
 209  208          .advise         = segdev_advise,
 210  209          .dump           = segdev_dump,
 211  210          .pagelock       = segdev_pagelock,
 212  211          .setpagesize    = segdev_setpagesize,
 213  212          .getmemid       = segdev_getmemid,
 214      -        .capable        = segdev_capable,
 215  213  };
 216  214  
 217  215  /*
 218  216   * Private segdev support routines
 219  217   */
 220  218  static struct segdev_data *sdp_alloc(void);
 221  219  
 222  220  static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
 223  221      size_t, enum seg_rw);
 224  222  
[ 3793 lines elided ]
4018 4016  {
4019 4017          struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4020 4018  
4021 4019          /*
4022 4020           * It looks as if it is always mapped shared
4023 4021           */
4024 4022          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4025 4023              "segdev_getmemid:start");
4026 4024          memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4027 4025          memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4028      -        return (0);
4029      -}
4030      -
4031      -/*ARGSUSED*/
4032      -static int
4033      -segdev_capable(struct seg *seg, segcapability_t capability)
4034      -{
4035 4026          return (0);
4036 4027  }
4037 4028  
4038 4029  /*
4039 4030   * ddi_umem_alloc() non-pageable quantum cache max size.
4040 4031   * This is just a SWAG.
4041 4032   */
4042 4033  #define DEVMAP_UMEM_QUANTUM     (8*PAGESIZE)
4043 4034  
4044 4035  /*
[ 47 lines elided ]