/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - segment of a mapped device.
 *
 * This segment driver is used when mapping character special devices.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/systm.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/ddidevmap.h>
#include <sys/ddi_implfuncs.h>
#include <sys/lgrp.h>

#include <vm/page.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/vpage.h>

#include <sys/sunddi.h>
#include <sys/esunddi.h>
#include <sys/fs/snode.h>


#if DEBUG
int segdev_debug;
#define DEBUGF(level, args) { if (segdev_debug >= (level)) cmn_err args; }
#else
#define DEBUGF(level, args)
#endif

/* Default timeout for devmap context management */
#define CTX_TIMEOUT_VALUE 0

#define HOLD_DHP_LOCK(dhp)  if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
            { mutex_enter(&dhp->dh_lock); }

#define RELE_DHP_LOCK(dhp)  if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
            { mutex_exit(&dhp->dh_lock); }

#define round_down_p2(a, s) ((a) & ~((s) - 1))
#define round_up_p2(a, s)   (((a) + (s) - 1) & ~((s) - 1))

/*
 * VA_PA_ALIGNED checks to see if both VA and PA are on a pgsize boundary.
 * VA_PA_PGSIZE_ALIGNED checks to see if VA is aligned with PA w.r.t. pgsize.
 */
#define VA_PA_ALIGNED(uvaddr, paddr, pgsize) \
    (((uvaddr | paddr) & (pgsize - 1)) == 0)
#define VA_PA_PGSIZE_ALIGNED(uvaddr, paddr, pgsize) \
    (((uvaddr ^ paddr) & (pgsize - 1)) == 0)
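/*
 * Illustrative example (not part of the build): with pgsize 0x10000,
 * uvaddr 0x30000 and paddr 0x50000 satisfy both macros, while uvaddr
 * 0x31000 and paddr 0x51000 satisfy only VA_PA_PGSIZE_ALIGNED: the two
 * addresses share the same offset within a pgsize chunk even though
 * neither lies on a pgsize boundary itself.
 */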
#define vpgtob(n)   ((n) * sizeof (struct vpage))   /* For brevity */

#define VTOCVP(vp)  (VTOS(vp)->s_commonvp)  /* we "know" it's an snode */

static struct devmap_ctx *devmapctx_list = NULL;
static struct devmap_softlock *devmap_slist = NULL;

/*
 * mutex, vnode and page for the page of zeros we use for the trash mappings.
 * One trash page is allocated on the first ddi_umem_setup call that uses it.
 * XXX Eventually, we may want to combine this with what segnf does when all
 * hat layers implement HAT_NOFAULT.
 *
 * The trash page is used when the backing store for a userland mapping is
 * removed but the application semantics do not take kindly to a SIGBUS.
 * In that scenario, the application's pages are mapped to some dummy page
 * which returns garbage on read and writes go into a common place.
 * (Perfect for NO_FAULT semantics)
 * The device driver is responsible for communicating to the app, with some
 * other mechanism, that such remapping has happened and the app should take
 * corrective action.
 * We could also use an anonymous memory page as there is no requirement to
 * keep the page locked, however this complicates the fault code. RFE.
 */
static struct vnode trashvp;
static struct page *trashpp;

/* Non-pageable kernel memory is allocated from the umem_np_arena. */
static vmem_t *umem_np_arena;

/* Set the cookie to a value we know will never be a valid umem_cookie */
#define DEVMAP_DEVMEM_COOKIE    ((ddi_umem_cookie_t)0x1)

/*
 * Macros to check the type of a devmap handle
 */
#define cookie_is_devmem(c) \
    ((c) == (struct ddi_umem_cookie *)DEVMAP_DEVMEM_COOKIE)

#define cookie_is_pmem(c) \
    ((c) == (struct ddi_umem_cookie *)DEVMAP_PMEM_COOKIE)

#define cookie_is_kpmem(c)  (!cookie_is_devmem(c) && !cookie_is_pmem(c) &&\
    ((c)->type == KMEM_PAGEABLE))

#define dhp_is_devmem(dhp) \
    (cookie_is_devmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))

#define dhp_is_pmem(dhp) \
    (cookie_is_pmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))

#define dhp_is_kpmem(dhp) \
    (cookie_is_kpmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
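/*
 * Illustrative summary (sketch, derived from the fault code below): a
 * devmap handle's dh_cookie identifies the backing store.  The sentinel
 * DEVMAP_DEVMEM_COOKIE means device memory (pfn taken from dh_pfn),
 * DEVMAP_PMEM_COOKIE means pmem-allocated pages (pfn taken from the
 * dp_pparray of the pmem cookie), and anything else is a real
 * ddi_umem_cookie whose ->type field (KMEM_PAGEABLE, KMEM_NON_PAGEABLE,
 * UMEM_LOCKED, UMEM_TRASH) selects how segdev_faultpage resolves a pfn.
 */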
/*
 * Private seg op routines.
 */
static int  segdev_dup(struct seg *, struct seg *);
static int  segdev_unmap(struct seg *, caddr_t, size_t);
static void segdev_free(struct seg *);
static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
            enum fault_type, enum seg_rw);
static faultcode_t segdev_faulta(struct seg *, caddr_t);
static int  segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
static int  segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
static void segdev_badop(void);
static int  segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
static size_t   segdev_incore(struct seg *, caddr_t, size_t, char *);
static int  segdev_lockop(struct seg *, caddr_t, size_t, int, int,
            ulong_t *, size_t);
static int  segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
static u_offset_t   segdev_getoffset(struct seg *, caddr_t);
static int  segdev_gettype(struct seg *, caddr_t);
static int  segdev_getvp(struct seg *, caddr_t, struct vnode **);
static int  segdev_advise(struct seg *, caddr_t, size_t, uint_t);
static void segdev_dump(struct seg *);
static int  segdev_pagelock(struct seg *, caddr_t, size_t,
            struct page ***, enum lock_type, enum seg_rw);
static int  segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
static int  segdev_getmemid(struct seg *, caddr_t, memid_t *);
static int  segdev_capable(struct seg *, segcapability_t);

/*
 * XXX  this struct is used by rootnex_map_fault to identify
 *  the segment it has been passed. So if you make it
 *  "static" you'll need to fix rootnex_map_fault.
 */
struct seg_ops segdev_ops = {
    .dup        = segdev_dup,
    .unmap      = segdev_unmap,
    .free       = segdev_free,
    .fault      = segdev_fault,
    .faulta     = segdev_faulta,
    .setprot    = segdev_setprot,
    .checkprot  = segdev_checkprot,
    .kluster    = (int (*)())segdev_badop,
    .sync       = segdev_sync,
    .incore     = segdev_incore,
    .lockop     = segdev_lockop,
    .getprot    = segdev_getprot,
    .getoffset  = segdev_getoffset,
    .gettype    = segdev_gettype,
    .getvp      = segdev_getvp,
    .advise     = segdev_advise,
    .dump       = segdev_dump,
    .pagelock   = segdev_pagelock,
    .setpagesize    = segdev_setpagesize,
    .getmemid   = segdev_getmemid,
    .capable    = segdev_capable,
};
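/*
 * For orientation (sketch): the VM layer dispatches through s_ops, so a
 * fault on an address backed by this driver reaches segdev_fault from
 * as_fault() via something like
 *
 *	(*seg->s_ops->fault)(hat, seg, addr, len, type, rw);
 *
 * segdev_create below installs &segdev_ops in s_ops.
 */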
/*
 * Private segdev support routines
 */
static struct segdev_data *sdp_alloc(void);

static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
    size_t, enum seg_rw);

static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
    struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);

static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
    size_t, enum fault_type, enum seg_rw, devmap_handle_t *);

static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
static void devmap_softlock_rele(devmap_handle_t *);
static void devmap_ctx_rele(devmap_handle_t *);

static void devmap_ctxto(void *);

static devmap_handle_t *devmap_find_handle(devmap_handle_t *dhp_head,
    caddr_t addr);

static ulong_t devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
    ulong_t *opfn, ulong_t *pagesize);

static void free_devmap_handle(devmap_handle_t *dhp);

static int devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
    struct seg *newseg);

static devmap_handle_t *devmap_handle_unmap(devmap_handle_t *dhp);

static void devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len);

static void devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr);

static int devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
    offset_t off, size_t len, uint_t flags);

static void devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len,
    caddr_t addr, size_t *llen, caddr_t *laddr);

static void devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len);

static void *devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag);
static void devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size);

static void *devmap_umem_alloc_np(size_t size, size_t flags);
static void devmap_umem_free_np(void *addr, size_t size);

/*
 * routines to lock and unlock underlying segkp segment for
 * KMEM_PAGEABLE type cookies.
 */
static faultcode_t acquire_kpmem_lock(struct ddi_umem_cookie *, size_t);
static void release_kpmem_lock(struct ddi_umem_cookie *, size_t);

/*
 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
 * drivers with devmap_access callbacks
 */
static int devmap_softlock_enter(struct devmap_softlock *, size_t,
    enum fault_type);
static void devmap_softlock_exit(struct devmap_softlock *, size_t,
    enum fault_type);

static kmutex_t devmapctx_lock;

static kmutex_t devmap_slock;

/*
 * Initialize the thread callbacks and thread private data.
 */
static struct devmap_ctx *
devmap_ctxinit(dev_t dev, ulong_t id)
{
    struct devmap_ctx *devctx;
    struct devmap_ctx *tmp;
    dev_info_t *dip;

    tmp = kmem_zalloc(sizeof (struct devmap_ctx), KM_SLEEP);

    mutex_enter(&devmapctx_lock);

    dip = e_ddi_hold_devi_by_dev(dev, 0);
    ASSERT(dip != NULL);
    ddi_release_devi(dip);

    for (devctx = devmapctx_list; devctx != NULL; devctx = devctx->next)
        if ((devctx->dip == dip) && (devctx->id == id))
            break;

    if (devctx == NULL) {
        devctx = tmp;
        devctx->dip = dip;
        devctx->id = id;
        mutex_init(&devctx->lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&devctx->cv, NULL, CV_DEFAULT, NULL);
        devctx->next = devmapctx_list;
        devmapctx_list = devctx;
    } else
        kmem_free(tmp, sizeof (struct devmap_ctx));

    mutex_enter(&devctx->lock);
    devctx->refcnt++;
    mutex_exit(&devctx->lock);
    mutex_exit(&devmapctx_lock);

    return (devctx);
}

/*
 * Timeout callback called if a CPU has not given up the device context
 * within dhp->dh_timeout_length ticks
 */
static void
devmap_ctxto(void *data)
{
    struct devmap_ctx *devctx = data;

    TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_CTXTO,
        "devmap_ctxto:timeout expired, devctx=%p", (void *)devctx);
    mutex_enter(&devctx->lock);
    /*
     * Set oncpu = 0 so the next mapping trying to get the device context
     * can.
     */
    devctx->oncpu = 0;
    devctx->timeout = 0;
    cv_signal(&devctx->cv);
    mutex_exit(&devctx->lock);
}
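/*
 * Note (assumption based on the devmap context-management framework,
 * which lives outside this excerpt): devmap_ctxto is the handler armed
 * with timeout(9F) when a thread takes ownership of the device context,
 * roughly
 *
 *	devctx->timeout = timeout(devmap_ctxto, devctx,
 *	    dhp->dh_timeout_length);
 *
 * so a CPU that never releases the context cannot hold it forever;
 * CTX_TIMEOUT_VALUE above is the default for dh_timeout_length.
 */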
/*
 * Create a device segment.
 */
int
segdev_create(struct seg *seg, void *argsp)
{
    struct segdev_data *sdp;
    struct segdev_crargs *a = (struct segdev_crargs *)argsp;
    devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
    int error;

    /*
     * Since the address space is "write" locked, we
     * don't need the segment lock to protect "segdev" data.
     */
    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

    sdp = sdp_alloc();

    sdp->mapfunc = a->mapfunc;
    sdp->offset = a->offset;
    sdp->prot = a->prot;
    sdp->maxprot = a->maxprot;
    sdp->type = a->type;
    sdp->pageprot = 0;
    sdp->softlockcnt = 0;
    sdp->vpage = NULL;

    if (sdp->mapfunc == NULL)
        sdp->devmap_data = dhp;
    else
        sdp->devmap_data = dhp = NULL;

    sdp->hat_flags = a->hat_flags;
    sdp->hat_attr = a->hat_attr;

    /*
     * Currently, hat_flags supports only HAT_LOAD_NOCONSIST
     */
    ASSERT(!(sdp->hat_flags & ~HAT_LOAD_NOCONSIST));

    /*
     * Hold shadow vnode -- segdev only deals with
     * character (VCHR) devices.  We use the common
     * vp to hang pages on.
     */
    sdp->vp = specfind(a->dev, VCHR);
    ASSERT(sdp->vp != NULL);

    seg->s_ops = &segdev_ops;
    seg->s_data = sdp;

    while (dhp != NULL) {
        dhp->dh_seg = seg;
        dhp = dhp->dh_next;
    }

    /*
     * Inform the vnode of the new mapping.
     */
    /*
     * It is ok to pass sdp->maxprot to ADDMAP rather than to use a
     * dhp specific maxprot because spec_addmap does not use maxprot.
     */
    error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
        seg->s_as, seg->s_base, seg->s_size,
        sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);

    if (error != 0) {
        sdp->devmap_data = NULL;
        hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
            HAT_UNLOAD_UNMAP);
    } else {
        /*
         * Mappings of /dev/null don't count towards the VSZ of a
         * process.  Mappings of /dev/null have no mapping type.
         */
        if ((segop_gettype(seg, seg->s_base) & (MAP_SHARED |
            MAP_PRIVATE)) == 0) {
            seg->s_as->a_resvsize -= seg->s_size;
        }
    }

    return (error);
}
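/*
 * Illustrative call path (sketch): segdev_create is not called directly
 * by drivers; a driver's segmap(9E) entry typically ends up in
 * spec_segmap() or ddi_devmap_segmap(), which fill in a struct
 * segdev_crargs and create the segment with
 *
 *	as_map(as, *addrp, len, segdev_create, &dev_a);
 *
 * as_map() takes the address space write lock, which is why the ASSERT
 * above only checks AS_WRITE_HELD rather than taking any lock itself.
 */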
static struct segdev_data *
sdp_alloc(void)
{
    struct segdev_data *sdp;

    sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
    rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);

    return (sdp);
}

/*
 * Duplicate seg and return new segment in newseg.
 */
static int
segdev_dup(struct seg *seg, struct seg *newseg)
{
    struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    struct segdev_data *newsdp;
    devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
    size_t npages;
    int ret;

    TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
        "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);

    DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
        (void *)dhp, (void *)seg));

    /*
     * Since the address space is "write" locked, we
     * don't need the segment lock to protect "segdev" data.
     */
    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    newsdp = sdp_alloc();

    newseg->s_ops = seg->s_ops;
    newseg->s_data = (void *)newsdp;

    VN_HOLD(sdp->vp);
    newsdp->vp = sdp->vp;
    newsdp->mapfunc = sdp->mapfunc;
    newsdp->offset = sdp->offset;
    newsdp->pageprot = sdp->pageprot;
    newsdp->prot = sdp->prot;
    newsdp->maxprot = sdp->maxprot;
    newsdp->type = sdp->type;
    newsdp->hat_attr = sdp->hat_attr;
    newsdp->hat_flags = sdp->hat_flags;
    newsdp->softlockcnt = 0;

    /*
     * Initialize per page data if the segment we are
     * dup'ing has per page information.
     */
    npages = seg_pages(newseg);

    if (sdp->vpage != NULL) {
        size_t nbytes = vpgtob(npages);

        newsdp->vpage = kmem_zalloc(nbytes, KM_SLEEP);
        bcopy(sdp->vpage, newsdp->vpage, nbytes);
    } else
        newsdp->vpage = NULL;

    /*
     * duplicate devmap handles
     */
    if (dhp != NULL) {
        ret = devmap_handle_dup(dhp,
            (devmap_handle_t **)&newsdp->devmap_data, newseg);
        if (ret != 0) {
            TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DUP_CK1,
                "segdev_dup:ret1 ret=%x, dhp=%p seg=%p",
                ret, (void *)dhp, (void *)seg);
            DEBUGF(1, (CE_CONT,
                "segdev_dup: ret %x dhp %p seg %p\n",
                ret, (void *)dhp, (void *)seg));
            return (ret);
        }
    }

    /*
     * Inform the common vnode of the new mapping.
     */
    return (VOP_ADDMAP(VTOCVP(newsdp->vp),
        newsdp->offset, newseg->s_as,
        newseg->s_base, newseg->s_size, newsdp->prot,
        newsdp->maxprot, sdp->type, CRED(), NULL));
}

/*
 * duplicate devmap handles
 */
static int
devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
    struct seg *newseg)
{
    devmap_handle_t *newdhp_save = NULL;
    devmap_handle_t *newdhp = NULL;
    struct devmap_callback_ctl *callbackops;

    while (dhp != NULL) {
        newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);

        /* Need to lock the original dhp while copying if REMAP */
        HOLD_DHP_LOCK(dhp);
        bcopy(dhp, newdhp, sizeof (devmap_handle_t));
        RELE_DHP_LOCK(dhp);
        newdhp->dh_seg = newseg;
        newdhp->dh_next = NULL;
        if (newdhp_save != NULL)
            newdhp_save->dh_next = newdhp;
        else
            *new_dhp = newdhp;
        newdhp_save = newdhp;

        callbackops = &newdhp->dh_callbackops;

        if (dhp->dh_softlock != NULL)
            newdhp->dh_softlock = devmap_softlock_init(
                newdhp->dh_dev,
                (ulong_t)callbackops->devmap_access);
        if (dhp->dh_ctx != NULL)
            newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
                (ulong_t)callbackops->devmap_access);

        /*
         * Initialize dh_lock if we want to do remap.
         */
        if (newdhp->dh_flags & DEVMAP_ALLOW_REMAP) {
            mutex_init(&newdhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
            newdhp->dh_flags |= DEVMAP_LOCK_INITED;
        }

        if (callbackops->devmap_dup != NULL) {
            int ret;

            /*
             * Call the dup callback so that the driver can
             * duplicate its private data.
             */
            ret = (*callbackops->devmap_dup)(dhp, dhp->dh_pvtp,
                (devmap_cookie_t *)newdhp, &newdhp->dh_pvtp);

            if (ret != 0) {
                /*
                 * We want to free up this segment as the driver
                 * has indicated that we can't dup it.  But we
                 * don't want to call the driver's devmap_unmap
                 * callback function as the driver does not
                 * think this segment exists.  The caller of
                 * devmap_dup will call seg_free on newseg
                 * as it was the caller that allocated the
                 * segment.
                 */
                DEBUGF(1, (CE_CONT, "devmap_handle_dup ERROR: "
                    "newdhp %p dhp %p\n", (void *)newdhp,
                    (void *)dhp));
                callbackops->devmap_unmap = NULL;
                return (ret);
            }
        }

        dhp = dhp->dh_next;
    }

    return (0);
}
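/*
 * For reference, a minimal driver-side devmap_dup(9E) callback of the
 * sort invoked above might look like this (a sketch; the names are
 * hypothetical):
 *
 *	static int
 *	xx_devmap_dup(devmap_cookie_t dhp, void *pvtp,
 *	    devmap_cookie_t new_dhp, void **new_pvtp)
 *	{
 *		struct xx_state *sp = pvtp;
 *
 *		mutex_enter(&sp->lock);
 *		sp->refcnt++;		share state with the child
 *		mutex_exit(&sp->lock);
 *		*new_pvtp = sp;
 *		return (0);		nonzero makes the dup fail
 *	}
 */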
/*
 * Split a segment at addr for length len.
 */
/*ARGSUSED*/
static int
segdev_unmap(struct seg *seg, caddr_t addr, size_t len)
{
    register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    register struct segdev_data *nsdp;
    register struct seg *nseg;
    register size_t opages;     /* old segment size in pages */
    register size_t npages;     /* new segment size in pages */
    register size_t dpages;     /* pages being deleted (unmapped) */
    register size_t nbytes;
    devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
    devmap_handle_t *dhpp;
    devmap_handle_t *newdhp;
    struct devmap_callback_ctl *callbackops;
    caddr_t nbase;
    offset_t off;
    ulong_t nsize;
    size_t mlen, sz;

    TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
        "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
        (void *)dhp, (void *)seg, (void *)addr, len);

    DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
        (void *)dhp, (void *)seg, (void *)addr, len));

    /*
     * Since the address space is "write" locked, we
     * don't need the segment lock to protect "segdev" data.
     */
    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    if ((sz = sdp->softlockcnt) > 0) {
        /*
         * Fail the unmap if pages are SOFTLOCKed through this mapping.
         * softlockcnt is protected from change by the as write lock.
         */
        TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
            "segdev_unmap:error softlockcnt = %ld", sz);
        DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
        return (EAGAIN);
    }

    /*
     * Check for bad sizes
     */
    if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
        (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
        panic("segdev_unmap");

    if (dhp != NULL) {
        devmap_handle_t *tdhp;
        /*
         * If large page size was used in hat_devload(),
         * the same page size must be used in hat_unload().
         */
        dhpp = tdhp = devmap_find_handle(dhp, addr);
        while (tdhp != NULL) {
            if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
                break;
            }
            tdhp = tdhp->dh_next;
        }
        if (tdhp != NULL) { /* found a dhp using large pages */
            size_t slen = len;
            size_t mlen;
            size_t soff;

            soff = (ulong_t)(addr - dhpp->dh_uvaddr);
            while (slen != 0) {
                mlen = MIN(slen, (dhpp->dh_len - soff));
                hat_unload(seg->s_as->a_hat, dhpp->dh_uvaddr,
                    dhpp->dh_len, HAT_UNLOAD_UNMAP);
                dhpp = dhpp->dh_next;
                ASSERT(slen >= mlen);
                slen -= mlen;
                soff = 0;
            }
        } else
            hat_unload(seg->s_as->a_hat, addr, len,
                HAT_UNLOAD_UNMAP);
    } else {
        /*
         * Unload any hardware translations in the range
         * to be taken out.
         */
        hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);
    }

    /*
     * get the user offset which will be used in the driver callbacks
     */
    off = sdp->offset + (offset_t)(addr - seg->s_base);
    /*
     * Inform the vnode of the unmapping.
     */
    ASSERT(sdp->vp != NULL);
    (void) VOP_DELMAP(VTOCVP(sdp->vp), off, seg->s_as, addr, len,
        sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);

    /*
     * Check for entire segment
     */
    if (addr == seg->s_base && len == seg->s_size) {
        seg_free(seg);
        return (0);
    }

    opages = seg_pages(seg);
    dpages = btop(len);
    npages = opages - dpages;

    /*
     * Check for beginning of segment
     */
    if (addr == seg->s_base) {
        if (sdp->vpage != NULL) {
            register struct vpage *ovpage;

            ovpage = sdp->vpage;    /* keep pointer to vpage */

            nbytes = vpgtob(npages);
            sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
            bcopy(&ovpage[dpages], sdp->vpage, nbytes);

            /* free up old vpage */
            kmem_free(ovpage, vpgtob(opages));
        }

        /*
         * free devmap handles from the beginning of the mapping.
         */
        if (dhp != NULL)
            devmap_handle_unmap_head(dhp, len);

        sdp->offset += (offset_t)len;

        seg->s_base += len;
        seg->s_size -= len;

        return (0);
    }

    /*
     * Check for end of segment
     */
    if (addr + len == seg->s_base + seg->s_size) {
        if (sdp->vpage != NULL) {
            register struct vpage *ovpage;

            ovpage = sdp->vpage;    /* keep pointer to vpage */

            nbytes = vpgtob(npages);
            sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
            bcopy(ovpage, sdp->vpage, nbytes);

            /* free up old vpage */
            kmem_free(ovpage, vpgtob(opages));
        }
        seg->s_size -= len;

        /*
         * free devmap handles from addr to the end of the mapping.
         */
        if (dhp != NULL)
            devmap_handle_unmap_tail(dhp, addr);

        return (0);
    }

    /*
     * The section to go is in the middle of the segment,
     * have to make it into two segments.  nseg is made for
     * the high end while seg is cut down at the low end.
     */
    nbase = addr + len;             /* new seg base */
    nsize = (seg->s_base + seg->s_size) - nbase;    /* new seg size */
    seg->s_size = addr - seg->s_base;   /* shrink old seg */
    nseg = seg_alloc(seg->s_as, nbase, nsize);
    if (nseg == NULL)
        panic("segdev_unmap seg_alloc");

    TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK2,
        "segdev_unmap: seg=%p nseg=%p", (void *)seg, (void *)nseg);
    DEBUGF(3, (CE_CONT, "segdev_unmap: segdev_dup seg %p nseg %p\n",
        (void *)seg, (void *)nseg));
    nsdp = sdp_alloc();

    nseg->s_ops = seg->s_ops;
    nseg->s_data = (void *)nsdp;

    VN_HOLD(sdp->vp);
    nsdp->mapfunc = sdp->mapfunc;
    nsdp->offset = sdp->offset + (offset_t)(nseg->s_base - seg->s_base);
    nsdp->vp = sdp->vp;
    nsdp->pageprot = sdp->pageprot;
    nsdp->prot = sdp->prot;
    nsdp->maxprot = sdp->maxprot;
    nsdp->type = sdp->type;
    nsdp->hat_attr = sdp->hat_attr;
    nsdp->hat_flags = sdp->hat_flags;
    nsdp->softlockcnt = 0;
    /*
     * Initialize per page data if the segment we are
     * dup'ing has per page information.
     */
    if (sdp->vpage != NULL) {
        /* need to split vpage into two arrays */
        register size_t nnbytes;
        register size_t nnpages;
        register struct vpage *ovpage;

        ovpage = sdp->vpage;        /* keep pointer to vpage */

        npages = seg_pages(seg);    /* seg has shrunk */
        nbytes = vpgtob(npages);
        nnpages = seg_pages(nseg);
        nnbytes = vpgtob(nnpages);

        sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
        bcopy(ovpage, sdp->vpage, nbytes);

        nsdp->vpage = kmem_alloc(nnbytes, KM_SLEEP);
        bcopy(&ovpage[npages + dpages], nsdp->vpage, nnbytes);

        /* free up old vpage */
        kmem_free(ovpage, vpgtob(opages));
    } else
        nsdp->vpage = NULL;

    /*
     * unmap dhps.
     */
    if (dhp == NULL) {
        nsdp->devmap_data = NULL;
        return (0);
    }
    while (dhp != NULL) {
        callbackops = &dhp->dh_callbackops;
        TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK3,
            "segdev_unmap: dhp=%p addr=%p", dhp, addr);
        DEBUGF(3, (CE_CONT, "unmap: dhp %p addr %p uvaddr %p len %lx\n",
            (void *)dhp, (void *)addr,
            (void *)dhp->dh_uvaddr, dhp->dh_len));

        if (addr == (dhp->dh_uvaddr + dhp->dh_len)) {
            dhpp = dhp->dh_next;
            dhp->dh_next = NULL;
            dhp = dhpp;
        } else if (addr > (dhp->dh_uvaddr + dhp->dh_len)) {
            dhp = dhp->dh_next;
        } else if (addr > dhp->dh_uvaddr &&
            (addr + len) < (dhp->dh_uvaddr + dhp->dh_len)) {
            /*
             * <addr, addr+len> is enclosed by dhp.
             * create a newdhp that begins at addr+len and
             * ends at dhp->dh_uvaddr+dhp->dh_len.
             */
            newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
            HOLD_DHP_LOCK(dhp);
            bcopy(dhp, newdhp, sizeof (devmap_handle_t));
            RELE_DHP_LOCK(dhp);
            newdhp->dh_seg = nseg;
            newdhp->dh_next = dhp->dh_next;
            if (dhp->dh_softlock != NULL)
                newdhp->dh_softlock = devmap_softlock_init(
                    newdhp->dh_dev,
                    (ulong_t)callbackops->devmap_access);
            if (dhp->dh_ctx != NULL)
                newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
                    (ulong_t)callbackops->devmap_access);
            if (newdhp->dh_flags & DEVMAP_LOCK_INITED) {
                mutex_init(&newdhp->dh_lock,
                    NULL, MUTEX_DEFAULT, NULL);
            }
            if (callbackops->devmap_unmap != NULL)
                (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
                    off, len, dhp, &dhp->dh_pvtp,
                    newdhp, &newdhp->dh_pvtp);
            mlen = len + (addr - dhp->dh_uvaddr);
            devmap_handle_reduce_len(newdhp, mlen);
            nsdp->devmap_data = newdhp;
            /* XX Changing len should recalculate LARGE flag */
            dhp->dh_len = addr - dhp->dh_uvaddr;
            dhpp = dhp->dh_next;
            dhp->dh_next = NULL;
            dhp = dhpp;
        } else if ((addr > dhp->dh_uvaddr) &&
            ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len))) {
            mlen = dhp->dh_len + dhp->dh_uvaddr - addr;
            /*
             * <addr, addr+len> spans over dhps.
             */
            if (callbackops->devmap_unmap != NULL)
                (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
                    off, mlen, (devmap_cookie_t *)dhp,
                    &dhp->dh_pvtp, NULL, NULL);
            /* XX Changing len should recalculate LARGE flag */
            dhp->dh_len = addr - dhp->dh_uvaddr;
            dhpp = dhp->dh_next;
            dhp->dh_next = NULL;
            dhp = dhpp;
            nsdp->devmap_data = dhp;
        } else if ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len)) {
            /*
             * dhp is enclosed by <addr, addr+len>.
             */
            dhp->dh_seg = nseg;
            nsdp->devmap_data = dhp;
            dhp = devmap_handle_unmap(dhp);
            nsdp->devmap_data = dhp; /* XX redundant? */
        } else if (((addr + len) > dhp->dh_uvaddr) &&
            ((addr + len) < (dhp->dh_uvaddr + dhp->dh_len))) {
            mlen = addr + len - dhp->dh_uvaddr;
            if (callbackops->devmap_unmap != NULL)
                (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
                    dhp->dh_uoff, mlen, NULL,
                    NULL, dhp, &dhp->dh_pvtp);
            devmap_handle_reduce_len(dhp, mlen);
            nsdp->devmap_data = dhp;
            dhp->dh_seg = nseg;
            dhp = dhp->dh_next;
        } else {
            dhp->dh_seg = nseg;
            dhp = dhp->dh_next;
        }
    }
    return (0);
}

/*
 * Utility function that handles reducing the length of a devmap handle
 * during unmap.
 * Note that this is only used for unmapping the front portion of the handle,
 * i.e., we bump the offset/pfn etc. up by len.
 * Do not use if reducing length at the tail.
 */
static void
devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len)
{
    struct ddi_umem_cookie *cp;
    struct devmap_pmem_cookie *pcp;
    /*
     * adjust devmap handle fields
     */
    ASSERT(len < dhp->dh_len);

    /* Make sure only page-aligned changes are done */
    ASSERT((len & PAGEOFFSET) == 0);

    dhp->dh_len -= len;
    dhp->dh_uoff += (offset_t)len;
    dhp->dh_roff += (offset_t)len;
    dhp->dh_uvaddr += len;
    /* Need to grab dhp lock if REMAP */
    HOLD_DHP_LOCK(dhp);
    cp = dhp->dh_cookie;
    if (!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)) {
        if (cookie_is_devmem(cp)) {
            dhp->dh_pfn += btop(len);
        } else if (cookie_is_pmem(cp)) {
            pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
            ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
                dhp->dh_roff < ptob(pcp->dp_npages));
        } else {
            ASSERT(dhp->dh_roff < cp->size);
            ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
                dhp->dh_cvaddr < (cp->cvaddr + cp->size));
            ASSERT((dhp->dh_cvaddr + len) <=
                (cp->cvaddr + cp->size));

            dhp->dh_cvaddr += len;
        }
    }
    /* XXX - Should recalculate the DEVMAP_FLAG_LARGE after changes */
    RELE_DHP_LOCK(dhp);
}
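/*
 * Worked example (illustrative): if a handle maps 4 pages starting at
 * user address U with dh_pfn P, then devmap_handle_reduce_len(dhp,
 * 2 * PAGESIZE) leaves a 2-page handle with dh_uvaddr = U + 2*PAGESIZE,
 * dh_uoff/dh_roff advanced by 2*PAGESIZE and, for a devmem cookie,
 * dh_pfn = P + 2.  Tail truncation is different: there only dh_len
 * shrinks, which is why the tail-unmap code below simply sets dh_len.
 */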
/*
 * Free devmap handle, dhp.
 * Return the next devmap handle on the linked list.
 */
static devmap_handle_t *
devmap_handle_unmap(devmap_handle_t *dhp)
{
    struct devmap_callback_ctl *callbackops = &dhp->dh_callbackops;
    struct segdev_data *sdp = (struct segdev_data *)dhp->dh_seg->s_data;
    devmap_handle_t *dhpp = (devmap_handle_t *)sdp->devmap_data;

    ASSERT(dhp != NULL);

    /*
     * before we free up dhp, call the driver's devmap_unmap entry point
     * to free resources allocated for this dhp.
     */
    if (callbackops->devmap_unmap != NULL) {
        (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp, dhp->dh_uoff,
            dhp->dh_len, NULL, NULL, NULL, NULL);
    }

    if (dhpp == dhp) {  /* releasing first dhp, change sdp data */
        sdp->devmap_data = dhp->dh_next;
    } else {
        while (dhpp->dh_next != dhp) {
            dhpp = dhpp->dh_next;
        }
        dhpp->dh_next = dhp->dh_next;
    }
    dhpp = dhp->dh_next;    /* return value is next dhp in chain */

    if (dhp->dh_softlock != NULL)
        devmap_softlock_rele(dhp);

    if (dhp->dh_ctx != NULL)
        devmap_ctx_rele(dhp);

    if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
        mutex_destroy(&dhp->dh_lock);
    }
    kmem_free(dhp, sizeof (devmap_handle_t));

    return (dhpp);
}

/*
 * Free complete devmap handles from dhp for len bytes.
 * dhp can be either the first handle or a subsequent handle.
 */
static void
devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len)
{
    struct devmap_callback_ctl *callbackops;

    /*
     * free the devmap handles covered by len.
     */
    while (len >= dhp->dh_len) {
        len -= dhp->dh_len;
        dhp = devmap_handle_unmap(dhp);
    }
    if (len != 0) { /* partial unmap at head of first remaining dhp */
        callbackops = &dhp->dh_callbackops;

        /*
         * Call the unmap callback so the driver can make
         * adjustments on its private data.
         */
        if (callbackops->devmap_unmap != NULL)
            (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
                dhp->dh_uoff, len, NULL, NULL, dhp, &dhp->dh_pvtp);
        devmap_handle_reduce_len(dhp, len);
    }
}

/*
 * Free devmap handles to truncate the mapping after addr
 * RFE: Simpler to pass in dhp pointing at correct dhp (avoid find again)
 *	Also could then use the routine in middle unmap case too
 */
static void
devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr)
{
    register struct seg *seg = dhp->dh_seg;
    register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    register devmap_handle_t *dhph = (devmap_handle_t *)sdp->devmap_data;
    struct devmap_callback_ctl *callbackops;
    register devmap_handle_t *dhpp;
    size_t maplen;
    ulong_t off;
    size_t len;

    maplen = (size_t)(addr - dhp->dh_uvaddr);
    dhph = devmap_find_handle(dhph, addr);

    while (dhph != NULL) {
        if (maplen == 0) {
            dhph = devmap_handle_unmap(dhph);
        } else {
            callbackops = &dhph->dh_callbackops;
            len = dhph->dh_len - maplen;
            off = (ulong_t)sdp->offset + (addr - seg->s_base);
            /*
             * Call the unmap callback so the driver
             * can make adjustments on its private data.
             */
            if (callbackops->devmap_unmap != NULL)
                (*callbackops->devmap_unmap)(dhph,
                    dhph->dh_pvtp, off, len,
                    (devmap_cookie_t *)dhph,
                    &dhph->dh_pvtp, NULL, NULL);
            /* XXX Reducing len needs to recalculate LARGE flag */
            dhph->dh_len = maplen;
            maplen = 0;
            dhpp = dhph->dh_next;
            dhph->dh_next = NULL;
            dhph = dhpp;
        }
    }   /* end while */
}
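/*
 * Illustrative example for devmap_handle_unmap_head (sketch): given a
 * chain of two handles of 2 and 4 pages, unmapping 3 pages from the
 * head frees the first handle entirely (len >= dh_len), then calls the
 * driver's devmap_unmap callback for the one remaining page before
 * devmap_handle_reduce_len() trims it off the second handle.
 */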
/*
 * Free a segment.
 */
static void
segdev_free(struct seg *seg)
{
    register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;

    TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
        "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
    DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
        (void *)dhp, (void *)seg));

    /*
     * Since the address space is "write" locked, we
     * don't need the segment lock to protect "segdev" data.
     */
    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

    while (dhp != NULL)
        dhp = devmap_handle_unmap(dhp);

    VN_RELE(sdp->vp);
    if (sdp->vpage != NULL)
        kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));

    rw_destroy(&sdp->lock);
    kmem_free(sdp, sizeof (*sdp));
}

static void
free_devmap_handle(devmap_handle_t *dhp)
{
    register devmap_handle_t *dhpp;

    /*
     * free up devmap handle
     */
    while (dhp != NULL) {
        dhpp = dhp->dh_next;
        if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
            mutex_destroy(&dhp->dh_lock);
        }

        if (dhp->dh_softlock != NULL)
            devmap_softlock_rele(dhp);

        if (dhp->dh_ctx != NULL)
            devmap_ctx_rele(dhp);

        kmem_free(dhp, sizeof (devmap_handle_t));
        dhp = dhpp;
    }
}

/*
 * routines to lock and unlock underlying segkp segment for
 * KMEM_PAGEABLE type cookies.
 * segkp only allows a single pending F_SOFTLOCK;
 * we keep track of the number of locks in the cookie so we can
 * have multiple pending faults and manage the calls to segkp.
 * RFE: if segkp supports either pagelock or can support multiple
 *	calls to F_SOFTLOCK, then these routines can go away.
 *	If pagelock, segdev_faultpage can fault on a page by page basis
 *	and simplifies the code quite a bit.
 *	if multiple calls allowed but not partial ranges, then need for
 *	cookie->lock and locked count goes away, code can call as_fault directly
 */
static faultcode_t
acquire_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
{
    int err = 0;
    ASSERT(cookie_is_kpmem(cookie));
    /*
     * Fault in pages in segkp with F_SOFTLOCK.
     * We want to hold the lock until all pages have been loaded.
     * segkp only allows a single caller to hold SOFTLOCK, so the cookie
     * holds a count so we don't call into segkp multiple times.
     */
    mutex_enter(&cookie->lock);

    /*
     * Check for overflow in locked field
     */
    if ((UINT32_MAX - cookie->locked) < npages) {
        err = FC_MAKE_ERR(ENOMEM);
    } else if (cookie->locked == 0) {
        /* First time locking */
        err = as_fault(kas.a_hat, &kas, cookie->cvaddr,
            cookie->size, F_SOFTLOCK, PROT_READ|PROT_WRITE);
    }
    if (!err) {
        cookie->locked += npages;
    }
    mutex_exit(&cookie->lock);
    return (err);
}

static void
release_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
{
    mutex_enter(&cookie->lock);
    ASSERT(cookie_is_kpmem(cookie));
    ASSERT(cookie->locked >= npages);
    cookie->locked -= (uint_t)npages;
    if (cookie->locked == 0) {
        /* Last unlock */
        if (as_fault(kas.a_hat, &kas, cookie->cvaddr,
            cookie->size, F_SOFTUNLOCK, PROT_READ|PROT_WRITE))
            panic("segdev releasing kpmem lock %p", (void *)cookie);
    }
    mutex_exit(&cookie->lock);
}

/*
 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
 * drivers with devmap_access callbacks
 * slock->softlocked basically works like a rw lock
 *	-ve counts => F_SOFTLOCK in progress
 *	+ve counts => F_INVAL/F_PROT in progress
 * We allow only one F_SOFTLOCK at a time
 * but can have multiple pending F_INVAL/F_PROT calls
 *
 * This routine waits using cv_wait_sig so killing processes is more graceful
 * Returns EINTR if coming out of this routine due to a signal, 0 otherwise
 */
static int devmap_softlock_enter(
    struct devmap_softlock *slock,
    size_t npages,
    enum fault_type type)
{
    if (npages == 0)
        return (0);
    mutex_enter(&(slock->lock));
    switch (type) {
    case F_SOFTLOCK :
        while (slock->softlocked) {
            if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
                /* signalled */
                mutex_exit(&(slock->lock));
                return (EINTR);
            }
        }
        slock->softlocked -= npages; /* -ve count => locked */
        break;
    case F_INVAL :
    case F_PROT :
        while (slock->softlocked < 0)
            if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
                /* signalled */
                mutex_exit(&(slock->lock));
                return (EINTR);
            }
        slock->softlocked += npages; /* +ve count => f_invals */
        break;
    default:
        ASSERT(0);
    }
    mutex_exit(&(slock->lock));
    return (0);
}

static void devmap_softlock_exit(
    struct devmap_softlock *slock,
    size_t npages,
    enum fault_type type)
{
    if (slock == NULL)
        return;
    mutex_enter(&(slock->lock));
    switch (type) {
    case F_SOFTLOCK :
        ASSERT(-slock->softlocked >= npages);
        slock->softlocked += npages;    /* -ve count is softlocked */
        if (slock->softlocked == 0)
            cv_signal(&slock->cv);
        break;
    case F_INVAL :
    case F_PROT:
        ASSERT(slock->softlocked >= npages);
        slock->softlocked -= npages;
        if (slock->softlocked == 0)
            cv_signal(&slock->cv);
        break;
    default:
        ASSERT(0);
    }
    mutex_exit(&(slock->lock));
}
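/*
 * Illustrative scenario (sketch): with two concurrent F_INVAL faults of
 * 4 and 8 pages on the same device instance, softlocked rises to +12
 * and both proceed in parallel.  An F_SOFTLOCK arriving meanwhile
 * blocks in devmap_softlock_enter until the count drains back to 0,
 * then sets it to -npages, excluding all other faults until the
 * matching F_SOFTUNLOCK brings it back to 0 via devmap_softlock_exit.
 */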
/*
 * Do a F_SOFTUNLOCK call over the range requested.
 * The range must have already been F_SOFTLOCK'ed.
 * The segment lock should be held, (but not the segment private lock?)
 * The softunlock code below does not adjust for large page sizes; it
 * assumes the caller already did any addr/len adjustments for
 * pagesize mappings before calling.
 */
/*ARGSUSED*/
static void
segdev_softunlock(
    struct hat *hat,        /* the hat */
    struct seg *seg,        /* seg_dev of interest */
    caddr_t addr,           /* base address of range */
    size_t len,             /* number of bytes */
    enum seg_rw rw)         /* type of access at fault */
{
    struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;

    TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SOFTUNLOCK,
        "segdev_softunlock:dhp_head=%p sdp=%p addr=%p len=%lx",
        dhp_head, sdp, addr, len);
    DEBUGF(3, (CE_CONT, "segdev_softunlock: dhp %p lockcnt %lx "
        "addr %p len %lx\n",
        (void *)dhp_head, sdp->softlockcnt, (void *)addr, len));

    hat_unlock(hat, addr, len);

    if (dhp_head != NULL) {
        devmap_handle_t *dhp;
        size_t mlen;
        size_t tlen = len;
        ulong_t off;

        dhp = devmap_find_handle(dhp_head, addr);
        ASSERT(dhp != NULL);

        off = (ulong_t)(addr - dhp->dh_uvaddr);
        while (tlen != 0) {
            mlen = MIN(tlen, (dhp->dh_len - off));

            /*
             * unlock segkp memory, locked during F_SOFTLOCK
             */
            if (dhp_is_kpmem(dhp)) {
                release_kpmem_lock(
                    (struct ddi_umem_cookie *)dhp->dh_cookie,
                    btopr(mlen));
            }

            /*
             * Do the softlock accounting for devmap_access
             */
            if (dhp->dh_callbackops.devmap_access != NULL) {
                devmap_softlock_exit(dhp->dh_softlock,
                    btopr(mlen), F_SOFTLOCK);
            }

            tlen -= mlen;
            dhp = dhp->dh_next;
            off = 0;
        }
    }

    mutex_enter(&freemem_lock);
    ASSERT(sdp->softlockcnt >= btopr(len));
    sdp->softlockcnt -= btopr(len);
    mutex_exit(&freemem_lock);
    if (sdp->softlockcnt == 0) {
        /*
         * All SOFTLOCKS are gone.  Wakeup any waiting
         * unmappers so they can try again to unmap.
         * Check for waiters first without the mutex
         * held so we don't always grab the mutex on
         * softunlocks.
         */
        if (AS_ISUNMAPWAIT(seg->s_as)) {
            mutex_enter(&seg->s_as->a_contents);
            if (AS_ISUNMAPWAIT(seg->s_as)) {
                AS_CLRUNMAPWAIT(seg->s_as);
                cv_broadcast(&seg->s_as->a_cv);
            }
            mutex_exit(&seg->s_as->a_contents);
        }
    }
}
/*
 * Handle fault for a single page.
 * Done in a separate routine so we can handle errors more easily.
 * This routine is called only from segdev_faultpages()
 * when looping over the range of addresses requested.  The segment lock
 * is held.
 */
static faultcode_t
segdev_faultpage(
    struct hat *hat,        /* the hat */
    struct seg *seg,        /* seg_dev of interest */
    caddr_t addr,           /* address in as */
    struct vpage *vpage,    /* pointer to vpage for seg, addr */
    enum fault_type type,   /* type of fault */
    enum seg_rw rw,         /* type of access at fault */
    devmap_handle_t *dhp)   /* devmap handle if any for this page */
{
    struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    uint_t prot;
    pfn_t pfnum = PFN_INVALID;
    u_offset_t offset;
    uint_t hat_flags;
    dev_info_t *dip;

    TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE,
        "segdev_faultpage: dhp=%p seg=%p addr=%p", dhp, seg, addr);
    DEBUGF(8, (CE_CONT, "segdev_faultpage: dhp %p seg %p addr %p \n",
        (void *)dhp, (void *)seg, (void *)addr));

    /*
     * Initialize protection value for this page.
     * If we have per page protection values check it now.
     */
    if (sdp->pageprot) {
        uint_t protchk;

        switch (rw) {
        case S_READ:
            protchk = PROT_READ;
            break;
        case S_WRITE:
            protchk = PROT_WRITE;
            break;
        case S_EXEC:
            protchk = PROT_EXEC;
            break;
        case S_OTHER:
        default:
            protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
            break;
        }

        prot = VPP_PROT(vpage);
        if ((prot & protchk) == 0)
            return (FC_PROT);   /* illegal access type */
    } else {
        prot = sdp->prot;
        /* caller has already done segment level protection check */
    }

    if (type == F_SOFTLOCK) {
        mutex_enter(&freemem_lock);
        sdp->softlockcnt++;
        mutex_exit(&freemem_lock);
    }

    hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
    offset = sdp->offset + (u_offset_t)(addr - seg->s_base);
    /*
     * In the devmap framework, sdp->mapfunc is set to NULL.  We can get
     * pfnum from dhp->dh_pfn (at beginning of segment) and offset from
     * seg->s_base.
     */
    if (dhp == NULL) {
        /* If segment has devmap_data, then dhp should be non-NULL */
        ASSERT(sdp->devmap_data == NULL);
        pfnum = (pfn_t)cdev_mmap(sdp->mapfunc, sdp->vp->v_rdev,
            (off_t)offset, prot);
        prot |= sdp->hat_attr;
    } else {
        ulong_t off;
        struct ddi_umem_cookie *cp;
        struct devmap_pmem_cookie *pcp;

        /* ensure the dhp passed in contains addr. */
        ASSERT(dhp == devmap_find_handle(
            (devmap_handle_t *)sdp->devmap_data, addr));

        off = addr - dhp->dh_uvaddr;

        /*
         * This routine assumes that the caller makes sure that the
         * fields in dhp used below are unchanged due to remap during
         * this call.  Caller does HOLD_DHP_LOCK if needed.
         */
        cp = dhp->dh_cookie;
        if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
            pfnum = PFN_INVALID;
        } else if (cookie_is_devmem(cp)) {
            pfnum = dhp->dh_pfn + btop(off);
        } else if (cookie_is_pmem(cp)) {
            pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
            ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
                dhp->dh_roff < ptob(pcp->dp_npages));
            pfnum = page_pptonum(
                pcp->dp_pparray[btop(off + dhp->dh_roff)]);
        } else {
            ASSERT(dhp->dh_roff < cp->size);
            ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
                dhp->dh_cvaddr < (cp->cvaddr + cp->size));
            ASSERT((dhp->dh_cvaddr + off) <=
                (cp->cvaddr + cp->size));
            ASSERT((dhp->dh_cvaddr + off + PAGESIZE) <=
                (cp->cvaddr + cp->size));

            switch (cp->type) {
            case UMEM_LOCKED :
                if (cp->pparray != NULL) {
                    ASSERT((dhp->dh_roff &
                        PAGEOFFSET) == 0);
                    pfnum = page_pptonum(
                        cp->pparray[btop(off +
                        dhp->dh_roff)]);
                } else {
                    pfnum = hat_getpfnum(
                        ((proc_t *)cp->procp)->p_as->a_hat,
                        cp->cvaddr + off);
                }
                break;
            case UMEM_TRASH :
                pfnum = page_pptonum(trashpp);
                /*
                 * We should set hat_flags to HAT_NOFAULT also.
                 * However, not all hat layers implement this.
                 */
                break;
            case KMEM_PAGEABLE:
            case KMEM_NON_PAGEABLE:
                pfnum = hat_getpfnum(kas.a_hat,
                    dhp->dh_cvaddr + off);
                break;
            default :
                pfnum = PFN_INVALID;
                break;
            }
        }
        prot |= dhp->dh_hat_attr;
    }
    if (pfnum == PFN_INVALID) {
        return (FC_MAKE_ERR(EFAULT));
    }
    /* prot should already be OR'ed in with hat_attributes if needed */

    TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE_CK1,
        "segdev_faultpage: pfnum=%lx memory=%x prot=%x flags=%x",
        pfnum, pf_is_memory(pfnum), prot, hat_flags);
    DEBUGF(9, (CE_CONT, "segdev_faultpage: pfnum %lx memory %x "
        "prot %x flags %x\n", pfnum, pf_is_memory(pfnum), prot, hat_flags));

    if (pf_is_memory(pfnum) || (dhp != NULL)) {
        /*
         * It's not _really_ required here to pass sdp->hat_flags
         * to hat_devload even though we do it.
         * This is because the hat figures out that DEVMEM mappings
         * are non-consistent anyway.
         */
        hat_devload(hat, addr, PAGESIZE, pfnum,
            prot, hat_flags | sdp->hat_flags);
        return (0);
    }

    /*
     * Fall through to the case where devmap is not used and we need to
     * call up the device tree to set up the mapping
     */

    dip = VTOS(VTOCVP(sdp->vp))->s_dip;
    ASSERT(dip);

    /*
     * When calling ddi_map_fault, we do not OR in sdp->hat_attr.
     * This is because this calls drivers which may not expect
     * prot to have any other values than PROT_ALL.
     * The root nexus driver has a hack to peek into the segment
     * structure and then OR in sdp->hat_attr.
     * XX In case the bus_ops interfaces are ever revisited
     * we need to fix this.  prot should include other hat attributes.
     */
    if (ddi_map_fault(dip, hat, seg, addr, NULL, pfnum, prot & PROT_ALL,
        (uint_t)(type == F_SOFTLOCK)) != DDI_SUCCESS) {
        return (FC_MAKE_ERR(EFAULT));
    }
    return (0);
}
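/*
 * For reference, the devmap_access(9E) contract that segdev_fault below
 * relies on (sketch; a typical driver just forwards to devmap_load(9F),
 * and the names here are hypothetical):
 *
 *	static int
 *	xx_devmap_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
 *	    size_t len, uint_t type, uint_t rw)
 *	{
 *		driver-specific context handling goes here
 *		return (devmap_load(dhp, off, len, type, rw));
 *	}
 *
 * A nonzero return is turned into a fault code by segdev_fault.
 */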
static faultcode_t
segdev_fault(
    struct hat *hat,        /* the hat */
    struct seg *seg,        /* the seg_dev of interest */
    caddr_t addr,           /* the address of the fault */
    size_t len,             /* the length of the range */
    enum fault_type type,   /* type of fault */
    enum seg_rw rw)         /* type of access at fault */
{
    struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
    devmap_handle_t *dhp;
    struct devmap_softlock *slock = NULL;
    ulong_t slpage = 0;
    ulong_t off;
    caddr_t maddr = addr;
    int err;
    int err_is_faultcode = 0;

    TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
        "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
        (void *)dhp_head, (void *)seg, (void *)addr, len, type);
    DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
        "addr %p len %lx type %x\n",
        (void *)dhp_head, (void *)seg, (void *)addr, len, type));

    ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

    /* Handle non-devmap case */
    if (dhp_head == NULL)
        return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));

    /* Find devmap handle */
    if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
        return (FC_NOMAP);

    /*
     * The seg_dev driver does not implement copy-on-write,
     * and always loads translations with maximal allowed permissions,
     * but we got a fault trying to access the device.
     * Servicing the fault is not going to result in any better result.
     * RFE: If we want devmap_access callbacks to be involved in F_PROT
     *	faults, then the code below is written for that.
     *	Pending resolution of the following:
     *	- determine if the F_INVAL/F_SOFTLOCK syncing
     *	is needed for F_PROT also or not.  The code below assumes it does
     *	- If driver sees F_PROT and calls devmap_load with same type,
     *	then segdev_faultpages will fail with FC_PROT anyway, need to
     *	change that so calls from devmap_load to segdev_faultpages for
     *	F_PROT type are retagged to F_INVAL.
     * RFE: Today we don't have drivers that use devmap and want to handle
     *	F_PROT calls.  The code in segdev_fault* is written to allow
     *	this case but is not tested.  A driver that needs this capability
     *	should be able to remove the short-circuit case; resolve the
     *	above issues and "should" work.
     */
    if (type == F_PROT) {
        return (FC_PROT);
    }

    /*
     * Loop through dhp list calling devmap_access or segdev_faultpages for
     * each devmap handle.
     * drivers which implement devmap_access can interpose on faults and do
     * device-appropriate special actions before calling devmap_load.
     */

    /*
     * Unfortunately, this simple loop has turned out to expose a variety
     * of complex problems which results in the following convoluted code.
     *
     * First, a desire to handle a serialization of F_SOFTLOCK calls
     * to the driver within the framework.
     *	This results in a dh_softlock structure that is on a per device
     *	(or device instance) basis and serializes devmap_access calls.
     *	Ideally we would need to do this for underlying
     *	memory/device regions that are being faulted on
     *	but that is hard to identify and with REMAP, harder.
     * Second, a desire to serialize F_INVAL (and F_PROT) calls w.r.t.
     *	F_SOFTLOCK calls to the driver.
     * These serializations are to simplify the driver programmer model.
     * To support these two features, the code first goes through the
     *	devmap handles and counts the pages (slpage) that are covered
     *	by devmap_access callbacks.
     * This part ends with a devmap_softlock_enter call
     *	which allows only one F_SOFTLOCK active on a device instance,
     *	but multiple F_INVAL/F_PROTs can be active except when a
     *	F_SOFTLOCK is active.
     *
     * Next, we don't short-circuit the fault code upfront to call
     *	segdev_softunlock for F_SOFTUNLOCK, because we must use
     *	the same length when we softlock and softunlock.
     *
     *	-Hat layers may not support softunlocking lengths less than the
     *	original length when there is large page support.
     *	-kpmem locking is dependent on keeping the lengths same.
     *	-if drivers handled F_SOFTLOCK, they probably also expect to
     *	see an F_SOFTUNLOCK of the same length.
     *	Hence, if extending lengths during softlock,
     *	softunlock has to make the same adjustments and goes through
     *	the same loop calling segdev_faultpages/segdev_softunlock,
     *	but some of the synchronization and error handling is different.
     */

    if (type != F_SOFTUNLOCK) {
        devmap_handle_t *dhpp = dhp;
        size_t slen = len;

        /*
         * Calculate count of pages that are :
         * a) within the (potentially extended) fault region
         * b) AND covered by devmap handle with devmap_access
         */
        off = (ulong_t)(addr - dhpp->dh_uvaddr);
        while (slen != 0) {
            size_t mlen;

            /*
             * Softlocking on a region that allows remap is
             * unsupported due to unresolved locking issues
             * XXX: unclear what these are?
             *	One potential is that if there is a pending
             *	softlock, then a remap should not be allowed
             *	until the unlock is done.  This is easily
             *	fixed by returning error in devmap*remap on
             *	checking the dh->dh_softlock->softlocked value
             */
            if ((type == F_SOFTLOCK) &&
                (dhpp->dh_flags & DEVMAP_ALLOW_REMAP)) {
                return (FC_NOSUPPORT);
            }

            mlen = MIN(slen, (dhpp->dh_len - off));
            if (dhpp->dh_callbackops.devmap_access) {
                size_t llen;
                caddr_t laddr;
                /*
                 * use extended length for large page mappings
                 */
                HOLD_DHP_LOCK(dhpp);
                if ((sdp->pageprot == 0) &&
                    (dhpp->dh_flags & DEVMAP_FLAG_LARGE)) {
                    devmap_get_large_pgsize(dhpp,
                        mlen, maddr, &llen, &laddr);
                } else {
                    llen = mlen;
                }
                RELE_DHP_LOCK(dhpp);

                slpage += btopr(llen);
                slock = dhpp->dh_softlock;
            }
            maddr += mlen;
            ASSERT(slen >= mlen);
            slen -= mlen;
            dhpp = dhpp->dh_next;
            off = 0;
        }
        /*
         * synchronize with other faulting threads and wait till safe
         * devmap_softlock_enter might return due to signal in cv_wait
         *
         * devmap_softlock_enter has to be called outside of while loop
         * to prevent a deadlock if len spans over multiple dhps.
         * dh_softlock is based on device instance and if multiple dhps
         * use the same device instance, the second dhp's LOCK call
         * will hang waiting on the first to complete.
         * devmap_setup verifies that slocks in a dhp_chain are same.
         * RFE: this deadlock only holds true for F_SOFTLOCK.  For
         *	F_INVAL/F_PROT, since we now allow multiple in parallel,
         *	we could have done the softlock_enter inside the loop
         *	and supported multi-dhp mappings with dissimilar devices.
         */
        if (err = devmap_softlock_enter(slock, slpage, type))
            return (FC_MAKE_ERR(err));
    }

    /* reset 'maddr' to the start addr of the range of fault. */
    maddr = addr;

    /* calculate the offset corresponding to 'addr' in the first dhp. */
    off = (ulong_t)(addr - dhp->dh_uvaddr);

    /*
     * The fault length may span over multiple dhps.
     * Loop until the total length is satisfied.
     */
    while (len != 0) {
        size_t llen;
        size_t mlen;
        caddr_t laddr;

        /*
         * mlen is the smaller of 'len' and the length
         * from addr to the end of mapping defined by dhp.
         */
        mlen = MIN(len, (dhp->dh_len - off));

        HOLD_DHP_LOCK(dhp);
        /*
         * Pass the extended length and address to devmap_access
         * if large pagesize is used for loading address translations.
         */
        if ((sdp->pageprot == 0) &&
            (dhp->dh_flags & DEVMAP_FLAG_LARGE)) {
            devmap_get_large_pgsize(dhp, mlen, maddr,
                &llen, &laddr);
            ASSERT(maddr == addr || laddr == maddr);
        } else {
            llen = mlen;
            laddr = maddr;
        }

        if (dhp->dh_callbackops.devmap_access != NULL) {
            offset_t aoff;

            aoff = sdp->offset + (offset_t)(laddr - seg->s_base);

            /*
             * call driver's devmap_access entry point which will
             * call devmap_load/contextmgmt to load the translations
             *
             * We drop the dhp_lock before calling access so
             * drivers can call devmap_*_remap within access
             */
            RELE_DHP_LOCK(dhp);

            err = (*dhp->dh_callbackops.devmap_access)(
                dhp, (void *)dhp->dh_pvtp, aoff, llen, type, rw);
        } else {
            /*
             * If no devmap_access entry point, then load mappings
             * hold dhp_lock across faultpages if REMAP
             */
            err = segdev_faultpages(hat, seg, laddr, llen,
                type, rw, dhp);
            err_is_faultcode = 1;
            RELE_DHP_LOCK(dhp);
        }

        if (err) {
            if ((type == F_SOFTLOCK) && (maddr > addr)) {
                /*
                 * If not first dhp, use
                 * segdev_fault(F_SOFTUNLOCK) for prior dhps.
                 * While this is recursion, it is incorrect to
                 * call just segdev_softunlock
                 * if we are using either large pages
                 * or devmap_access.  It is more correct
                 * to go through the same loop as above
                 * rather than call segdev_softunlock directly.
                 * It will use the right lengths as well as
                 * call into the driver devmap_access routines.
                 */
                size_t done = (size_t)(maddr - addr);
                (void) segdev_fault(hat, seg, addr, done,
                    F_SOFTUNLOCK, S_OTHER);
                /*
                 * reduce slpage by number of pages
                 * released by segdev_softunlock
                 */
                ASSERT(slpage >= btopr(done));
                devmap_softlock_exit(slock,
                    slpage - btopr(done), type);
            } else {
                devmap_softlock_exit(slock, slpage, type);
            }

            /*
             * segdev_faultpages() already returns a faultcode,
             * hence, result from segdev_faultpages() should be
             * returned directly.
             */
1863 			 */
1864 			if (err_is_faultcode)
1865 				return (err);
1866 			return (FC_MAKE_ERR(err));
1867 		}
1868 
1869 		maddr += mlen;
1870 		ASSERT(len >= mlen);
1871 		len -= mlen;
1872 		dhp = dhp->dh_next;
1873 		off = 0;
1874 
1875 		ASSERT(!dhp || len == 0 || maddr == dhp->dh_uvaddr);
1876 	}
1877 	/*
1878 	 * Release the softlock count at the end of the fault.
1879 	 * For F_SOFTLOCK this is done in the later F_SOFTUNLOCK.
1880 	 */
1881 	if ((type == F_INVAL) || (type == F_PROT))
1882 		devmap_softlock_exit(slock, slpage, type);
1883 	return (0);
1884 }
1885 
1886 /*
1887  * segdev_faultpages
1888  *
1889  * Used to fault in seg_dev segment pages. Called by segdev_fault or
1890  * devmap_load. This routine assumes that the caller makes sure that the
1891  * fields in dhp used below are not changed due to remap during this call.
1892  * Caller does HOLD_DHP_LOCK if needed.
1893  * This routine returns a faultcode_t as a return value for segdev_fault.
1894  */
1895 static faultcode_t
1896 segdev_faultpages(
1897 	struct hat *hat,		/* the hat */
1898 	struct seg *seg,		/* the seg_dev of interest */
1899 	caddr_t addr,			/* the address of the fault */
1900 	size_t len,			/* the length of the range */
1901 	enum fault_type type,		/* type of fault */
1902 	enum seg_rw rw,			/* type of access at fault */
1903 	devmap_handle_t *dhp)		/* devmap handle */
1904 {
1905 	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1906 	register caddr_t a;
1907 	struct vpage *vpage;
1908 	struct ddi_umem_cookie *kpmem_cookie = NULL;
1909 	int err;
1910 
1911 	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGES,
1912 	    "segdev_faultpages: dhp=%p seg=%p addr=%p len=%lx",
1913 	    (void *)dhp, (void *)seg, (void *)addr, len);
1914 	DEBUGF(5, (CE_CONT, "segdev_faultpages: "
1915 	    "dhp %p seg %p addr %p len %lx\n",
1916 	    (void *)dhp, (void *)seg, (void *)addr, len));
1917 
1918 	/*
1919 	 * The seg_dev driver does not implement copy-on-write
1920 	 * and always loads translations with the maximal allowed
1921 	 * permissions, yet we got a fault trying to access the device.
1922 	 * Servicing the fault is not going to produce any better result.
1923 	 * XXX: If we want to allow devmap_access to handle F_PROT calls,
1924 	 * this code should be removed to let the normal fault handling
1925 	 * take care of finding the error.
1926 	 */
1927 	if (type == F_PROT) {
1928 		return (FC_PROT);
1929 	}
1930 
1931 	if (type == F_SOFTUNLOCK) {
1932 		segdev_softunlock(hat, seg, addr, len, rw);
1933 		return (0);
1934 	}
1935 
1936 	/*
1937 	 * For kernel pageable memory, fault/lock the segkp pages.
1938 	 * We hold this lock until the completion of this
1939 	 * fault (INVAL/PROT) or until unlock (SOFTLOCK).
1940 	 */
1941 	if ((dhp != NULL) && dhp_is_kpmem(dhp)) {
1942 		kpmem_cookie = (struct ddi_umem_cookie *)dhp->dh_cookie;
1943 		if (err = acquire_kpmem_lock(kpmem_cookie, btopr(len)))
1944 			return (err);
1945 	}
1946 
1947 	/*
1948 	 * If we have the same protections for the entire segment,
1949 	 * ensure that the access being attempted is legitimate.
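	 * For example, an S_WRITE fault on a segment whose sdp->prot is
	 * only PROT_READ|PROT_USER fails the (sdp->prot & protchk) check
	 * below and returns FC_PROT, while S_OTHER passes if any of read,
	 * write or exec is allowed.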
1950 */ 1951 rw_enter(&sdp->lock, RW_READER); 1952 if (sdp->pageprot == 0) { 1953 uint_t protchk; 1954 1955 switch (rw) { 1956 case S_READ: 1957 protchk = PROT_READ; 1958 break; 1959 case S_WRITE: 1960 protchk = PROT_WRITE; 1961 break; 1962 case S_EXEC: 1963 protchk = PROT_EXEC; 1964 break; 1965 case S_OTHER: 1966 default: 1967 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 1968 break; 1969 } 1970 1971 if ((sdp->prot & protchk) == 0) { 1972 rw_exit(&sdp->lock); 1973 /* undo kpmem locking */ 1974 if (kpmem_cookie != NULL) { 1975 release_kpmem_lock(kpmem_cookie, btopr(len)); 1976 } 1977 return (FC_PROT); /* illegal access type */ 1978 } 1979 } 1980 1981 /* 1982 * we do a single hat_devload for the range if 1983 * - devmap framework (dhp is not NULL), 1984 * - pageprot == 0, i.e., no per-page protection set and 1985 * - is device pages, irrespective of whether we are using large pages 1986 */ 1987 if ((sdp->pageprot == 0) && (dhp != NULL) && dhp_is_devmem(dhp)) { 1988 pfn_t pfnum; 1989 uint_t hat_flags; 1990 1991 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) { 1992 rw_exit(&sdp->lock); 1993 return (FC_NOMAP); 1994 } 1995 1996 if (type == F_SOFTLOCK) { 1997 mutex_enter(&freemem_lock); 1998 sdp->softlockcnt += btopr(len); 1999 mutex_exit(&freemem_lock); 2000 } 2001 2002 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD); 2003 pfnum = dhp->dh_pfn + btop((uintptr_t)(addr - dhp->dh_uvaddr)); 2004 ASSERT(!pf_is_memory(pfnum)); 2005 2006 hat_devload(hat, addr, len, pfnum, sdp->prot | dhp->dh_hat_attr, 2007 hat_flags | sdp->hat_flags); 2008 rw_exit(&sdp->lock); 2009 return (0); 2010 } 2011 2012 /* Handle cases where we have to loop through fault handling per-page */ 2013 2014 if (sdp->vpage == NULL) 2015 vpage = NULL; 2016 else 2017 vpage = &sdp->vpage[seg_page(seg, addr)]; 2018 2019 /* loop over the address range handling each fault */ 2020 for (a = addr; a < addr + len; a += PAGESIZE) { 2021 if (err = segdev_faultpage(hat, seg, a, vpage, type, rw, dhp)) { 2022 break; 2023 } 2024 if (vpage != NULL) 2025 vpage++; 2026 } 2027 rw_exit(&sdp->lock); 2028 if (err && (type == F_SOFTLOCK)) { /* error handling for F_SOFTLOCK */ 2029 size_t done = (size_t)(a - addr); /* pages fault successfully */ 2030 if (done > 0) { 2031 /* use softunlock for those pages */ 2032 segdev_softunlock(hat, seg, addr, done, S_OTHER); 2033 } 2034 if (kpmem_cookie != NULL) { 2035 /* release kpmem lock for rest of pages */ 2036 ASSERT(len >= done); 2037 release_kpmem_lock(kpmem_cookie, btopr(len - done)); 2038 } 2039 } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) { 2040 /* for non-SOFTLOCK cases, release kpmem */ 2041 release_kpmem_lock(kpmem_cookie, btopr(len)); 2042 } 2043 return (err); 2044 } 2045 2046 /* 2047 * Asynchronous page fault. We simply do nothing since this 2048 * entry point is not supposed to load up the translation. 
2049 */ 2050 /*ARGSUSED*/ 2051 static faultcode_t 2052 segdev_faulta(struct seg *seg, caddr_t addr) 2053 { 2054 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA, 2055 "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr); 2056 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2057 2058 return (0); 2059 } 2060 2061 static int 2062 segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 2063 { 2064 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 2065 register devmap_handle_t *dhp; 2066 register struct vpage *vp, *evp; 2067 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data; 2068 ulong_t off; 2069 size_t mlen, sz; 2070 2071 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT, 2072 "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x", 2073 (void *)seg, (void *)addr, len, prot); 2074 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2075 2076 if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) { 2077 /* 2078 * Fail the setprot if pages are SOFTLOCKed through this 2079 * mapping. 2080 * Softlockcnt is protected from change by the as read lock. 2081 */ 2082 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1, 2083 "segdev_setprot:error softlockcnt=%lx", sz); 2084 DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz)); 2085 return (EAGAIN); 2086 } 2087 2088 if (dhp_head != NULL) { 2089 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL) 2090 return (EINVAL); 2091 2092 /* 2093 * check if violate maxprot. 2094 */ 2095 off = (ulong_t)(addr - dhp->dh_uvaddr); 2096 mlen = len; 2097 while (dhp) { 2098 if ((dhp->dh_maxprot & prot) != prot) 2099 return (EACCES); /* violated maxprot */ 2100 2101 if (mlen > (dhp->dh_len - off)) { 2102 mlen -= dhp->dh_len - off; 2103 dhp = dhp->dh_next; 2104 off = 0; 2105 } else 2106 break; 2107 } 2108 } else { 2109 if ((sdp->maxprot & prot) != prot) 2110 return (EACCES); 2111 } 2112 2113 rw_enter(&sdp->lock, RW_WRITER); 2114 if (addr == seg->s_base && len == seg->s_size && sdp->pageprot == 0) { 2115 if (sdp->prot == prot) { 2116 rw_exit(&sdp->lock); 2117 return (0); /* all done */ 2118 } 2119 sdp->prot = (uchar_t)prot; 2120 } else { 2121 sdp->pageprot = 1; 2122 if (sdp->vpage == NULL) { 2123 /* 2124 * First time through setting per page permissions, 2125 * initialize all the vpage structures to prot 2126 */ 2127 sdp->vpage = kmem_zalloc(vpgtob(seg_pages(seg)), 2128 KM_SLEEP); 2129 evp = &sdp->vpage[seg_pages(seg)]; 2130 for (vp = sdp->vpage; vp < evp; vp++) 2131 VPP_SETPROT(vp, sdp->prot); 2132 } 2133 /* 2134 * Now go change the needed vpages protections. 2135 */ 2136 evp = &sdp->vpage[seg_page(seg, addr + len)]; 2137 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) 2138 VPP_SETPROT(vp, prot); 2139 } 2140 rw_exit(&sdp->lock); 2141 2142 if (dhp_head != NULL) { 2143 devmap_handle_t *tdhp; 2144 /* 2145 * If large page size was used in hat_devload(), 2146 * the same page size must be used in hat_unload(). 
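		 * For example, if translations were loaded as 4M large
		 * pages, unloading just an 8K subrange could force the hat
		 * layer to demote the large mapping; instead the loop below
		 * unloads each dhp's whole <dh_uvaddr, dh_len> range and
		 * lets subsequent faults rebuild the mappings.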
2147 */ 2148 dhp = tdhp = devmap_find_handle(dhp_head, addr); 2149 while (tdhp != NULL) { 2150 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) { 2151 break; 2152 } 2153 tdhp = tdhp->dh_next; 2154 } 2155 if (tdhp) { 2156 size_t slen = len; 2157 size_t mlen; 2158 size_t soff; 2159 2160 soff = (ulong_t)(addr - dhp->dh_uvaddr); 2161 while (slen != 0) { 2162 mlen = MIN(slen, (dhp->dh_len - soff)); 2163 hat_unload(seg->s_as->a_hat, dhp->dh_uvaddr, 2164 dhp->dh_len, HAT_UNLOAD); 2165 dhp = dhp->dh_next; 2166 ASSERT(slen >= mlen); 2167 slen -= mlen; 2168 soff = 0; 2169 } 2170 return (0); 2171 } 2172 } 2173 2174 if ((prot & ~PROT_USER) == PROT_NONE) { 2175 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 2176 } else { 2177 /* 2178 * RFE: the segment should keep track of all attributes 2179 * allowing us to remove the deprecated hat_chgprot 2180 * and use hat_chgattr. 2181 */ 2182 hat_chgprot(seg->s_as->a_hat, addr, len, prot); 2183 } 2184 2185 return (0); 2186 } 2187 2188 static int 2189 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 2190 { 2191 struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 2192 struct vpage *vp, *evp; 2193 2194 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT, 2195 "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x", 2196 (void *)seg, (void *)addr, len, prot); 2197 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2198 2199 /* 2200 * If segment protection can be used, simply check against them 2201 */ 2202 rw_enter(&sdp->lock, RW_READER); 2203 if (sdp->pageprot == 0) { 2204 register int err; 2205 2206 err = ((sdp->prot & prot) != prot) ? EACCES : 0; 2207 rw_exit(&sdp->lock); 2208 return (err); 2209 } 2210 2211 /* 2212 * Have to check down to the vpage level 2213 */ 2214 evp = &sdp->vpage[seg_page(seg, addr + len)]; 2215 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 2216 if ((VPP_PROT(vp) & prot) != prot) { 2217 rw_exit(&sdp->lock); 2218 return (EACCES); 2219 } 2220 } 2221 rw_exit(&sdp->lock); 2222 return (0); 2223 } 2224 2225 static int 2226 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 2227 { 2228 struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 2229 size_t pgno; 2230 2231 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT, 2232 "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p", 2233 (void *)seg, (void *)addr, len, (void *)protv); 2234 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2235 2236 pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 2237 if (pgno != 0) { 2238 rw_enter(&sdp->lock, RW_READER); 2239 if (sdp->pageprot == 0) { 2240 do { 2241 protv[--pgno] = sdp->prot; 2242 } while (pgno != 0); 2243 } else { 2244 size_t pgoff = seg_page(seg, addr); 2245 2246 do { 2247 pgno--; 2248 protv[pgno] = 2249 VPP_PROT(&sdp->vpage[pgno + pgoff]); 2250 } while (pgno != 0); 2251 } 2252 rw_exit(&sdp->lock); 2253 } 2254 return (0); 2255 } 2256 2257 static u_offset_t 2258 segdev_getoffset(register struct seg *seg, caddr_t addr) 2259 { 2260 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 2261 2262 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET, 2263 "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr); 2264 2265 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2266 2267 return ((u_offset_t)sdp->offset + (addr - seg->s_base)); 2268 } 2269 2270 /*ARGSUSED*/ 2271 static int 2272 segdev_gettype(register struct seg *seg, caddr_t addr) 2273 { 2274 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 2275 
2276 	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2277 	    "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2278 
2279 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2280 
2281 	return (sdp->type);
2282 }
2283 
2284 
2285 /*ARGSUSED*/
2286 static int
2287 segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2288 {
2289 	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2290 
2291 	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2292 	    "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2293 
2294 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2295 
2296 	/*
2297 	 * Note that this vp is the common_vp of the device, where the
2298 	 * pages are hung.
2299 	 */
2300 	*vpp = VTOCVP(sdp->vp);
2301 
2302 	return (0);
2303 }
2304 
2305 static void
2306 segdev_badop(void)
2307 {
2308 	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
2309 	    "segdev_badop:start");
2310 	panic("segdev_badop");
2311 	/*NOTREACHED*/
2312 }
2313 
2314 /*
2315  * segdev pages are not in the cache, and thus can't really be controlled.
2316  * Hence, syncs are simply always successful.
2317  */
2318 /*ARGSUSED*/
2319 static int
2320 segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
2321 {
2322 	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
2323 
2324 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2325 
2326 	return (0);
2327 }
2328 
2329 /*
2330  * segdev pages are always "in core".
2331  */
2332 /*ARGSUSED*/
2333 static size_t
2334 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2335 {
2336 	size_t v = 0;
2337 
2338 	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2339 
2340 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2341 
2342 	for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2343 	    v += PAGESIZE)
2344 		*vec++ = 1;
2345 	return (v);
2346 }
2347 
2348 /*
2349  * segdev pages are not in the cache, and thus can't really be controlled.
2350  * Hence, locks are simply always successful.
2351  */
2352 /*ARGSUSED*/
2353 static int
2354 segdev_lockop(struct seg *seg, caddr_t addr,
2355     size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
2356 {
2357 	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
2358 
2359 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2360 
2361 	return (0);
2362 }
2363 
2364 /*
2365  * segdev pages are not in the cache, and thus can't really be controlled.
2366  * Hence, advise is simply always successful.
2367  */
2368 /*ARGSUSED*/
2369 static int
2370 segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2371 {
2372 	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2373 
2374 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2375 
2376 	return (0);
2377 }
2378 
2379 /*
2380  * segdev pages are not dumped, so we just return.
2381  */
2382 /*ARGSUSED*/
2383 static void
2384 segdev_dump(struct seg *seg)
2385 {}
2386 
2387 /*
2388  * ddi_segmap_setup:	Used by drivers who wish to specify mapping attributes
2389  *			for a segment. Called from a driver's segmap(9E)
2390  *			routine.
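 *
 *	For illustration, a minimal segmap(9E) entry point built on this
 *	function might look like the sketch below (the xx_ prefix and the
 *	register number 0 are hypothetical):
 *
 *	static int
 *	xx_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
 *	    off_t len, uint_t prot, uint_t maxprot, uint_t flags,
 *	    cred_t *cred)
 *	{
 *		ddi_device_acc_attr_t attr;
 *
 *		attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *		attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
 *		attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *
 *		return (ddi_segmap_setup(dev, off, as, addrp, len, prot,
 *		    maxprot, flags, cred, &attr, 0));
 *	}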
2391 */ 2392 /*ARGSUSED*/ 2393 int 2394 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp, 2395 off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred, 2396 ddi_device_acc_attr_t *accattrp, uint_t rnumber) 2397 { 2398 struct segdev_crargs dev_a; 2399 int (*mapfunc)(dev_t dev, off_t off, int prot); 2400 uint_t hat_attr; 2401 pfn_t pfn; 2402 int error, i; 2403 2404 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP, 2405 "ddi_segmap_setup:start"); 2406 2407 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev) 2408 return (ENODEV); 2409 2410 /* 2411 * Character devices that support the d_mmap 2412 * interface can only be mmap'ed shared. 2413 */ 2414 if ((flags & MAP_TYPE) != MAP_SHARED) 2415 return (EINVAL); 2416 2417 /* 2418 * Check that this region is indeed mappable on this platform. 2419 * Use the mapping function. 2420 */ 2421 if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1) 2422 return (ENXIO); 2423 2424 /* 2425 * Check to ensure that the entire range is 2426 * legal and we are not trying to map in 2427 * more than the device will let us. 2428 */ 2429 for (i = 0; i < len; i += PAGESIZE) { 2430 if (i == 0) { 2431 /* 2432 * Save the pfn at offset here. This pfn will be 2433 * used later to get user address. 2434 */ 2435 if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset, 2436 maxprot)) == PFN_INVALID) 2437 return (ENXIO); 2438 } else { 2439 if (cdev_mmap(mapfunc, dev, offset + i, maxprot) == 2440 PFN_INVALID) 2441 return (ENXIO); 2442 } 2443 } 2444 2445 as_rangelock(as); 2446 /* Pick an address w/o worrying about any vac alignment constraints. */ 2447 error = choose_addr(as, addrp, len, ptob(pfn), ADDR_NOVACALIGN, flags); 2448 if (error != 0) { 2449 as_rangeunlock(as); 2450 return (error); 2451 } 2452 2453 dev_a.mapfunc = mapfunc; 2454 dev_a.dev = dev; 2455 dev_a.offset = (offset_t)offset; 2456 dev_a.type = flags & MAP_TYPE; 2457 dev_a.prot = (uchar_t)prot; 2458 dev_a.maxprot = (uchar_t)maxprot; 2459 dev_a.hat_attr = hat_attr; 2460 dev_a.hat_flags = 0; 2461 dev_a.devmap_data = NULL; 2462 2463 error = as_map(as, *addrp, len, segdev_create, &dev_a); 2464 as_rangeunlock(as); 2465 return (error); 2466 2467 } 2468 2469 /*ARGSUSED*/ 2470 static int 2471 segdev_pagelock(struct seg *seg, caddr_t addr, size_t len, 2472 struct page ***ppp, enum lock_type type, enum seg_rw rw) 2473 { 2474 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK, 2475 "segdev_pagelock:start"); 2476 return (ENOTSUP); 2477 } 2478 2479 /*ARGSUSED*/ 2480 static int 2481 segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len, 2482 uint_t szc) 2483 { 2484 return (ENOTSUP); 2485 } 2486 2487 /* 2488 * devmap_device: Used by devmap framework to establish mapping 2489 * called by devmap_seup(9F) during map setup time. 
2490 */ 2491 /*ARGSUSED*/ 2492 static int 2493 devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr, 2494 offset_t off, size_t len, uint_t flags) 2495 { 2496 devmap_handle_t *rdhp, *maxdhp; 2497 struct segdev_crargs dev_a; 2498 int err; 2499 uint_t maxprot = PROT_ALL; 2500 offset_t offset = 0; 2501 pfn_t pfn; 2502 struct devmap_pmem_cookie *pcp; 2503 2504 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE, 2505 "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx", 2506 (void *)dhp, (void *)addr, off, len); 2507 2508 DEBUGF(2, (CE_CONT, "devmap_device: dhp %p addr %p off %llx len %lx\n", 2509 (void *)dhp, (void *)addr, off, len)); 2510 2511 as_rangelock(as); 2512 if ((flags & MAP_FIXED) == 0) { 2513 offset_t aligned_off; 2514 2515 rdhp = maxdhp = dhp; 2516 while (rdhp != NULL) { 2517 maxdhp = (maxdhp->dh_len > rdhp->dh_len) ? 2518 maxdhp : rdhp; 2519 rdhp = rdhp->dh_next; 2520 maxprot |= dhp->dh_maxprot; 2521 } 2522 offset = maxdhp->dh_uoff - dhp->dh_uoff; 2523 2524 /* 2525 * Use the dhp that has the 2526 * largest len to get user address. 2527 */ 2528 /* 2529 * If MAPPING_INVALID, cannot use dh_pfn/dh_cvaddr, 2530 * use 0 which is as good as any other. 2531 */ 2532 if (maxdhp->dh_flags & DEVMAP_MAPPING_INVALID) { 2533 aligned_off = (offset_t)0; 2534 } else if (dhp_is_devmem(maxdhp)) { 2535 aligned_off = (offset_t)ptob(maxdhp->dh_pfn) - offset; 2536 } else if (dhp_is_pmem(maxdhp)) { 2537 pcp = (struct devmap_pmem_cookie *)maxdhp->dh_pcookie; 2538 pfn = page_pptonum( 2539 pcp->dp_pparray[btop(maxdhp->dh_roff)]); 2540 aligned_off = (offset_t)ptob(pfn) - offset; 2541 } else { 2542 aligned_off = (offset_t)(uintptr_t)maxdhp->dh_cvaddr - 2543 offset; 2544 } 2545 2546 /* 2547 * Pick an address aligned to dh_cookie. 2548 * for kernel memory/user memory, cookie is cvaddr. 2549 * for device memory, cookie is physical address. 2550 */ 2551 map_addr(addr, len, aligned_off, 1, flags); 2552 if (*addr == NULL) { 2553 as_rangeunlock(as); 2554 return (ENOMEM); 2555 } 2556 } else { 2557 /* 2558 * User-specified address; blow away any previous mappings. 2559 */ 2560 (void) as_unmap(as, *addr, len); 2561 } 2562 2563 dev_a.mapfunc = NULL; 2564 dev_a.dev = dhp->dh_dev; 2565 dev_a.type = flags & MAP_TYPE; 2566 dev_a.offset = off; 2567 /* 2568 * sdp->maxprot has the least restrict protection of all dhps. 2569 */ 2570 dev_a.maxprot = maxprot; 2571 dev_a.prot = dhp->dh_prot; 2572 /* 2573 * devmap uses dhp->dh_hat_attr for hat. 
2574 */ 2575 dev_a.hat_flags = 0; 2576 dev_a.hat_attr = 0; 2577 dev_a.devmap_data = (void *)dhp; 2578 2579 err = as_map(as, *addr, len, segdev_create, &dev_a); 2580 as_rangeunlock(as); 2581 return (err); 2582 } 2583 2584 int 2585 devmap_do_ctxmgt(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len, 2586 uint_t type, uint_t rw, int (*ctxmgt)(devmap_cookie_t, void *, offset_t, 2587 size_t, uint_t, uint_t)) 2588 { 2589 register devmap_handle_t *dhp = (devmap_handle_t *)dhc; 2590 struct devmap_ctx *devctx; 2591 int do_timeout = 0; 2592 int ret; 2593 2594 #ifdef lint 2595 pvtp = pvtp; 2596 #endif 2597 2598 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT, 2599 "devmap_do_ctxmgt:start dhp=%p off=%llx, len=%lx", 2600 (void *)dhp, off, len); 2601 DEBUGF(7, (CE_CONT, "devmap_do_ctxmgt: dhp %p off %llx len %lx\n", 2602 (void *)dhp, off, len)); 2603 2604 if (ctxmgt == NULL) 2605 return (FC_HWERR); 2606 2607 devctx = dhp->dh_ctx; 2608 2609 /* 2610 * If we are on an MP system with more than one cpu running 2611 * and if a thread on some CPU already has the context, wait 2612 * for it to finish if there is a hysteresis timeout. 2613 * 2614 * We call cv_wait() instead of cv_wait_sig() because 2615 * it does not matter much if it returned due to a signal 2616 * or due to a cv_signal() or cv_broadcast(). In either event 2617 * we need to complete the mapping otherwise the processes 2618 * will die with a SEGV. 2619 */ 2620 if ((dhp->dh_timeout_length > 0) && (ncpus > 1)) { 2621 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK1, 2622 "devmap_do_ctxmgt:doing hysteresis, devctl %p dhp %p", 2623 devctx, dhp); 2624 do_timeout = 1; 2625 mutex_enter(&devctx->lock); 2626 while (devctx->oncpu) 2627 cv_wait(&devctx->cv, &devctx->lock); 2628 devctx->oncpu = 1; 2629 mutex_exit(&devctx->lock); 2630 } 2631 2632 /* 2633 * Call the contextmgt callback so that the driver can handle 2634 * the fault. 2635 */ 2636 ret = (*ctxmgt)(dhp, dhp->dh_pvtp, off, len, type, rw); 2637 2638 /* 2639 * If devmap_access() returned -1, then there was a hardware 2640 * error so we need to convert the return value to something 2641 * that trap() will understand. Otherwise, the return value 2642 * is already a fault code generated by devmap_unload() 2643 * or devmap_load(). 2644 */ 2645 if (ret) { 2646 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK2, 2647 "devmap_do_ctxmgt: ret=%x dhp=%p devctx=%p", 2648 ret, dhp, devctx); 2649 DEBUGF(1, (CE_CONT, "devmap_do_ctxmgt: ret %x dhp %p\n", 2650 ret, (void *)dhp)); 2651 if (devctx->oncpu) { 2652 mutex_enter(&devctx->lock); 2653 devctx->oncpu = 0; 2654 cv_signal(&devctx->cv); 2655 mutex_exit(&devctx->lock); 2656 } 2657 return (FC_HWERR); 2658 } 2659 2660 /* 2661 * Setup the timeout if we need to 2662 */ 2663 if (do_timeout) { 2664 mutex_enter(&devctx->lock); 2665 if (dhp->dh_timeout_length > 0) { 2666 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK3, 2667 "devmap_do_ctxmgt:timeout set"); 2668 devctx->timeout = timeout(devmap_ctxto, 2669 devctx, dhp->dh_timeout_length); 2670 } else { 2671 /* 2672 * We don't want to wait so set oncpu to 2673 * 0 and wake up anyone waiting. 
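			 * (dh_timeout_length defaults to CTX_TIMEOUT_VALUE,
			 * i.e. no hysteresis; a driver wanting hysteresis can
			 * set a timeout with devmap_set_ctx_timeout(9F), e.g.
			 * devmap_set_ctx_timeout(dhp, drv_usectohz(1000)).)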
2674 			 */
2675 			TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK4,
2676 			    "devmap_do_ctxmgt:timeout not set");
2677 			devctx->oncpu = 0;
2678 			cv_signal(&devctx->cv);
2679 		}
2680 		mutex_exit(&devctx->lock);
2681 	}
2682 
2683 	return (DDI_SUCCESS);
2684 }
2685 
2686 /*
2687  *                                            end of mapping
2688  *                 poff    fault_offset            |
2689  *          base    |          |                   |
2690  *            |     |          |                   |
2691  *            V     V          V                   V
2692  *  +-----------+---------------+-------+---------+-------+
2693  *              ^               ^       ^         ^
2694  *              |<--- offset--->|<-len->|         |
2695  *              |<--- dh_len(size of mapping) --->|
2696  *              |<-- pg -->|
2697  *                          -->|rlen|<--
2698  */
2699 static ulong_t
2700 devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
2701     ulong_t *opfn, ulong_t *pagesize)
2702 {
2703 	register int level;
2704 	ulong_t pg;
2705 	ulong_t poff;
2706 	ulong_t base;
2707 	caddr_t uvaddr;
2708 	long rlen;
2709 
2710 	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP,
2711 	    "devmap_roundup:start dhp=%p off=%lx len=%lx",
2712 	    (void *)dhp, offset, len);
2713 	DEBUGF(2, (CE_CONT, "devmap_roundup: dhp %p off %lx len %lx\n",
2714 	    (void *)dhp, offset, len));
2715 
2716 	/*
2717 	 * Get the maximum pagesize that is aligned within the range
2718 	 * <dh_pfn, dh_pfn+offset>.
2719 	 *
2720 	 * The calculations below use the physical address to determine
2721 	 * the page size to use. The same calculations could use the
2722 	 * virtual address to determine the page size.
2723 	 */
2724 	base = (ulong_t)ptob(dhp->dh_pfn);
2725 	for (level = dhp->dh_mmulevel; level >= 0; level--) {
2726 		pg = page_get_pagesize(level);
2727 		poff = ((base + offset) & ~(pg - 1));
2728 		uvaddr = dhp->dh_uvaddr + (poff - base);
2729 		if ((poff >= base) &&
2730 		    ((poff + pg) <= (base + dhp->dh_len)) &&
2731 		    VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg))
2732 			break;
2733 	}
2734 
2735 	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK1,
2736 	    "devmap_roundup: base=%lx poff=%lx dhp=%p",
2737 	    base, poff, dhp);
2738 	DEBUGF(2, (CE_CONT, "devmap_roundup: base %lx poff %lx pfn %lx\n",
2739 	    base, poff, dhp->dh_pfn));
2740 
2741 	ASSERT(VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg));
2742 	ASSERT(level >= 0);
2743 
2744 	*pagesize = pg;
2745 	*opfn = dhp->dh_pfn + btop(poff - base);
2746 
2747 	rlen = len + offset - (poff - base + pg);
2748 
2749 	ASSERT(rlen < (long)len);
2750 
2751 	TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK2,
2752 	    "devmap_roundup:ret dhp=%p level=%x rlen=%lx psiz=%p opfn=%p",
2753 	    (void *)dhp, level, rlen, pagesize, opfn);
2754 	DEBUGF(1, (CE_CONT, "devmap_roundup: dhp %p "
2755 	    "level %x rlen %lx psize %lx opfn %lx\n",
2756 	    (void *)dhp, level, rlen, *pagesize, *opfn));
2757 
2758 	return ((ulong_t)((rlen > 0) ? rlen : 0));
2759 }
2760 
2761 /*
2762  * Find the dhp that contains addr.
2763  */
2764 static devmap_handle_t *
2765 devmap_find_handle(devmap_handle_t *dhp_head, caddr_t addr)
2766 {
2767 	devmap_handle_t *dhp;
2768 
2769 	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_FIND_HANDLE,
2770 	    "devmap_find_handle:start");
2771 
2772 	dhp = dhp_head;
2773 	while (dhp) {
2774 		if (addr >= dhp->dh_uvaddr &&
2775 		    addr < (dhp->dh_uvaddr + dhp->dh_len))
2776 			return (dhp);
2777 		dhp = dhp->dh_next;
2778 	}
2779 
2780 	return ((devmap_handle_t *)NULL);
2781 }
2782 
2783 /*
2784  * devmap_unload:
2785  *	Marks a segdev segment, or just its pages if offset -> offset+len
2786  *	is not the entire segment, as intercept, and unloads the
2787  *	pages in the range offset -> offset+len.
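 *
 *	For illustration, a driver's context-management callback (passed
 *	to devmap_do_ctxmgt()) might pair devmap_unload() with
 *	devmap_load() roughly as follows; the xx_ names and the
 *	single-owner bookkeeping are hypothetical:
 *
 *	static int
 *	xx_ctxmgt(devmap_cookie_t dhp, void *pvtp, offset_t off,
 *	    size_t len, uint_t type, uint_t rw)
 *	{
 *		struct xx_state *sp = pvtp;
 *
 *		if (sp->cur_dhp != NULL && sp->cur_dhp != dhp &&
 *		    devmap_unload(sp->cur_dhp, sp->cur_off, sp->cur_len))
 *			return (-1);
 *		sp->cur_dhp = dhp;
 *		sp->cur_off = off;
 *		sp->cur_len = len;
 *		return (devmap_load(dhp, off, len, type, rw));
 *	}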
2788 */ 2789 int 2790 devmap_unload(devmap_cookie_t dhc, offset_t offset, size_t len) 2791 { 2792 register devmap_handle_t *dhp = (devmap_handle_t *)dhc; 2793 caddr_t addr; 2794 ulong_t size; 2795 ssize_t soff; 2796 2797 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_UNLOAD, 2798 "devmap_unload:start dhp=%p offset=%llx len=%lx", 2799 (void *)dhp, offset, len); 2800 DEBUGF(7, (CE_CONT, "devmap_unload: dhp %p offset %llx len %lx\n", 2801 (void *)dhp, offset, len)); 2802 2803 soff = (ssize_t)(offset - dhp->dh_uoff); 2804 soff = round_down_p2(soff, PAGESIZE); 2805 if (soff < 0 || soff >= dhp->dh_len) 2806 return (FC_MAKE_ERR(EINVAL)); 2807 2808 /* 2809 * Address and size must be page aligned. Len is set to the 2810 * number of bytes in the number of pages that are required to 2811 * support len. Offset is set to the byte offset of the first byte 2812 * of the page that contains offset. 2813 */ 2814 len = round_up_p2(len, PAGESIZE); 2815 2816 /* 2817 * If len is == 0, then calculate the size by getting 2818 * the number of bytes from offset to the end of the segment. 2819 */ 2820 if (len == 0) 2821 size = dhp->dh_len - soff; 2822 else { 2823 size = len; 2824 if ((soff + size) > dhp->dh_len) 2825 return (FC_MAKE_ERR(EINVAL)); 2826 } 2827 2828 /* 2829 * The address is offset bytes from the base address of 2830 * the dhp. 2831 */ 2832 addr = (caddr_t)(soff + dhp->dh_uvaddr); 2833 2834 /* 2835 * If large page size was used in hat_devload(), 2836 * the same page size must be used in hat_unload(). 2837 */ 2838 if (dhp->dh_flags & DEVMAP_FLAG_LARGE) { 2839 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr, 2840 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER); 2841 } else { 2842 hat_unload(dhp->dh_seg->s_as->a_hat, addr, size, 2843 HAT_UNLOAD|HAT_UNLOAD_OTHER); 2844 } 2845 2846 return (0); 2847 } 2848 2849 /* 2850 * calculates the optimal page size that will be used for hat_devload(). 2851 */ 2852 static void 2853 devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len, caddr_t addr, 2854 size_t *llen, caddr_t *laddr) 2855 { 2856 ulong_t off; 2857 ulong_t pfn; 2858 ulong_t pgsize; 2859 uint_t first = 1; 2860 2861 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GET_LARGE_PGSIZE, 2862 "devmap_get_large_pgsize:start"); 2863 2864 /* 2865 * RFE - Code only supports large page mappings for devmem 2866 * This code could be changed in future if we want to support 2867 * large page mappings for kernel exported memory. 2868 */ 2869 ASSERT(dhp_is_devmem(dhp)); 2870 ASSERT(!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)); 2871 2872 *llen = 0; 2873 off = (ulong_t)(addr - dhp->dh_uvaddr); 2874 while ((long)len > 0) { 2875 /* 2876 * get the optimal pfn to minimize address translations. 2877 * devmap_roundup() returns residue bytes for next round 2878 * calculations. 2879 */ 2880 len = devmap_roundup(dhp, off, len, &pfn, &pgsize); 2881 2882 if (first) { 2883 *laddr = dhp->dh_uvaddr + ptob(pfn - dhp->dh_pfn); 2884 first = 0; 2885 } 2886 2887 *llen += pgsize; 2888 off = ptob(pfn - dhp->dh_pfn) + pgsize; 2889 } 2890 /* Large page mapping len/addr cover more range than original fault */ 2891 ASSERT(*llen >= len && *laddr <= addr); 2892 ASSERT((*laddr + *llen) >= (addr + len)); 2893 } 2894 2895 /* 2896 * Initialize the devmap_softlock structure. 
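 * The (dev, id) pair identifies the lock; segdev_fault requires all
 * handles of a multi-dhp mapping to share one slock, so devmap_setup
 * passes the address of the driver's devmap_access routine as the id.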
2897 */ 2898 static struct devmap_softlock * 2899 devmap_softlock_init(dev_t dev, ulong_t id) 2900 { 2901 struct devmap_softlock *slock; 2902 struct devmap_softlock *tmp; 2903 2904 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_INIT, 2905 "devmap_softlock_init:start"); 2906 2907 tmp = kmem_zalloc(sizeof (struct devmap_softlock), KM_SLEEP); 2908 mutex_enter(&devmap_slock); 2909 2910 for (slock = devmap_slist; slock != NULL; slock = slock->next) 2911 if ((slock->dev == dev) && (slock->id == id)) 2912 break; 2913 2914 if (slock == NULL) { 2915 slock = tmp; 2916 slock->dev = dev; 2917 slock->id = id; 2918 mutex_init(&slock->lock, NULL, MUTEX_DEFAULT, NULL); 2919 cv_init(&slock->cv, NULL, CV_DEFAULT, NULL); 2920 slock->next = devmap_slist; 2921 devmap_slist = slock; 2922 } else 2923 kmem_free(tmp, sizeof (struct devmap_softlock)); 2924 2925 mutex_enter(&slock->lock); 2926 slock->refcnt++; 2927 mutex_exit(&slock->lock); 2928 mutex_exit(&devmap_slock); 2929 2930 return (slock); 2931 } 2932 2933 /* 2934 * Wake up processes that sleep on softlocked. 2935 * Free dh_softlock if refcnt is 0. 2936 */ 2937 static void 2938 devmap_softlock_rele(devmap_handle_t *dhp) 2939 { 2940 struct devmap_softlock *slock = dhp->dh_softlock; 2941 struct devmap_softlock *tmp; 2942 struct devmap_softlock *parent; 2943 2944 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_RELE, 2945 "devmap_softlock_rele:start"); 2946 2947 mutex_enter(&devmap_slock); 2948 mutex_enter(&slock->lock); 2949 2950 ASSERT(slock->refcnt > 0); 2951 2952 slock->refcnt--; 2953 2954 /* 2955 * If no one is using the device, free up the slock data. 2956 */ 2957 if (slock->refcnt == 0) { 2958 slock->softlocked = 0; 2959 cv_signal(&slock->cv); 2960 2961 if (devmap_slist == slock) 2962 devmap_slist = slock->next; 2963 else { 2964 parent = devmap_slist; 2965 for (tmp = devmap_slist->next; tmp != NULL; 2966 tmp = tmp->next) { 2967 if (tmp == slock) { 2968 parent->next = tmp->next; 2969 break; 2970 } 2971 parent = tmp; 2972 } 2973 } 2974 mutex_exit(&slock->lock); 2975 mutex_destroy(&slock->lock); 2976 cv_destroy(&slock->cv); 2977 kmem_free(slock, sizeof (struct devmap_softlock)); 2978 } else 2979 mutex_exit(&slock->lock); 2980 2981 mutex_exit(&devmap_slock); 2982 } 2983 2984 /* 2985 * Wake up processes that sleep on dh_ctx->locked. 2986 * Free dh_ctx if refcnt is 0. 2987 */ 2988 static void 2989 devmap_ctx_rele(devmap_handle_t *dhp) 2990 { 2991 struct devmap_ctx *devctx = dhp->dh_ctx; 2992 struct devmap_ctx *tmp; 2993 struct devmap_ctx *parent; 2994 timeout_id_t tid; 2995 2996 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE, 2997 "devmap_ctx_rele:start"); 2998 2999 mutex_enter(&devmapctx_lock); 3000 mutex_enter(&devctx->lock); 3001 3002 ASSERT(devctx->refcnt > 0); 3003 3004 devctx->refcnt--; 3005 3006 /* 3007 * If no one is using the device, free up the devctx data. 3008 */ 3009 if (devctx->refcnt == 0) { 3010 /* 3011 * Untimeout any threads using this mapping as they are about 3012 * to go away. 
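		 * (The lock is dropped across the untimeout() call below
		 * because untimeout() waits for a running handler to finish,
		 * and the handler itself may need devctx->lock; holding the
		 * lock here could deadlock.)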
3013 */ 3014 if (devctx->timeout != 0) { 3015 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE_CK1, 3016 "devmap_ctx_rele:untimeout ctx->timeout"); 3017 3018 tid = devctx->timeout; 3019 mutex_exit(&devctx->lock); 3020 (void) untimeout(tid); 3021 mutex_enter(&devctx->lock); 3022 } 3023 3024 devctx->oncpu = 0; 3025 cv_signal(&devctx->cv); 3026 3027 if (devmapctx_list == devctx) 3028 devmapctx_list = devctx->next; 3029 else { 3030 parent = devmapctx_list; 3031 for (tmp = devmapctx_list->next; tmp != NULL; 3032 tmp = tmp->next) { 3033 if (tmp == devctx) { 3034 parent->next = tmp->next; 3035 break; 3036 } 3037 parent = tmp; 3038 } 3039 } 3040 mutex_exit(&devctx->lock); 3041 mutex_destroy(&devctx->lock); 3042 cv_destroy(&devctx->cv); 3043 kmem_free(devctx, sizeof (struct devmap_ctx)); 3044 } else 3045 mutex_exit(&devctx->lock); 3046 3047 mutex_exit(&devmapctx_lock); 3048 } 3049 3050 /* 3051 * devmap_load: 3052 * Marks a segdev segment or pages if offset->offset+len 3053 * is not the entire segment as nointercept and faults in 3054 * the pages in the range offset -> offset+len. 3055 */ 3056 int 3057 devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type, 3058 uint_t rw) 3059 { 3060 devmap_handle_t *dhp = (devmap_handle_t *)dhc; 3061 struct as *asp = dhp->dh_seg->s_as; 3062 caddr_t addr; 3063 ulong_t size; 3064 ssize_t soff; /* offset from the beginning of the segment */ 3065 int rc; 3066 3067 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD, 3068 "devmap_load:start dhp=%p offset=%llx len=%lx", 3069 (void *)dhp, offset, len); 3070 3071 DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n", 3072 (void *)dhp, offset, len)); 3073 3074 /* 3075 * Hat layer only supports devload to process' context for which 3076 * the as lock is held. Verify here and return error if drivers 3077 * inadvertently call devmap_load on a wrong devmap handle. 3078 */ 3079 if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock)) 3080 return (FC_MAKE_ERR(EINVAL)); 3081 3082 soff = (ssize_t)(offset - dhp->dh_uoff); 3083 soff = round_down_p2(soff, PAGESIZE); 3084 if (soff < 0 || soff >= dhp->dh_len) 3085 return (FC_MAKE_ERR(EINVAL)); 3086 3087 /* 3088 * Address and size must be page aligned. Len is set to the 3089 * number of bytes in the number of pages that are required to 3090 * support len. Offset is set to the byte offset of the first byte 3091 * of the page that contains offset. 3092 */ 3093 len = round_up_p2(len, PAGESIZE); 3094 3095 /* 3096 * If len == 0, then calculate the size by getting 3097 * the number of bytes from offset to the end of the segment. 3098 */ 3099 if (len == 0) 3100 size = dhp->dh_len - soff; 3101 else { 3102 size = len; 3103 if ((soff + size) > dhp->dh_len) 3104 return (FC_MAKE_ERR(EINVAL)); 3105 } 3106 3107 /* 3108 * The address is offset bytes from the base address of 3109 * the segment. 
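	 * For example, with 8K pages, dh_uoff = 0x10000, offset = 0x13004
	 * and len = 0x100: soff = round_down_p2(0x3004, PAGESIZE) = 0x2000,
	 * len = round_up_p2(0x100, PAGESIZE) = 0x2000, so exactly one page
	 * at dh_uvaddr + 0x2000 is faulted in.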
3110 */ 3111 addr = (caddr_t)(soff + dhp->dh_uvaddr); 3112 3113 HOLD_DHP_LOCK(dhp); 3114 rc = segdev_faultpages(asp->a_hat, 3115 dhp->dh_seg, addr, size, type, rw, dhp); 3116 RELE_DHP_LOCK(dhp); 3117 return (rc); 3118 } 3119 3120 int 3121 devmap_setup(dev_t dev, offset_t off, struct as *as, caddr_t *addrp, 3122 size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred) 3123 { 3124 register devmap_handle_t *dhp; 3125 int (*devmap)(dev_t, devmap_cookie_t, offset_t, size_t, 3126 size_t *, uint_t); 3127 int (*mmap)(dev_t, off_t, int); 3128 struct devmap_callback_ctl *callbackops; 3129 devmap_handle_t *dhp_head = NULL; 3130 devmap_handle_t *dhp_prev = NULL; 3131 devmap_handle_t *dhp_curr; 3132 caddr_t addr; 3133 int map_flag; 3134 int ret; 3135 ulong_t total_len; 3136 size_t map_len; 3137 size_t resid_len = len; 3138 offset_t map_off = off; 3139 struct devmap_softlock *slock = NULL; 3140 3141 #ifdef lint 3142 cred = cred; 3143 #endif 3144 3145 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SETUP, 3146 "devmap_setup:start off=%llx len=%lx", off, len); 3147 DEBUGF(3, (CE_CONT, "devmap_setup: off %llx len %lx\n", 3148 off, len)); 3149 3150 devmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_devmap; 3151 mmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap; 3152 3153 /* 3154 * driver must provide devmap(9E) entry point in cb_ops to use the 3155 * devmap framework. 3156 */ 3157 if (devmap == NULL || devmap == nulldev || devmap == nodev) 3158 return (EINVAL); 3159 3160 /* 3161 * To protect from an inadvertent entry because the devmap entry point 3162 * is not NULL, return error if D_DEVMAP bit is not set in cb_flag and 3163 * mmap is NULL. 3164 */ 3165 map_flag = devopsp[getmajor(dev)]->devo_cb_ops->cb_flag; 3166 if ((map_flag & D_DEVMAP) == 0 && (mmap == NULL || mmap == nulldev)) 3167 return (EINVAL); 3168 3169 /* 3170 * devmap allows mmap(2) to map multiple registers. 3171 * one devmap_handle is created for each register mapped. 3172 */ 3173 for (total_len = 0; total_len < len; total_len += map_len) { 3174 dhp = kmem_zalloc(sizeof (devmap_handle_t), KM_SLEEP); 3175 3176 if (dhp_prev != NULL) 3177 dhp_prev->dh_next = dhp; 3178 else 3179 dhp_head = dhp; 3180 dhp_prev = dhp; 3181 3182 dhp->dh_prot = prot; 3183 dhp->dh_orig_maxprot = dhp->dh_maxprot = maxprot; 3184 dhp->dh_dev = dev; 3185 dhp->dh_timeout_length = CTX_TIMEOUT_VALUE; 3186 dhp->dh_uoff = map_off; 3187 3188 /* 3189 * Get mapping specific info from 3190 * the driver, such as rnumber, roff, len, callbackops, 3191 * accattrp and, if the mapping is for kernel memory, 3192 * ddi_umem_cookie. 3193 */ 3194 if ((ret = cdev_devmap(dev, dhp, map_off, 3195 resid_len, &map_len, get_udatamodel())) != 0) { 3196 free_devmap_handle(dhp_head); 3197 return (ENXIO); 3198 } 3199 3200 if (map_len & PAGEOFFSET) { 3201 free_devmap_handle(dhp_head); 3202 return (EINVAL); 3203 } 3204 3205 callbackops = &dhp->dh_callbackops; 3206 3207 if ((callbackops->devmap_access == NULL) || 3208 (callbackops->devmap_access == nulldev) || 3209 (callbackops->devmap_access == nodev)) { 3210 /* 3211 * Normally devmap does not support MAP_PRIVATE unless 3212 * the drivers provide a valid devmap_access routine. 3213 */ 3214 if ((flags & MAP_PRIVATE) != 0) { 3215 free_devmap_handle(dhp_head); 3216 return (EINVAL); 3217 } 3218 } else { 3219 /* 3220 * Initialize dhp_softlock and dh_ctx if the drivers 3221 * provide devmap_access. 
3222 */ 3223 dhp->dh_softlock = devmap_softlock_init(dev, 3224 (ulong_t)callbackops->devmap_access); 3225 dhp->dh_ctx = devmap_ctxinit(dev, 3226 (ulong_t)callbackops->devmap_access); 3227 3228 /* 3229 * segdev_fault can only work when all 3230 * dh_softlock in a multi-dhp mapping 3231 * are same. see comments in segdev_fault 3232 * This code keeps track of the first 3233 * dh_softlock allocated in slock and 3234 * compares all later allocations and if 3235 * not similar, returns an error. 3236 */ 3237 if (slock == NULL) 3238 slock = dhp->dh_softlock; 3239 if (slock != dhp->dh_softlock) { 3240 free_devmap_handle(dhp_head); 3241 return (ENOTSUP); 3242 } 3243 } 3244 3245 map_off += map_len; 3246 resid_len -= map_len; 3247 } 3248 3249 /* 3250 * get the user virtual address and establish the mapping between 3251 * uvaddr and device physical address. 3252 */ 3253 if ((ret = devmap_device(dhp_head, as, addrp, off, len, flags)) 3254 != 0) { 3255 /* 3256 * free devmap handles if error during the mapping. 3257 */ 3258 free_devmap_handle(dhp_head); 3259 3260 return (ret); 3261 } 3262 3263 /* 3264 * call the driver's devmap_map callback to do more after the mapping, 3265 * such as to allocate driver private data for context management. 3266 */ 3267 dhp = dhp_head; 3268 map_off = off; 3269 addr = *addrp; 3270 while (dhp != NULL) { 3271 callbackops = &dhp->dh_callbackops; 3272 dhp->dh_uvaddr = addr; 3273 dhp_curr = dhp; 3274 if (callbackops->devmap_map != NULL) { 3275 ret = (*callbackops->devmap_map)((devmap_cookie_t)dhp, 3276 dev, flags, map_off, 3277 dhp->dh_len, &dhp->dh_pvtp); 3278 if (ret != 0) { 3279 struct segdev_data *sdp; 3280 3281 /* 3282 * call driver's devmap_unmap entry point 3283 * to free driver resources. 3284 */ 3285 dhp = dhp_head; 3286 map_off = off; 3287 while (dhp != dhp_curr) { 3288 callbackops = &dhp->dh_callbackops; 3289 if (callbackops->devmap_unmap != NULL) { 3290 (*callbackops->devmap_unmap)( 3291 dhp, dhp->dh_pvtp, 3292 map_off, dhp->dh_len, 3293 NULL, NULL, NULL, NULL); 3294 } 3295 map_off += dhp->dh_len; 3296 dhp = dhp->dh_next; 3297 } 3298 sdp = dhp_head->dh_seg->s_data; 3299 sdp->devmap_data = NULL; 3300 free_devmap_handle(dhp_head); 3301 return (ENXIO); 3302 } 3303 } 3304 map_off += dhp->dh_len; 3305 addr += dhp->dh_len; 3306 dhp = dhp->dh_next; 3307 } 3308 3309 return (0); 3310 } 3311 3312 int 3313 ddi_devmap_segmap(dev_t dev, off_t off, ddi_as_handle_t as, caddr_t *addrp, 3314 off_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred) 3315 { 3316 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP, 3317 "devmap_segmap:start"); 3318 return (devmap_setup(dev, (offset_t)off, (struct as *)as, addrp, 3319 (size_t)len, prot, maxprot, flags, cred)); 3320 } 3321 3322 /* 3323 * Called from devmap_devmem_setup/remap to see if can use large pages for 3324 * this device mapping. 3325 * Also calculate the max. page size for this mapping. 3326 * this page size will be used in fault routine for 3327 * optimal page size calculations. 3328 */ 3329 static void 3330 devmap_devmem_large_page_setup(devmap_handle_t *dhp) 3331 { 3332 ASSERT(dhp_is_devmem(dhp)); 3333 dhp->dh_mmulevel = 0; 3334 3335 /* 3336 * use large page size only if: 3337 * 1. device memory. 3338 * 2. mmu supports multiple page sizes, 3339 * 3. Driver did not disallow it 3340 * 4. dhp length is at least as big as the large pagesize 3341 * 5. 
the uvaddr and pfn are large pagesize aligned 3342 */ 3343 if (page_num_pagesizes() > 1 && 3344 !(dhp->dh_flags & (DEVMAP_USE_PAGESIZE | DEVMAP_MAPPING_INVALID))) { 3345 ulong_t base; 3346 int level; 3347 3348 base = (ulong_t)ptob(dhp->dh_pfn); 3349 for (level = 1; level < page_num_pagesizes(); level++) { 3350 size_t pgsize = page_get_pagesize(level); 3351 if ((dhp->dh_len < pgsize) || 3352 (!VA_PA_PGSIZE_ALIGNED((uintptr_t)dhp->dh_uvaddr, 3353 base, pgsize))) { 3354 break; 3355 } 3356 } 3357 dhp->dh_mmulevel = level - 1; 3358 } 3359 if (dhp->dh_mmulevel > 0) { 3360 dhp->dh_flags |= DEVMAP_FLAG_LARGE; 3361 } else { 3362 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE; 3363 } 3364 } 3365 3366 /* 3367 * Called by driver devmap routine to pass device specific info to 3368 * the framework. used for device memory mapping only. 3369 */ 3370 int 3371 devmap_devmem_setup(devmap_cookie_t dhc, dev_info_t *dip, 3372 struct devmap_callback_ctl *callbackops, uint_t rnumber, offset_t roff, 3373 size_t len, uint_t maxprot, uint_t flags, ddi_device_acc_attr_t *accattrp) 3374 { 3375 devmap_handle_t *dhp = (devmap_handle_t *)dhc; 3376 ddi_acc_handle_t handle; 3377 ddi_map_req_t mr; 3378 ddi_acc_hdl_t *hp; 3379 int err; 3380 3381 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_SETUP, 3382 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx", 3383 (void *)dhp, roff, rnumber, (uint_t)len); 3384 DEBUGF(2, (CE_CONT, "devmap_devmem_setup: dhp %p offset %llx " 3385 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len)); 3386 3387 /* 3388 * First to check if this function has been called for this dhp. 3389 */ 3390 if (dhp->dh_flags & DEVMAP_SETUP_DONE) 3391 return (DDI_FAILURE); 3392 3393 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot) 3394 return (DDI_FAILURE); 3395 3396 if (flags & DEVMAP_MAPPING_INVALID) { 3397 /* 3398 * Don't go up the tree to get pfn if the driver specifies 3399 * DEVMAP_MAPPING_INVALID in flags. 3400 * 3401 * If DEVMAP_MAPPING_INVALID is specified, we have to grant 3402 * remap permission. 3403 */ 3404 if (!(flags & DEVMAP_ALLOW_REMAP)) { 3405 return (DDI_FAILURE); 3406 } 3407 dhp->dh_pfn = PFN_INVALID; 3408 } else { 3409 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL); 3410 if (handle == NULL) 3411 return (DDI_FAILURE); 3412 3413 hp = impl_acc_hdl_get(handle); 3414 hp->ah_vers = VERS_ACCHDL; 3415 hp->ah_dip = dip; 3416 hp->ah_rnumber = rnumber; 3417 hp->ah_offset = roff; 3418 hp->ah_len = len; 3419 if (accattrp != NULL) 3420 hp->ah_acc = *accattrp; 3421 3422 mr.map_op = DDI_MO_MAP_LOCKED; 3423 mr.map_type = DDI_MT_RNUMBER; 3424 mr.map_obj.rnumber = rnumber; 3425 mr.map_prot = maxprot & dhp->dh_orig_maxprot; 3426 mr.map_flags = DDI_MF_DEVICE_MAPPING; 3427 mr.map_handlep = hp; 3428 mr.map_vers = DDI_MAP_VERSION; 3429 3430 /* 3431 * up the device tree to get pfn. 3432 * The rootnex_map_regspec() routine in nexus drivers has been 3433 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING. 
3434 */ 3435 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&dhp->dh_pfn); 3436 dhp->dh_hat_attr = hp->ah_hat_flags; 3437 impl_acc_hdl_free(handle); 3438 3439 if (err) 3440 return (DDI_FAILURE); 3441 } 3442 /* Should not be using devmem setup for memory pages */ 3443 ASSERT(!pf_is_memory(dhp->dh_pfn)); 3444 3445 /* Only some of the flags bits are settable by the driver */ 3446 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS); 3447 dhp->dh_len = ptob(btopr(len)); 3448 3449 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE; 3450 dhp->dh_roff = ptob(btop(roff)); 3451 3452 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */ 3453 devmap_devmem_large_page_setup(dhp); 3454 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot; 3455 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot); 3456 3457 3458 if (callbackops != NULL) { 3459 bcopy(callbackops, &dhp->dh_callbackops, 3460 sizeof (struct devmap_callback_ctl)); 3461 } 3462 3463 /* 3464 * Initialize dh_lock if we want to do remap. 3465 */ 3466 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) { 3467 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL); 3468 dhp->dh_flags |= DEVMAP_LOCK_INITED; 3469 } 3470 3471 dhp->dh_flags |= DEVMAP_SETUP_DONE; 3472 3473 return (DDI_SUCCESS); 3474 } 3475 3476 int 3477 devmap_devmem_remap(devmap_cookie_t dhc, dev_info_t *dip, 3478 uint_t rnumber, offset_t roff, size_t len, uint_t maxprot, 3479 uint_t flags, ddi_device_acc_attr_t *accattrp) 3480 { 3481 devmap_handle_t *dhp = (devmap_handle_t *)dhc; 3482 ddi_acc_handle_t handle; 3483 ddi_map_req_t mr; 3484 ddi_acc_hdl_t *hp; 3485 pfn_t pfn; 3486 uint_t hat_flags; 3487 int err; 3488 3489 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_REMAP, 3490 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx", 3491 (void *)dhp, roff, rnumber, (uint_t)len); 3492 DEBUGF(2, (CE_CONT, "devmap_devmem_remap: dhp %p offset %llx " 3493 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len)); 3494 3495 /* 3496 * Return failure if setup has not been done or no remap permission 3497 * has been granted during the setup. 
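	 * For example, a driver that passed DEVMAP_ALLOW_REMAP at setup
	 * time may later retarget the mapping from its access/context
	 * callback (new_rnumber and attr below are illustrative):
	 *
	 *	(void) devmap_devmem_remap(dhp, dip, new_rnumber, 0, len,
	 *	    maxprot, 0, &attr);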
3498 */ 3499 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 || 3500 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0) 3501 return (DDI_FAILURE); 3502 3503 /* Only DEVMAP_MAPPING_INVALID flag supported for remap */ 3504 if ((flags != 0) && (flags != DEVMAP_MAPPING_INVALID)) 3505 return (DDI_FAILURE); 3506 3507 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot) 3508 return (DDI_FAILURE); 3509 3510 if (!(flags & DEVMAP_MAPPING_INVALID)) { 3511 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL); 3512 if (handle == NULL) 3513 return (DDI_FAILURE); 3514 } 3515 3516 HOLD_DHP_LOCK(dhp); 3517 3518 /* 3519 * Unload the old mapping, so next fault will setup the new mappings 3520 * Do this while holding the dhp lock so other faults dont reestablish 3521 * the mappings 3522 */ 3523 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr, 3524 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER); 3525 3526 if (flags & DEVMAP_MAPPING_INVALID) { 3527 dhp->dh_flags |= DEVMAP_MAPPING_INVALID; 3528 dhp->dh_pfn = PFN_INVALID; 3529 } else { 3530 /* clear any prior DEVMAP_MAPPING_INVALID flag */ 3531 dhp->dh_flags &= ~DEVMAP_MAPPING_INVALID; 3532 hp = impl_acc_hdl_get(handle); 3533 hp->ah_vers = VERS_ACCHDL; 3534 hp->ah_dip = dip; 3535 hp->ah_rnumber = rnumber; 3536 hp->ah_offset = roff; 3537 hp->ah_len = len; 3538 if (accattrp != NULL) 3539 hp->ah_acc = *accattrp; 3540 3541 mr.map_op = DDI_MO_MAP_LOCKED; 3542 mr.map_type = DDI_MT_RNUMBER; 3543 mr.map_obj.rnumber = rnumber; 3544 mr.map_prot = maxprot & dhp->dh_orig_maxprot; 3545 mr.map_flags = DDI_MF_DEVICE_MAPPING; 3546 mr.map_handlep = hp; 3547 mr.map_vers = DDI_MAP_VERSION; 3548 3549 /* 3550 * up the device tree to get pfn. 3551 * The rootnex_map_regspec() routine in nexus drivers has been 3552 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING. 3553 */ 3554 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&pfn); 3555 hat_flags = hp->ah_hat_flags; 3556 impl_acc_hdl_free(handle); 3557 if (err) { 3558 RELE_DHP_LOCK(dhp); 3559 return (DDI_FAILURE); 3560 } 3561 /* 3562 * Store result of ddi_map first in local variables, as we do 3563 * not want to overwrite the existing dhp with wrong data. 3564 */ 3565 dhp->dh_pfn = pfn; 3566 dhp->dh_hat_attr = hat_flags; 3567 } 3568 3569 /* clear the large page size flag */ 3570 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE; 3571 3572 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE; 3573 dhp->dh_roff = ptob(btop(roff)); 3574 3575 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */ 3576 devmap_devmem_large_page_setup(dhp); 3577 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot; 3578 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot); 3579 3580 RELE_DHP_LOCK(dhp); 3581 return (DDI_SUCCESS); 3582 } 3583 3584 /* 3585 * called by driver devmap routine to pass kernel virtual address mapping 3586 * info to the framework. used only for kernel memory 3587 * allocated from ddi_umem_alloc(). 
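 * For illustration, a devmap(9E) entry point exporting a buffer obtained
 * from ddi_umem_alloc() might call (sketch; xx_acc_attr is hypothetical):
 *
 *	buf = ddi_umem_alloc(size, DDI_UMEM_SLEEP, &cookie);
 *	...
 *	error = devmap_umem_setup(dhp, dip, NULL, cookie, 0, size,
 *	    PROT_ALL, 0, &xx_acc_attr);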
3588 */ 3589 int 3590 devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip, 3591 struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie, 3592 offset_t off, size_t len, uint_t maxprot, uint_t flags, 3593 ddi_device_acc_attr_t *accattrp) 3594 { 3595 devmap_handle_t *dhp = (devmap_handle_t *)dhc; 3596 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie; 3597 3598 #ifdef lint 3599 dip = dip; 3600 #endif 3601 3602 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_SETUP, 3603 "devmap_umem_setup:start dhp=%p offset=%llx cookie=%p len=%lx", 3604 (void *)dhp, off, cookie, len); 3605 DEBUGF(2, (CE_CONT, "devmap_umem_setup: dhp %p offset %llx " 3606 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len)); 3607 3608 if (cookie == NULL) 3609 return (DDI_FAILURE); 3610 3611 /* For UMEM_TRASH, this restriction is not needed */ 3612 if ((off + len) > cp->size) 3613 return (DDI_FAILURE); 3614 3615 /* check if the cache attributes are supported */ 3616 if (i_ddi_check_cache_attr(flags) == B_FALSE) 3617 return (DDI_FAILURE); 3618 3619 /* 3620 * First to check if this function has been called for this dhp. 3621 */ 3622 if (dhp->dh_flags & DEVMAP_SETUP_DONE) 3623 return (DDI_FAILURE); 3624 3625 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot) 3626 return (DDI_FAILURE); 3627 3628 if (flags & DEVMAP_MAPPING_INVALID) { 3629 /* 3630 * If DEVMAP_MAPPING_INVALID is specified, we have to grant 3631 * remap permission. 3632 */ 3633 if (!(flags & DEVMAP_ALLOW_REMAP)) { 3634 return (DDI_FAILURE); 3635 } 3636 } else { 3637 dhp->dh_cookie = cookie; 3638 dhp->dh_roff = ptob(btop(off)); 3639 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff; 3640 /* set HAT cache attributes */ 3641 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr); 3642 /* set HAT endianess attributes */ 3643 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr); 3644 } 3645 3646 /* 3647 * The default is _not_ to pass HAT_LOAD_NOCONSIST to hat_devload(); 3648 * we pass HAT_LOAD_NOCONSIST _only_ in cases where hat tries to 3649 * create consistent mappings but our intention was to create 3650 * non-consistent mappings. 3651 * 3652 * DEVMEM: hat figures it out it's DEVMEM and creates non-consistent 3653 * mappings. 3654 * 3655 * kernel exported memory: hat figures it out it's memory and always 3656 * creates consistent mappings. 3657 * 3658 * /dev/mem: non-consistent mappings. See comments in common/io/mem.c 3659 * 3660 * /dev/kmem: consistent mappings are created unless they are 3661 * MAP_FIXED. We _explicitly_ tell hat to create non-consistent 3662 * mappings by passing HAT_LOAD_NOCONSIST in case of MAP_FIXED 3663 * mappings of /dev/kmem. See common/io/mem.c 3664 */ 3665 3666 /* Only some of the flags bits are settable by the driver */ 3667 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS); 3668 3669 dhp->dh_len = ptob(btopr(len)); 3670 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot; 3671 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot); 3672 3673 if (callbackops != NULL) { 3674 bcopy(callbackops, &dhp->dh_callbackops, 3675 sizeof (struct devmap_callback_ctl)); 3676 } 3677 /* 3678 * Initialize dh_lock if we want to do remap. 
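	 * (dh_lock matters only when DEVMAP_ALLOW_REMAP was granted;
	 * HOLD_DHP_LOCK/RELE_DHP_LOCK are no-ops otherwise.)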
3679 	 */
3680 	if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3681 		mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3682 		dhp->dh_flags |= DEVMAP_LOCK_INITED;
3683 	}
3684 
3685 	dhp->dh_flags |= DEVMAP_SETUP_DONE;
3686 
3687 	return (DDI_SUCCESS);
3688 }
3689 
3690 int
3691 devmap_umem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3692     ddi_umem_cookie_t cookie, offset_t off, size_t len, uint_t maxprot,
3693     uint_t flags, ddi_device_acc_attr_t *accattrp)
3694 {
3695 	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3696 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3697 
3698 	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_REMAP,
3699 	    "devmap_umem_remap:start dhp=%p offset=%llx cookie=%p len=%lx",
3700 	    (void *)dhp, off, cookie, len);
3701 	DEBUGF(2, (CE_CONT, "devmap_umem_remap: dhp %p offset %llx "
3702 	    "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3703 
3704 #ifdef lint
3705 	dip = dip;
3706 	accattrp = accattrp;
3707 #endif
3708 	/*
3709 	 * Return failure if setup has not been done or no remap permission
3710 	 * has been granted during the setup.
3711 	 */
3712 	if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3713 	    (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3714 		return (DDI_FAILURE);
3715 
3716 	/* No flags supported for remap yet */
3717 	if (flags != 0)
3718 		return (DDI_FAILURE);
3719 
3720 	/* check if the cache attributes are supported */
3721 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
3722 		return (DDI_FAILURE);
3723 
3724 	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3725 		return (DDI_FAILURE);
3726 
3727 	/* For UMEM_TRASH, this restriction is not needed */
3728 	if ((off + len) > cp->size)
3729 		return (DDI_FAILURE);
3730 
3731 	HOLD_DHP_LOCK(dhp);
3732 	/*
3733 	 * Unload the old mapping so the next fault will set up the new
3734 	 * mappings. Do this while holding the dhp lock so other faults
3735 	 * don't reestablish the mappings.
3736 	 */
3737 	hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3738 	    dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3739 
3740 	dhp->dh_cookie = cookie;
3741 	dhp->dh_roff = ptob(btop(off));
3742 	dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3743 	/* set HAT cache attributes */
3744 	i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3745 	/* set HAT endianness attributes */
3746 	i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3747 
3748 	/* clear the large page size flag */
3749 	dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3750 
3751 	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3752 	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3753 	RELE_DHP_LOCK(dhp);
3754 	return (DDI_SUCCESS);
3755 }
3756 
3757 /*
3758  * Set the timeout value for the driver's context management callback,
3759  * e.g. devmap_access().
3760  */
3761 void
3762 devmap_set_ctx_timeout(devmap_cookie_t dhc, clock_t ticks)
3763 {
3764 	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3765 
3766 	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SET_CTX_TIMEOUT,
3767 	    "devmap_set_ctx_timeout:start dhp=%p ticks=%x",
3768 	    (void *)dhp, ticks);
3769 	dhp->dh_timeout_length = ticks;
3770 }
3771 
3772 int
3773 devmap_default_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
3774     size_t len, uint_t type, uint_t rw)
3775 {
3776 #ifdef lint
3777 	pvtp = pvtp;
3778 #endif
3779 
3780 	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DEFAULT_ACCESS,
3781 	    "devmap_default_access:start");
3782 	return (devmap_load(dhp, off, len, type, rw));
3783 }
3784 
3785 /*
3786  * segkmem_alloc() wrapper to allocate memory which is both
3787  * non-relocatable (for DR) and sharelocked, since the rest
3788  * of this segment driver requires it.

/*
 * segkmem_alloc() wrapper to allocate memory which is both
 * non-relocatable (for DR) and sharelocked, since the rest
 * of this segment driver requires it.
 */
static void *
devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag)
{
	ASSERT(vmp != NULL);
	ASSERT(kvseg.s_base != NULL);
	vmflag |= (VM_NORELOC | SEGKMEM_SHARELOCKED);
	return (segkmem_alloc(vmp, size, vmflag));
}

/*
 * This is where things are a bit incestuous with seg_kmem: unlike
 * seg_kp, seg_kmem does not keep its pages long-term sharelocked, so
 * we need to do a bit of a dance around that to prevent duplication of
 * code until we decide to bite the bullet and implement a new kernel
 * segment for driver-allocated memory that is exported to user space.
 */
static void
devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(vmp != NULL);
	ASSERT(kvseg.s_base != NULL);
	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		/*
		 * Use page_find() instead of page_lookup() to find the page
		 * since we know that it is hashed and has a shared lock.
		 */
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);

		if (pp == NULL)
			panic("devmap_free_pages: page not found");
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
			if (pp == NULL)
				panic("devmap_free_pages: page already freed");
		}
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		page_destroy(pp, 0);
	}
	page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

/*
 * devmap_umem_alloc_np() replaces kmem_zalloc() as the method for
 * allocating non-pageable kmem in response to a ddi_umem_alloc()
 * default request. For now we allocate our own pages and we keep
 * them long-term sharelocked, since: A) the fault routines expect the
 * memory to already be locked; B) pageable umem is already long-term
 * locked; C) it's a lot of work to make it otherwise, particularly
 * since the nexus layer expects the pages to never fault. An RFE is to
 * not keep the pages long-term locked, but instead to be able to
 * take faults on them and simply look them up in kvp in case we
 * fault on them. Even then, we must take care not to let pageout
 * steal them from us since the data must remain resident; if we
 * do this we must come up with some way to pin the pages to prevent
 * faults while a driver is doing DMA to/from them.
 */
static void *
devmap_umem_alloc_np(size_t size, size_t flags)
{
	void *buf;
	int vmflags = (flags & DDI_UMEM_NOSLEEP) ? VM_NOSLEEP : VM_SLEEP;

	buf = vmem_alloc(umem_np_arena, size, vmflags);
	if (buf != NULL)
		bzero(buf, size);
	return (buf);
}

static void
devmap_umem_free_np(void *addr, size_t size)
{
	vmem_free(umem_np_arena, addr, size);
}

/*
 * Allocate page-aligned kernel memory for exporting to user land.
 * The devmap framework will use the cookie allocated by ddi_umem_alloc()
 * to find a user virtual address that has the same color as the address
 * allocated here.
 */
void *
ddi_umem_alloc(size_t size, int flags, ddi_umem_cookie_t *cookie)
{
	register size_t len = ptob(btopr(size));
	void *buf = NULL;
	struct ddi_umem_cookie *cp;
	int iflags = 0;

	*cookie = NULL;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_ALLOC,
	    "devmap_umem_alloc:start");
	if (len == 0)
		return ((void *)NULL);

	/*
	 * allocate cookie
	 */
	if ((cp = kmem_zalloc(sizeof (struct ddi_umem_cookie),
	    flags & DDI_UMEM_NOSLEEP ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
		ASSERT(flags & DDI_UMEM_NOSLEEP);
		return ((void *)NULL);
	}

	if (flags & DDI_UMEM_PAGEABLE) {
		/* Only one of the flags is allowed */
		ASSERT(!(flags & DDI_UMEM_TRASH));
		/* initialize resource with 0 */
		iflags = KPD_ZERO;

		/*
		 * To allocate unlocked pageable memory, use segkp_get() to
		 * create a segkp segment. Since segkp can only service kas,
		 * other segment drivers such as segdev have to do
		 * as_fault(segkp, SOFTLOCK) in their fault routines.
		 */
		if (flags & DDI_UMEM_NOSLEEP)
			iflags |= KPD_NOWAIT;

		if ((buf = segkp_get(segkp, len, iflags)) == NULL) {
			kmem_free(cp, sizeof (struct ddi_umem_cookie));
			return ((void *)NULL);
		}
		cp->type = KMEM_PAGEABLE;
		mutex_init(&cp->lock, NULL, MUTEX_DEFAULT, NULL);
		cp->locked = 0;
	} else if (flags & DDI_UMEM_TRASH) {
		/* Only one of the flags is allowed */
		ASSERT(!(flags & DDI_UMEM_PAGEABLE));
		cp->type = UMEM_TRASH;
		buf = NULL;
	} else {
		if ((buf = devmap_umem_alloc_np(len, flags)) == NULL) {
			kmem_free(cp, sizeof (struct ddi_umem_cookie));
			return ((void *)NULL);
		}

		cp->type = KMEM_NON_PAGEABLE;
	}

	/*
	 * Save the size here; it is needed later when the memory
	 * is freed.
	 */
	cp->size = len;
	cp->cvaddr = (caddr_t)buf;

	*cookie = (void *)cp;
	return (buf);
}
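
/*
 * Illustrative sketch, excluded from compilation: a typical
 * allocate/free pairing for an exported buffer. The xx_softc and its
 * fields are assumptions made for the example. With DDI_UMEM_SLEEP
 * the allocation blocks until it can succeed; the returned cookie is
 * what a later devmap_umem_setup() call consumes.
 */
#if 0
static int
xx_alloc_export_buf(struct xx_softc *sc, size_t size)
{
	sc->xx_umem_kva = ddi_umem_alloc(size, DDI_UMEM_SLEEP,
	    &sc->xx_umem_cookie);
	if (sc->xx_umem_kva == NULL)
		return (DDI_FAILURE);
	sc->xx_umem_size = ptob(btopr(size));	/* rounded, as above */
	return (DDI_SUCCESS);
}

static void
xx_free_export_buf(struct xx_softc *sc)
{
	/* frees both the memory and the cookie state */
	ddi_umem_free(sc->xx_umem_cookie);
	sc->xx_umem_cookie = NULL;
	sc->xx_umem_kva = NULL;
}
#endif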

void
ddi_umem_free(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie *cp;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_FREE,
	    "devmap_umem_free:start");

	/*
	 * A NULL cookie has no effect on the system.
	 */
	if (cookie == NULL)
		return;

	cp = (struct ddi_umem_cookie *)cookie;

	switch (cp->type) {
	case KMEM_PAGEABLE:
		ASSERT(cp->cvaddr != NULL && cp->size != 0);
		/*
		 * Check if there are still any pending faults on the cookie
		 * while the driver is deleting it.
		 * XXX - could change to an ASSERT, but that won't catch
		 * errant drivers.
		 */
		mutex_enter(&cp->lock);
		if (cp->locked) {
			mutex_exit(&cp->lock);
			panic("ddi_umem_free for cookie with pending faults %p",
			    (void *)cp);
			return;
		}

		segkp_release(segkp, cp->cvaddr);

		/*
		 * Release the mutex associated with this cookie.
		 */
		mutex_destroy(&cp->lock);
		break;
	case KMEM_NON_PAGEABLE:
		ASSERT(cp->cvaddr != NULL && cp->size != 0);
		devmap_umem_free_np(cp->cvaddr, cp->size);
		break;
	case UMEM_TRASH:
		break;
	case UMEM_LOCKED:
		/* Callers should use ddi_umem_unlock for this type */
		ddi_umem_unlock(cookie);
		/* Frees the cookie too */
		return;
	default:
		/* panic so we can diagnose the underlying cause */
		panic("ddi_umem_free: illegal cookie type 0x%x\n",
		    cp->type);
	}

	kmem_free(cookie, sizeof (struct ddi_umem_cookie));
}

static int
segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

	/*
	 * It looks as if it is always mapped shared
	 */
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
	    "segdev_getmemid:start");
	memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
	memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
	return (0);
}

/*ARGSUSED*/
static int
segdev_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

/*
 * ddi_umem_alloc() non-pageable quantum cache max size.
 * This is just a SWAG.
 */
#define	DEVMAP_UMEM_QUANTUM	(8*PAGESIZE)

/*
 * Initialize seg_dev from boot. This routine sets up the trash page
 * and creates the umem_np_arena used to back non-pageable memory
 * requests.
 */
void
segdev_init(void)
{
	struct seg kseg;

	umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
	    devmap_alloc_pages, devmap_free_pages, heap_arena,
	    DEVMAP_UMEM_QUANTUM, VM_SLEEP);

	kseg.s_as = &kas;
	trashpp = page_create_va(&trashvp, 0, PAGESIZE,
	    PG_NORELOC | PG_EXCL | PG_WAIT, &kseg, NULL);
	if (trashpp == NULL)
		panic("segdev_init: failed to create trash page");
	pagezero(trashpp, 0, PAGESIZE);
	page_downgrade(trashpp);
}

/*
 * Invoke platform-dependent support routines so that /proc can have
 * the platform code deal with curious hardware.
 */
int
segdev_copyfrom(struct seg *seg,
    caddr_t uaddr, const void *devaddr, void *kaddr, size_t len)
{
	struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	struct snode *sp = VTOS(VTOCVP(sdp->vp));

	return (e_ddi_copyfromdev(sp->s_dip,
	    (off_t)(uaddr - seg->s_base), devaddr, kaddr, len));
}

int
segdev_copyto(struct seg *seg,
    caddr_t uaddr, const void *kaddr, void *devaddr, size_t len)
{
	struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	struct snode *sp = VTOS(VTOCVP(sdp->vp));

	return (e_ddi_copytodev(sp->s_dip,
	    (off_t)(uaddr - seg->s_base), kaddr, devaddr, len));
}