5382 pvn_getpages handles lengths <= PAGESIZE just fine
--- old/usr/src/uts/common/fs/specfs/specvnops.c
+++ new/usr/src/uts/common/fs/specfs/specvnops.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 25 */
25 26
26 27 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 28 /* All Rights Reserved */
28 29
29 30 /*
30 31 * University Copyright- Copyright (c) 1982, 1986, 1988
31 32 * The Regents of the University of California
32 33 * All Rights Reserved
33 34 *
34 35 * University Acknowledgment- Portions of this document are derived from
35 36 * software developed by the University of California, Berkeley, and its
36 37 * contributors.
37 38 */
38 39
39 40 #include <sys/types.h>
40 41 #include <sys/thread.h>
41 42 #include <sys/t_lock.h>
42 43 #include <sys/param.h>
43 44 #include <sys/systm.h>
44 45 #include <sys/bitmap.h>
45 46 #include <sys/buf.h>
46 47 #include <sys/cmn_err.h>
47 48 #include <sys/conf.h>
48 49 #include <sys/ddi.h>
49 50 #include <sys/debug.h>
50 51 #include <sys/dkio.h>
51 52 #include <sys/errno.h>
52 53 #include <sys/time.h>
53 54 #include <sys/fcntl.h>
54 55 #include <sys/flock.h>
55 56 #include <sys/file.h>
56 57 #include <sys/kmem.h>
57 58 #include <sys/mman.h>
58 59 #include <sys/open.h>
59 60 #include <sys/swap.h>
60 61 #include <sys/sysmacros.h>
61 62 #include <sys/uio.h>
62 63 #include <sys/vfs.h>
63 64 #include <sys/vfs_opreg.h>
64 65 #include <sys/vnode.h>
65 66 #include <sys/stat.h>
66 67 #include <sys/poll.h>
67 68 #include <sys/stream.h>
68 69 #include <sys/strsubr.h>
69 70 #include <sys/policy.h>
70 71 #include <sys/devpolicy.h>
71 72
72 73 #include <sys/proc.h>
73 74 #include <sys/user.h>
74 75 #include <sys/session.h>
75 76 #include <sys/vmsystm.h>
76 77 #include <sys/vtrace.h>
77 78 #include <sys/pathname.h>
78 79
79 80 #include <sys/fs/snode.h>
80 81
81 82 #include <vm/seg.h>
82 83 #include <vm/seg_map.h>
83 84 #include <vm/page.h>
84 85 #include <vm/pvn.h>
85 86 #include <vm/seg_dev.h>
86 87 #include <vm/seg_vn.h>
87 88
88 89 #include <fs/fs_subr.h>
89 90
90 91 #include <sys/esunddi.h>
91 92 #include <sys/autoconf.h>
92 93 #include <sys/sunndi.h>
93 94 #include <sys/contract/device_impl.h>
94 95
95 96
96 97 static int spec_open(struct vnode **, int, struct cred *, caller_context_t *);
97 98 static int spec_close(struct vnode *, int, int, offset_t, struct cred *,
98 99 caller_context_t *);
99 100 static int spec_read(struct vnode *, struct uio *, int, struct cred *,
100 101 caller_context_t *);
101 102 static int spec_write(struct vnode *, struct uio *, int, struct cred *,
102 103 caller_context_t *);
103 104 static int spec_ioctl(struct vnode *, int, intptr_t, int, struct cred *, int *,
104 105 caller_context_t *);
105 106 static int spec_getattr(struct vnode *, struct vattr *, int, struct cred *,
106 107 caller_context_t *);
107 108 static int spec_setattr(struct vnode *, struct vattr *, int, struct cred *,
108 109 caller_context_t *);
109 110 static int spec_access(struct vnode *, int, int, struct cred *,
110 111 caller_context_t *);
111 112 static int spec_create(struct vnode *, char *, vattr_t *, enum vcexcl, int,
112 113 struct vnode **, struct cred *, int, caller_context_t *, vsecattr_t *);
113 114 static int spec_fsync(struct vnode *, int, struct cred *, caller_context_t *);
114 115 static void spec_inactive(struct vnode *, struct cred *, caller_context_t *);
115 116 static int spec_fid(struct vnode *, struct fid *, caller_context_t *);
116 117 static int spec_seek(struct vnode *, offset_t, offset_t *, caller_context_t *);
117 118 static int spec_frlock(struct vnode *, int, struct flock64 *, int, offset_t,
118 119 struct flk_callback *, struct cred *, caller_context_t *);
119 120 static int spec_realvp(struct vnode *, struct vnode **, caller_context_t *);
120 121
121 122 static int spec_getpage(struct vnode *, offset_t, size_t, uint_t *, page_t **,
122 123 size_t, struct seg *, caddr_t, enum seg_rw, struct cred *,
123 124 caller_context_t *);
124 125 static int spec_putapage(struct vnode *, page_t *, u_offset_t *, size_t *, int,
125 126 struct cred *);
126 127 static struct buf *spec_startio(struct vnode *, page_t *, u_offset_t, size_t,
127 128 int);
128 129 static int spec_getapage(struct vnode *, u_offset_t, size_t, uint_t *,
129 130 page_t **, size_t, struct seg *, caddr_t, enum seg_rw, struct cred *);
130 131 static int spec_map(struct vnode *, offset_t, struct as *, caddr_t *, size_t,
131 132 uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
132 133 static int spec_addmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
133 134 uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
134 135 static int spec_delmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
135 136 uint_t, uint_t, uint_t, struct cred *, caller_context_t *);
136 137
137 138 static int spec_poll(struct vnode *, short, int, short *, struct pollhead **,
138 139 caller_context_t *);
139 140 static int spec_dump(struct vnode *, caddr_t, offset_t, offset_t,
140 141 caller_context_t *);
141 142 static int spec_pageio(struct vnode *, page_t *, u_offset_t, size_t, int,
142 143 cred_t *, caller_context_t *);
143 144
144 145 static int spec_getsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
145 146 caller_context_t *);
146 147 static int spec_setsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
147 148 caller_context_t *);
148 149 static int spec_pathconf(struct vnode *, int, ulong_t *, struct cred *,
149 150 caller_context_t *);
150 151
151 152 #define SN_HOLD(csp) { \
152 153 mutex_enter(&csp->s_lock); \
153 154 csp->s_count++; \
154 155 mutex_exit(&csp->s_lock); \
155 156 }
156 157
157 158 #define SN_RELE(csp) { \
158 159 mutex_enter(&csp->s_lock); \
159 160 csp->s_count--; \
160 161 ASSERT((csp->s_count > 0) || (csp->s_vnode->v_stream == NULL)); \
161 162 mutex_exit(&csp->s_lock); \
162 163 }
163 164
164 165 #define S_ISFENCED(sp) ((VTOS((sp)->s_commonvp))->s_flag & SFENCED)
165 166
166 167 struct vnodeops *spec_vnodeops;
167 168
168 169 /*
169 170 * *PLEASE NOTE*: If you add new entry points to specfs, do
170 171 * not forget to add support for fencing. A fenced snode
171 172 * is indicated by the SFENCED flag in the common snode.
172 173 * If a snode is fenced, determine if your entry point is
173 174 * a configuration operation (Example: open), a detection
174 175 * operation (Example: getattr), an I/O operation (Example: ioctl())
175 176 * or an unconfiguration operation (Example: close). If it is
176 177 * a configuration or detection operation, fail the operation
177 178 * for a fenced snode with an ENXIO or EIO as appropriate. If
178 179 * it is any other operation, let it through.
179 180 */
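
A minimal illustration (editorial sketch, not part of this webrev) of the resulting idiom: configuration and detection entry points in this file check the fence up front and bail out, exactly as spec_open() and spec_getattr() do below.

	/* fail configuration/detection operations on a fenced snode */
	if (S_ISFENCED(sp))
		return (ENXIO);
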
180 181
181 182 const fs_operation_def_t spec_vnodeops_template[] = {
182 183 VOPNAME_OPEN, { .vop_open = spec_open },
183 184 VOPNAME_CLOSE, { .vop_close = spec_close },
184 185 VOPNAME_READ, { .vop_read = spec_read },
185 186 VOPNAME_WRITE, { .vop_write = spec_write },
186 187 VOPNAME_IOCTL, { .vop_ioctl = spec_ioctl },
187 188 VOPNAME_GETATTR, { .vop_getattr = spec_getattr },
188 189 VOPNAME_SETATTR, { .vop_setattr = spec_setattr },
189 190 VOPNAME_ACCESS, { .vop_access = spec_access },
190 191 VOPNAME_CREATE, { .vop_create = spec_create },
191 192 VOPNAME_FSYNC, { .vop_fsync = spec_fsync },
192 193 VOPNAME_INACTIVE, { .vop_inactive = spec_inactive },
193 194 VOPNAME_FID, { .vop_fid = spec_fid },
194 195 VOPNAME_SEEK, { .vop_seek = spec_seek },
195 196 VOPNAME_PATHCONF, { .vop_pathconf = spec_pathconf },
196 197 VOPNAME_FRLOCK, { .vop_frlock = spec_frlock },
197 198 VOPNAME_REALVP, { .vop_realvp = spec_realvp },
198 199 VOPNAME_GETPAGE, { .vop_getpage = spec_getpage },
199 200 VOPNAME_PUTPAGE, { .vop_putpage = spec_putpage },
200 201 VOPNAME_MAP, { .vop_map = spec_map },
201 202 VOPNAME_ADDMAP, { .vop_addmap = spec_addmap },
202 203 VOPNAME_DELMAP, { .vop_delmap = spec_delmap },
203 204 VOPNAME_POLL, { .vop_poll = spec_poll },
204 205 VOPNAME_DUMP, { .vop_dump = spec_dump },
205 206 VOPNAME_PAGEIO, { .vop_pageio = spec_pageio },
206 207 VOPNAME_SETSECATTR, { .vop_setsecattr = spec_setsecattr },
207 208 VOPNAME_GETSECATTR, { .vop_getsecattr = spec_getsecattr },
208 209 NULL, NULL
209 210 };
210 211
211 212 /*
212 213 * Return address of spec_vnodeops
213 214 */
214 215 struct vnodeops *
215 216 spec_getvnodeops(void)
216 217 {
217 218 return (spec_vnodeops);
218 219 }
219 220
220 221 extern vnode_t *rconsvp;
221 222
222 223 /*
223 224 * Acquire the serial lock on the common snode.
224 225 */
225 226 #define LOCK_CSP(csp) (void) spec_lockcsp(csp, 0, 1, 0)
226 227 #define LOCKHOLD_CSP_SIG(csp) spec_lockcsp(csp, 1, 1, 1)
227 228 #define SYNCHOLD_CSP_SIG(csp, intr) spec_lockcsp(csp, intr, 0, 1)
228 229
229 230 typedef enum {
230 231 LOOP,
231 232 INTR,
232 233 SUCCESS
233 234 } slock_ret_t;
234 235
235 236 /*
236 237 * Synchronize with active SLOCKED snode, optionally checking for a signal and
237 238 * optionally returning with SLOCKED set and SN_HOLD done. The 'intr'
238 239 * argument determines if the thread is interruptible by a signal while
239 240 * waiting; the function returns INTR if interrupted while there is another
240 241 * thread closing this snode, and LOOP if interrupted otherwise.
241 242 * When SUCCESS is returned the 'hold' argument determines if the open
242 243 * count (SN_HOLD) has been incremented and the 'setlock' argument
243 244 * determines if the function returns with SLOCKED set.
244 245 */
245 246 static slock_ret_t
246 247 spec_lockcsp(struct snode *csp, int intr, int setlock, int hold)
247 248 {
248 249 slock_ret_t ret = SUCCESS;
249 250 mutex_enter(&csp->s_lock);
250 251 while (csp->s_flag & SLOCKED) {
251 252 csp->s_flag |= SWANT;
252 253 if (intr) {
253 254 if (!cv_wait_sig(&csp->s_cv, &csp->s_lock)) {
254 255 if (csp->s_flag & SCLOSING)
255 256 ret = INTR;
256 257 else
257 258 ret = LOOP;
258 259 mutex_exit(&csp->s_lock);
259 260 return (ret); /* interrupted */
260 261 }
261 262 } else {
262 263 cv_wait(&csp->s_cv, &csp->s_lock);
263 264 }
264 265 }
265 266 if (setlock)
266 267 csp->s_flag |= SLOCKED;
267 268 if (hold)
268 269 csp->s_count++; /* one more open reference : SN_HOLD */
269 270 mutex_exit(&csp->s_lock);
270 271 return (ret); /* serialized/locked */
271 272 }
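
A minimal caller sketch (editorial; example_hold and 'interruptible' are hypothetical names, not part of this webrev) showing how the return values are consumed, mirroring the SYNCHOLD_CSP_SIG loop in spec_open() below: retry on LOOP, map INTR to EINTR.

	static int
	example_hold(struct snode *csp, int interruptible)
	{
		slock_ret_t ret;

		/* retry when a signal interrupts an unrelated waiter */
		while ((ret = spec_lockcsp(csp, interruptible, 0, 1)) !=
		    SUCCESS) {
			if (ret == INTR)
				return (EINTR);	/* raced with a close */
		}
		return (0);	/* SN_HOLD done; SLOCKED not requested */
	}
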
272 273
273 274 /*
274 275 * Unlock the serial lock on the common snode
275 276 */
276 277 #define UNLOCK_CSP_LOCK_HELD(csp) \
277 278 ASSERT(mutex_owned(&csp->s_lock)); \
278 279 if (csp->s_flag & SWANT) \
279 280 cv_broadcast(&csp->s_cv); \
280 281 csp->s_flag &= ~(SWANT|SLOCKED);
281 282
282 283 #define UNLOCK_CSP(csp) \
283 284 mutex_enter(&csp->s_lock); \
284 285 UNLOCK_CSP_LOCK_HELD(csp); \
285 286 mutex_exit(&csp->s_lock);
286 287
287 288 /*
288 289 * compute/return the size of the device
289 290 */
290 291 #define SPEC_SIZE(csp) \
291 292 (((csp)->s_flag & SSIZEVALID) ? (csp)->s_size : spec_size(csp))
292 293
293 294 /*
294 295 * Compute and return the size. If the size in the common snode is valid then
295 296 * return it. If not valid then get the size from the driver and set size in
296 297 * the common snode. If the device has not been attached then we don't ask for
297 298 * an update from the driver; for non-streams SSIZEVALID stays unset until the
298 299 * device is attached. A stat of a mknod outside /devices (non-devfs) may
299 300 * report UNKNOWN_SIZE because the device may not be attached yet (SDIPSET not
300 301 * established in mknod until open time). A stat in /devices will report the
301 302 * size correctly. Specfs should always call SPEC_SIZE instead of referring
302 303 * directly to s_size to initialize/retrieve the size of a device.
303 304 *
304 305 * XXX There is an inconsistency between block and raw - "unknown" is
305 306 * UNKNOWN_SIZE for VBLK and 0 for VCHR(raw).
306 307 */
307 308 static u_offset_t
308 309 spec_size(struct snode *csp)
309 310 {
310 311 struct vnode *cvp = STOV(csp);
311 312 u_offset_t size;
312 313 int plen;
313 314 uint32_t size32;
314 315 dev_t dev;
315 316 dev_info_t *devi;
316 317 major_t maj;
317 318 uint_t blksize;
318 319 int blkshift;
319 320
320 321 ASSERT((csp)->s_commonvp == cvp); /* must be common node */
321 322
322 323 /* return cached value */
323 324 mutex_enter(&csp->s_lock);
324 325 if (csp->s_flag & SSIZEVALID) {
325 326 mutex_exit(&csp->s_lock);
326 327 return (csp->s_size);
327 328 }
328 329
329 330 /* VOP_GETATTR of mknod has not had devcnt restriction applied */
330 331 dev = cvp->v_rdev;
331 332 maj = getmajor(dev);
332 333 if (maj >= devcnt) {
333 334 /* return non-cached UNKNOWN_SIZE */
334 335 mutex_exit(&csp->s_lock);
335 336 return ((cvp->v_type == VCHR) ? 0 : UNKNOWN_SIZE);
336 337 }
337 338
338 339 /* establish cached zero size for streams */
339 340 if (STREAMSTAB(maj)) {
340 341 csp->s_size = 0;
341 342 csp->s_flag |= SSIZEVALID;
342 343 mutex_exit(&csp->s_lock);
343 344 return (0);
344 345 }
345 346
346 347 /*
347 348 * Return non-cached UNKNOWN_SIZE if not open.
348 349 *
349 350 * NB: This check is bogus, calling prop_op(9E) should be gated by
350 351 * attach, not open. Not having this check however opens up a new
351 352 * context under which a driver's prop_op(9E) could be called. Calling
352 353 * prop_op(9E) in this new context has been shown to expose latent
353 354 * driver bugs (insufficient NULL pointer checks that lead to panic).
354 355 * We are keeping this open check for now to avoid these panics.
355 356 */
356 357 if (csp->s_count == 0) {
357 358 mutex_exit(&csp->s_lock);
358 359 return ((cvp->v_type == VCHR) ? 0 : UNKNOWN_SIZE);
359 360 }
360 361
361 362 /* Return non-cached UNKNOWN_SIZE if not attached. */
362 363 if (((csp->s_flag & SDIPSET) == 0) || (csp->s_dip == NULL) ||
363 364 !i_ddi_devi_attached(csp->s_dip)) {
364 365 mutex_exit(&csp->s_lock);
365 366 return ((cvp->v_type == VCHR) ? 0 : UNKNOWN_SIZE);
366 367 }
367 368
368 369 devi = csp->s_dip;
369 370
370 371 /*
371 372 * Establish the cached size obtained from the attached driver. Since we
372 373 * know the devinfo node, for efficiency we use cdev_prop_op directly
373 374 * instead of [cb]dev_[Ss]size.
374 375 */
375 376 if (cvp->v_type == VCHR) {
376 377 size = 0;
377 378 plen = sizeof (size);
378 379 if (cdev_prop_op(dev, devi, PROP_LEN_AND_VAL_BUF,
379 380 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS |
380 381 DDI_PROP_CONSUMER_TYPED, "Size", (caddr_t)&size,
381 382 &plen) != DDI_PROP_SUCCESS) {
382 383 plen = sizeof (size32);
383 384 if (cdev_prop_op(dev, devi, PROP_LEN_AND_VAL_BUF,
384 385 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
385 386 "size", (caddr_t)&size32, &plen) ==
386 387 DDI_PROP_SUCCESS)
387 388 size = size32;
388 389 }
389 390 } else {
390 391 size = UNKNOWN_SIZE;
391 392 plen = sizeof (size);
392 393 if (cdev_prop_op(dev, devi, PROP_LEN_AND_VAL_BUF,
393 394 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS |
394 395 DDI_PROP_CONSUMER_TYPED, "Nblocks", (caddr_t)&size,
395 396 &plen) != DDI_PROP_SUCCESS) {
396 397 plen = sizeof (size32);
397 398 if (cdev_prop_op(dev, devi, PROP_LEN_AND_VAL_BUF,
398 399 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
399 400 "nblocks", (caddr_t)&size32, &plen) ==
400 401 DDI_PROP_SUCCESS)
401 402 size = size32;
402 403 }
403 404
404 405 if (size != UNKNOWN_SIZE) {
405 406 blksize = DEV_BSIZE; /* default */
406 407 plen = sizeof (blksize);
407 408
408 409 /* try to get dev_t specific "blksize" */
409 410 if (cdev_prop_op(dev, devi, PROP_LEN_AND_VAL_BUF,
410 411 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
411 412 "blksize", (caddr_t)&blksize, &plen) !=
412 413 DDI_PROP_SUCCESS) {
413 414 /*
414 415 * Try for dev_info node "device-blksize".
415 416 * If this fails then blksize will still be
416 417 * the DEV_BSIZE default value.
417 418 */
418 419 (void) cdev_prop_op(DDI_DEV_T_ANY, devi,
419 420 PROP_LEN_AND_VAL_BUF,
420 421 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
421 422 "device-blksize", (caddr_t)&blksize, &plen);
422 423 }
423 424
424 425 /* blksize must be a power of two */
425 426 ASSERT(BIT_ONLYONESET(blksize));
426 427 blkshift = highbit(blksize) - 1;
427 428
428 429 /* convert from block size to byte size */
429 430 if (size < (MAXOFFSET_T >> blkshift))
430 431 size = size << blkshift;
431 432 else
432 433 size = UNKNOWN_SIZE;
433 434 }
434 435 }
435 436
436 437 csp->s_size = size;
437 438 csp->s_flag |= SSIZEVALID;
438 439
439 440 mutex_exit(&csp->s_lock);
440 441 return (size);
441 442 }
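
A compact restatement (editorial sketch; blocks_to_bytes is a hypothetical helper, not part of this change) of the Nblocks-to-bytes conversion above, including the overflow guard:

	static u_offset_t
	blocks_to_bytes(u_offset_t nblocks, uint_t blksize)
	{
		/* blksize must be a power of two (see ASSERT above) */
		int blkshift = highbit(blksize) - 1;

		/* the shift must not overflow MAXOFFSET_T */
		if (nblocks < (MAXOFFSET_T >> blkshift))
			return (nblocks << blkshift);
		return (UNKNOWN_SIZE);
	}
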
442 443
443 444 /*
444 445 * This function deals with vnode substitution in the case of
445 446 * device cloning.
446 447 */
447 448 static int
448 449 spec_clone(struct vnode **vpp, dev_t newdev, int vtype, struct stdata *stp)
449 450 {
450 451 dev_t dev = (*vpp)->v_rdev;
451 452 major_t maj = getmajor(dev);
452 453 major_t newmaj = getmajor(newdev);
453 454 int sysclone = (maj == clone_major);
454 455 int qassociate_used = 0;
455 456 struct snode *oldsp, *oldcsp;
456 457 struct snode *newsp, *newcsp;
457 458 struct vnode *newvp, *newcvp;
458 459 dev_info_t *dip;
459 460 queue_t *dq;
460 461
461 462 ASSERT(dev != newdev);
462 463
463 464 /*
464 465 * Check for cloning across different drivers.
465 466 * We only support this under the system provided clone driver
466 467 * We only support this under the system-provided clone driver.
467 468 if ((maj != newmaj) && !sysclone) {
468 469 cmn_err(CE_NOTE,
469 470 "unsupported clone open maj = %u, newmaj = %u",
470 471 maj, newmaj);
471 472 return (ENXIO);
472 473 }
473 474
474 475 /* old */
475 476 oldsp = VTOS(*vpp);
476 477 oldcsp = VTOS(oldsp->s_commonvp);
477 478
478 479 /* new */
479 480 newvp = makespecvp(newdev, vtype);
480 481 ASSERT(newvp != NULL);
481 482 newsp = VTOS(newvp);
482 483 newcvp = newsp->s_commonvp;
483 484 newcsp = VTOS(newcvp);
484 485
485 486 /*
486 487 * Clones inherit fsid, realvp, and dip.
487 488 * XXX realvp inherit is not occurring, does fstat of clone work?
488 489 */
489 490 newsp->s_fsid = oldsp->s_fsid;
490 491 if (sysclone) {
491 492 newsp->s_flag |= SCLONE;
492 493 dip = NULL;
493 494 } else {
494 495 newsp->s_flag |= SSELFCLONE;
495 496 dip = oldcsp->s_dip;
496 497 }
497 498
498 499 /*
499 500 * If we cloned to an opened newdev that has already had
500 501 * spec_assoc_vp_with_devi called (SDIPSET set), then the association is
501 502 * already established.
502 503 */
503 504 if (!(newcsp->s_flag & SDIPSET)) {
504 505 /*
505 506 * Establish s_dip association for newdev.
506 507 *
507 508 * If we trusted the getinfo(9E) DDI_INFO_DEVT2INSTANCE
508 509 * implementation of all cloning drivers (SCLONE and SELFCLONE)
509 510 * we would always use e_ddi_hold_devi_by_dev(). We know that
510 511 * many drivers have had (still have?) problems with
511 512 * DDI_INFO_DEVT2INSTANCE, so we try to minimize reliance by
512 513 * detecting drivers that use QASSOCIATE (by looking down the
513 514 * stream) and setting their s_dip association to NULL.
514 515 */
515 516 qassociate_used = 0;
516 517 if (stp) {
517 518 for (dq = stp->sd_wrq; dq; dq = dq->q_next) {
518 519 if (_RD(dq)->q_flag & _QASSOCIATED) {
519 520 qassociate_used = 1;
520 521 dip = NULL;
521 522 break;
522 523 }
523 524 }
524 525 }
525 526
526 527 if (dip || qassociate_used) {
527 528 spec_assoc_vp_with_devi(newvp, dip);
528 529 } else {
529 530 /* derive association from newdev */
530 531 dip = e_ddi_hold_devi_by_dev(newdev, 0);
531 532 spec_assoc_vp_with_devi(newvp, dip);
532 533 if (dip)
533 534 ddi_release_devi(dip);
534 535 }
535 536 }
536 537
537 538 SN_HOLD(newcsp);
538 539
539 540 /* deal with stream stuff */
540 541 if (stp != NULL) {
541 542 LOCK_CSP(newcsp); /* synchronize stream open/close */
542 543 mutex_enter(&newcsp->s_lock);
543 544 newcvp->v_stream = newvp->v_stream = stp;
544 545 stp->sd_vnode = newcvp;
545 546 stp->sd_strtab = STREAMSTAB(newmaj);
546 547 mutex_exit(&newcsp->s_lock);
547 548 UNLOCK_CSP(newcsp);
548 549 }
549 550
550 551 /* substitute the vnode */
551 552 SN_RELE(oldcsp);
552 553 VN_RELE(*vpp);
553 554 *vpp = newvp;
554 555
555 556 return (0);
556 557 }
557 558
558 559 static int
559 560 spec_open(struct vnode **vpp, int flag, struct cred *cr, caller_context_t *cc)
560 561 {
561 562 major_t maj;
562 563 dev_t dev, newdev;
563 564 struct vnode *vp, *cvp;
564 565 struct snode *sp, *csp;
565 566 struct stdata *stp;
566 567 dev_info_t *dip;
567 568 int error, type;
568 569 contract_t *ct = NULL;
569 570 int open_returns_eintr;
570 571 slock_ret_t spec_locksp_ret;
571 572
572 573
573 574 flag &= ~FCREAT; /* paranoia */
574 575
575 576 vp = *vpp;
576 577 sp = VTOS(vp);
577 578 ASSERT((vp->v_type == VCHR) || (vp->v_type == VBLK));
578 579 if ((vp->v_type != VCHR) && (vp->v_type != VBLK))
579 580 return (ENXIO);
580 581
581 582 /*
582 583 * If the VFS_NODEVICES bit was set for the mount,
583 584 * do not allow opens of special devices.
584 585 */
585 586 if (sp->s_realvp && (sp->s_realvp->v_vfsp->vfs_flag & VFS_NODEVICES))
586 587 return (ENXIO);
587 588
588 589 newdev = dev = vp->v_rdev;
589 590
590 591 /*
591 592 * If we are opening a node that has not had spec_assoc_vp_with_devi
592 593 * called against it (mknod outside /devices or a non-dacf makespecvp
593 594 * node) then SDIPSET will not be set. In this case we call an
594 595 * interface which will reconstruct the path and lookup (drive attach)
595 596 * through devfs (e_ddi_hold_devi_by_dev -> e_ddi_hold_devi_by_path ->
596 597 * devfs_lookupname). For support of broken drivers that don't call
597 598 * ddi_create_minor_node for all minor nodes in their instance space,
598 599 * we call interfaces that operate at the directory/devinfo
599 600 * (major/instance) level instead of at the leaf/minor node level.
600 601 * After finding and attaching the dip we associate it with the
601 602 * common specfs vnode (s_dip), which sets SDIPSET. A DL_DETACH_REQ
602 603 * to a style-2 stream driver may set s_dip to NULL with SDIPSET set.
603 604 *
604 605 * NOTE: Although e_ddi_hold_devi_by_dev takes a dev_t argument, its
605 606 * implementation operates at the major/instance level since it only
606 607 * need to return a dip.
607 608 * needs to return a dip.
608 609 cvp = sp->s_commonvp;
609 610 csp = VTOS(cvp);
610 611 if (!(csp->s_flag & SDIPSET)) {
611 612 /* try to attach, return error if we fail */
612 613 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
613 614 return (ENXIO);
614 615
615 616 /* associate dip with the common snode s_dip */
616 617 spec_assoc_vp_with_devi(vp, dip);
617 618 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev */
618 619 }
619 620
620 621 /* check if device fenced off */
621 622 if (S_ISFENCED(sp))
622 623 return (ENXIO);
623 624
624 625 #ifdef DEBUG
625 626 /* verify attach/open exclusion guarantee */
626 627 dip = csp->s_dip;
627 628 ASSERT((dip == NULL) || i_ddi_devi_attached(dip));
628 629 #endif /* DEBUG */
629 630
630 631 if ((error = secpolicy_spec_open(cr, vp, flag)) != 0)
631 632 return (error);
632 633
633 634 /* Verify existence of open(9E) implementation. */
634 635 maj = getmajor(dev);
635 636 if ((maj >= devcnt) ||
636 637 (devopsp[maj]->devo_cb_ops == NULL) ||
637 638 (devopsp[maj]->devo_cb_ops->cb_open == NULL))
638 639 return (ENXIO);
639 640
640 641 /*
641 642 * split STREAMS vs. non-STREAMS
642 643 *
643 644 * If the device is a dual-personality device, then we might want
644 645 * to allow for a regular OTYP_BLK open. If, however, it's strictly
645 646 * a pure STREAMS device, the cb_open entry point will be
646 647 * nodev() which returns ENXIO. This does make this failure path
647 648 * somewhat longer, but such attempts to use OTYP_BLK with STREAMS
648 649 * devices should be exceedingly rare. (Most of the time they will
649 650 * be due to programmer error.)
650 651 */
651 652 if ((vp->v_type == VCHR) && (STREAMSTAB(maj)))
652 653 goto streams_open;
653 654
654 655 not_streams:
655 656 /*
656 657 * Wait for an in-progress last close to complete. This guarantees
657 658 * to the driver writer that we will never be in the driver's
658 659 * open and close on the same (dev_t, otype) at the same time.
659 660 * Open count already incremented (SN_HOLD) on non-zero return.
660 661 * The wait is interruptible by a signal if the driver sets the
661 662 * D_OPEN_RETURNS_EINTR cb_ops(9S) cb_flag or sets the
662 663 * ddi-open-returns-eintr(9P) property in its driver.conf.
663 664 */
664 665 if ((devopsp[maj]->devo_cb_ops->cb_flag & D_OPEN_RETURNS_EINTR) ||
665 666 (devnamesp[maj].dn_flags & DN_OPEN_RETURNS_EINTR))
666 667 open_returns_eintr = 1;
667 668 else
668 669 open_returns_eintr = 0;
669 670 while ((spec_locksp_ret = SYNCHOLD_CSP_SIG(csp, open_returns_eintr)) !=
670 671 SUCCESS) {
671 672 if (spec_locksp_ret == INTR)
672 673 return (EINTR);
673 674 }
674 675
675 676 /* non streams open */
676 677 type = (vp->v_type == VBLK ? OTYP_BLK : OTYP_CHR);
677 678 error = dev_open(&newdev, flag, type, cr);
678 679
679 680 /* deal with clone case */
680 681 if (error == 0 && dev != newdev) {
681 682 error = spec_clone(vpp, newdev, vp->v_type, NULL);
682 683 /*
683 684 * bail on clone failure, further processing
684 685 * results in undefined behaviors.
685 686 */
686 687 if (error != 0)
687 688 return (error);
688 689 sp = VTOS(*vpp);
689 690 csp = VTOS(sp->s_commonvp);
690 691 }
691 692
692 693 /*
693 694 * Create contracts only for userland opens.
694 695 * Successful open and cloning is done at this point.
695 696 */
696 697 if (error == 0 && !(flag & FKLYR)) {
697 698 int spec_type;
698 699 spec_type = (STOV(csp)->v_type == VCHR) ? S_IFCHR : S_IFBLK;
699 700 if (contract_device_open(newdev, spec_type, NULL) != 0) {
700 701 error = EIO;
701 702 }
702 703 }
703 704
704 705 if (error == 0) {
705 706 sp->s_size = SPEC_SIZE(csp);
706 707
707 708 if ((csp->s_flag & SNEEDCLOSE) == 0) {
708 709 int nmaj = getmajor(newdev);
709 710 mutex_enter(&csp->s_lock);
710 711 /* successful open needs a close later */
711 712 csp->s_flag |= SNEEDCLOSE;
712 713
713 714 /*
714 715 * Invalidate possible cached "unknown" size
715 716 * established by a VOP_GETATTR while open was in
716 717 * progress, and the driver might fail prop_op(9E).
717 718 */
718 719 if (((cvp->v_type == VCHR) && (csp->s_size == 0)) ||
719 720 ((cvp->v_type == VBLK) &&
720 721 (csp->s_size == UNKNOWN_SIZE)))
721 722 csp->s_flag &= ~SSIZEVALID;
722 723
723 724 if (devopsp[nmaj]->devo_cb_ops->cb_flag & D_64BIT)
724 725 csp->s_flag |= SLOFFSET;
725 726 if (devopsp[nmaj]->devo_cb_ops->cb_flag & D_U64BIT)
726 727 csp->s_flag |= SLOFFSET | SANYOFFSET;
727 728 mutex_exit(&csp->s_lock);
728 729 }
729 730 return (0);
730 731 }
731 732
732 733 /*
733 734 * Open failed. If we missed a close operation because
734 735 * we were trying to get the device open and it is the
735 736 * last in-progress open that is failing, then call close.
736 737 *
737 738 * NOTE: Only non-streams open has this race condition.
738 739 */
739 740 mutex_enter(&csp->s_lock);
740 741 csp->s_count--; /* decrement open count : SN_RELE */
741 742 if ((csp->s_count == 0) && /* no outstanding open */
742 743 (csp->s_mapcnt == 0) && /* no mapping */
743 744 (csp->s_flag & SNEEDCLOSE)) { /* need a close */
744 745 csp->s_flag &= ~(SNEEDCLOSE | SSIZEVALID);
745 746
746 747 /* See comment in spec_close() */
747 748 if (csp->s_flag & (SCLONE | SSELFCLONE))
748 749 csp->s_flag &= ~SDIPSET;
749 750
750 751 csp->s_flag |= SCLOSING;
751 752 mutex_exit(&csp->s_lock);
752 753
753 754 ASSERT(*vpp != NULL);
754 755 (void) device_close(*vpp, flag, cr);
755 756
756 757 mutex_enter(&csp->s_lock);
757 758 csp->s_flag &= ~SCLOSING;
758 759 mutex_exit(&csp->s_lock);
759 760 } else {
760 761 mutex_exit(&csp->s_lock);
761 762 }
762 763 return (error);
763 764
764 765 streams_open:
765 766 /*
766 767 * Lock common snode to prevent any new clone opens on this
767 768 * stream while one is in progress. This is necessary since
768 769 * the stream currently associated with the clone device will
769 770 * not be part of it after the clone open completes. Unfortunately
770 771 * we don't know in advance if this is a clone
771 772 * device so we have to lock all opens.
772 773 *
773 774 * If we fail, it's because of an interrupt - EINTR return is an
774 775 * expected aspect of opening a stream so we don't need to check
775 776 * D_OPEN_RETURNS_EINTR. Open count already incremented (SN_HOLD)
776 777 * on non-zero return.
777 778 */
778 779 if (LOCKHOLD_CSP_SIG(csp) != SUCCESS)
779 780 return (EINTR);
780 781
781 782 error = stropen(cvp, &newdev, flag, cr);
782 783 stp = cvp->v_stream;
783 784
784 785 /* deal with the clone case */
785 786 if ((error == 0) && (dev != newdev)) {
786 787 vp->v_stream = cvp->v_stream = NULL;
787 788 UNLOCK_CSP(csp);
788 789 error = spec_clone(vpp, newdev, vp->v_type, stp);
789 790 /*
790 791 * bail on clone failure, further processing
791 792 * results in undefined behaviors.
792 793 */
793 794 if (error != 0)
794 795 return (error);
795 796 sp = VTOS(*vpp);
796 797 csp = VTOS(sp->s_commonvp);
797 798 } else if (error == 0) {
798 799 vp->v_stream = stp;
799 800 UNLOCK_CSP(csp);
800 801 }
801 802
802 803 /*
803 804 * Create contracts only for userland opens.
804 805 * Successful open and cloning is done at this point.
805 806 */
806 807 if (error == 0 && !(flag & FKLYR)) {
807 808 /* STREAM is of type S_IFCHR */
808 809 if (contract_device_open(newdev, S_IFCHR, &ct) != 0) {
809 810 UNLOCK_CSP(csp);
810 811 (void) spec_close(vp, flag, 1, 0, cr, cc);
811 812 return (EIO);
812 813 }
813 814 }
814 815
815 816 if (error == 0) {
816 817 /* STREAMS devices don't have a size */
817 818 sp->s_size = csp->s_size = 0;
818 819
819 820 if (!(stp->sd_flag & STRISTTY) || (flag & FNOCTTY))
820 821 return (0);
821 822
822 823 /* try to allocate it as a controlling terminal */
823 824 if (strctty(stp) != EINTR)
824 825 return (0);
825 826
826 827 /* strctty() was interrupted by a signal */
827 828 if (ct) {
828 829 /* we only create contracts for userland opens */
829 830 ASSERT(ttoproc(curthread));
830 831 (void) contract_abandon(ct, ttoproc(curthread), 0);
831 832 }
832 833 (void) spec_close(vp, flag, 1, 0, cr, cc);
833 834 return (EINTR);
834 835 }
835 836
836 837 /*
837 838 * Deal with stropen failure.
838 839 *
839 840 * sd_flag in the stream head cannot change since the
840 841 * common snode is locked before the call to stropen().
841 842 */
842 843 if ((stp != NULL) && (stp->sd_flag & STREOPENFAIL)) {
843 844 /*
844 845 * Open failed part way through.
845 846 */
846 847 mutex_enter(&stp->sd_lock);
847 848 stp->sd_flag &= ~STREOPENFAIL;
848 849 mutex_exit(&stp->sd_lock);
849 850
850 851 UNLOCK_CSP(csp);
851 852 (void) spec_close(vp, flag, 1, 0, cr, cc);
852 853 } else {
853 854 UNLOCK_CSP(csp);
854 855 SN_RELE(csp);
855 856 }
856 857
857 858 /*
858 859 * Resolution for STREAMS vs. regular character device: If the
859 860 * STREAMS open(9E) returns ENOSTR, then try an ordinary device
860 861 * open instead.
861 862 */
862 863 if (error == ENOSTR) {
863 864 goto not_streams;
864 865 }
865 866 return (error);
866 867 }
867 868
868 869 /*ARGSUSED2*/
869 870 static int
870 871 spec_close(
871 872 struct vnode *vp,
872 873 int flag,
873 874 int count,
874 875 offset_t offset,
875 876 struct cred *cr,
876 877 caller_context_t *ct)
877 878 {
878 879 struct vnode *cvp;
879 880 struct snode *sp, *csp;
880 881 enum vtype type;
881 882 dev_t dev;
882 883 int error = 0;
883 884 int sysclone;
884 885
885 886 if (!(flag & FKLYR)) {
886 887 /* this only applies to closes of devices from userland */
887 888 cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
888 889 cleanshares(vp, ttoproc(curthread)->p_pid);
889 890 if (vp->v_stream)
890 891 strclean(vp);
891 892 }
892 893 if (count > 1)
893 894 return (0);
894 895
895 896 /* we allow close to succeed even if device is fenced off */
896 897 sp = VTOS(vp);
897 898 cvp = sp->s_commonvp;
898 899
899 900 dev = sp->s_dev;
900 901 type = vp->v_type;
901 902
902 903 ASSERT(type == VCHR || type == VBLK);
903 904
904 905 /*
905 906 * Prevent close/close and close/open races by serializing closes
906 907 * on this common snode. Clone opens are held up until after
907 908 * we have closed this device so the streams linkage is maintained.
908 909 */
909 910 csp = VTOS(cvp);
910 911
911 912 LOCK_CSP(csp);
912 913 mutex_enter(&csp->s_lock);
913 914
914 915 csp->s_count--; /* one fewer open reference : SN_RELE */
915 916 sysclone = sp->s_flag & SCLONE;
916 917
917 918 /*
918 919 * Invalidate size on each close.
919 920 *
920 921 * XXX We do this on each close because we don't have interfaces that
921 922 * allow a driver to invalidate the size. Since clearing this on each
922 923 * close causes property overhead, we skip /dev/null and
923 924 * /dev/zero to avoid degrading kenbus performance.
924 925 */
925 926 if (getmajor(dev) != mm_major)
926 927 csp->s_flag &= ~SSIZEVALID;
927 928
928 929 /*
929 930 * Only call the close routine when the last open reference through
930 931 * any [s, v]node goes away. This can be checked by looking at
931 932 * s_count on the common vnode.
932 933 */
933 934 if ((csp->s_count == 0) && (csp->s_mapcnt == 0)) {
934 935 /* we don't need a close */
935 936 csp->s_flag &= ~(SNEEDCLOSE | SSIZEVALID);
936 937
937 938 /*
938 939 * A cloning driver may open-clone to the same dev_t that we
939 940 * are closing before spec_inactive destroys the common snode.
940 941 * If this occurs the s_dip association needs to be reevaluated.
941 942 * We clear SDIPSET to force reevaluation in this case. When
942 943 * reevaluation occurs (by spec_clone after open), if the
943 944 * devinfo association has changed then the old association
944 945 * will be released as the new association is established by
945 946 * spec_assoc_vp_with_devi().
946 947 */
947 948 if (csp->s_flag & (SCLONE | SSELFCLONE))
948 949 csp->s_flag &= ~SDIPSET;
949 950
950 951 csp->s_flag |= SCLOSING;
951 952 mutex_exit(&csp->s_lock);
952 953 error = device_close(vp, flag, cr);
953 954
954 955 /*
955 956 * Decrement the devops held in clnopen()
956 957 */
957 958 if (sysclone) {
958 959 ddi_rele_driver(getmajor(dev));
959 960 }
960 961 mutex_enter(&csp->s_lock);
961 962 csp->s_flag &= ~SCLOSING;
962 963 }
963 964
964 965 UNLOCK_CSP_LOCK_HELD(csp);
965 966 mutex_exit(&csp->s_lock);
966 967
967 968 return (error);
968 969 }
969 970
970 971 /*ARGSUSED2*/
971 972 static int
972 973 spec_read(
973 974 struct vnode *vp,
974 975 struct uio *uiop,
975 976 int ioflag,
976 977 struct cred *cr,
977 978 caller_context_t *ct)
978 979 {
979 980 int error;
980 981 struct snode *sp = VTOS(vp);
981 982 dev_t dev = sp->s_dev;
982 983 size_t n;
983 984 ulong_t on;
984 985 u_offset_t bdevsize;
985 986 offset_t maxoff;
986 987 offset_t off;
987 988 struct vnode *blkvp;
988 989
989 990 ASSERT(vp->v_type == VCHR || vp->v_type == VBLK);
990 991
991 992 if (vp->v_stream) {
992 993 ASSERT(vp->v_type == VCHR);
993 994 smark(sp, SACC);
994 995 return (strread(vp, uiop, cr));
995 996 }
996 997
997 998 if (uiop->uio_resid == 0)
998 999 return (0);
999 1000
1000 1001 /*
1001 1002 * Plain old character devices that set D_U64BIT can have
1002 1003 * unrestricted offsets.
1003 1004 */
1004 1005 maxoff = spec_maxoffset(vp);
1005 1006 ASSERT(maxoff != -1 || vp->v_type == VCHR);
1006 1007
1007 1008 if (maxoff != -1 && (uiop->uio_loffset < 0 ||
1008 1009 uiop->uio_loffset + uiop->uio_resid > maxoff))
1009 1010 return (EINVAL);
1010 1011
1011 1012 if (vp->v_type == VCHR) {
1012 1013 smark(sp, SACC);
1013 1014 ASSERT(vp->v_stream == NULL);
1014 1015 return (cdev_read(dev, uiop, cr));
1015 1016 }
1016 1017
1017 1018 /*
1018 1019 * Block device.
1019 1020 */
1020 1021 error = 0;
1021 1022 blkvp = sp->s_commonvp;
1022 1023 bdevsize = SPEC_SIZE(VTOS(blkvp));
1023 1024
1024 1025 do {
1025 1026 caddr_t base;
1026 1027 offset_t diff;
1027 1028
1028 1029 off = uiop->uio_loffset & (offset_t)MAXBMASK;
1029 1030 on = (size_t)(uiop->uio_loffset & MAXBOFFSET);
1030 1031 n = (size_t)MIN(MAXBSIZE - on, uiop->uio_resid);
1031 1032 diff = bdevsize - uiop->uio_loffset;
1032 1033
1033 1034 if (diff <= 0)
1034 1035 break;
1035 1036 if (diff < n)
1036 1037 n = (size_t)diff;
1037 1038
1038 1039 if (vpm_enable) {
1039 1040 error = vpm_data_copy(blkvp, (u_offset_t)(off + on),
1040 1041 n, uiop, 1, NULL, 0, S_READ);
1041 1042 } else {
1042 1043 base = segmap_getmapflt(segkmap, blkvp,
1043 1044 (u_offset_t)(off + on), n, 1, S_READ);
1044 1045
1045 1046 error = uiomove(base + on, n, UIO_READ, uiop);
1046 1047 }
1047 1048 if (!error) {
1048 1049 int flags = 0;
1049 1050 /*
1050 1051 * If we read a whole block, we won't need this
1051 1052 * buffer again soon.
1052 1053 */
1053 1054 if (n + on == MAXBSIZE)
1054 1055 flags = SM_DONTNEED | SM_FREE;
1055 1056 if (vpm_enable) {
1056 1057 error = vpm_sync_pages(blkvp, off, n, flags);
1057 1058 } else {
1058 1059 error = segmap_release(segkmap, base, flags);
1059 1060 }
1060 1061 } else {
1061 1062 if (vpm_enable) {
1062 1063 (void) vpm_sync_pages(blkvp, off, n, 0);
1063 1064 } else {
1064 1065 (void) segmap_release(segkmap, base, 0);
1065 1066 }
1066 1067 if (bdevsize == UNKNOWN_SIZE) {
1067 1068 error = 0;
1068 1069 break;
1069 1070 }
1070 1071 }
1071 1072 } while (error == 0 && uiop->uio_resid > 0 && n != 0);
1072 1073
1073 1074 return (error);
1074 1075 }
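
A worked example (editorial; assumes MAXBSIZE is 8192) of the window arithmetic in the loop above, for uio_loffset 12345 and uio_resid 100000:

	off = 12345 & MAXBMASK;		/* 8192: window-aligned base */
	on  = 12345 & MAXBOFFSET;	/* 4153: offset within window */
	n   = MIN(8192 - 4153, 100000);	/* 4039: bytes this pass */

so the first iteration maps the window at offset 8192 and copies 4039 bytes starting 4153 bytes into it; subsequent iterations start window-aligned.
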
1075 1076
1076 1077 /*ARGSUSED*/
1077 1078 static int
1078 1079 spec_write(
1079 1080 struct vnode *vp,
1080 1081 struct uio *uiop,
1081 1082 int ioflag,
1082 1083 struct cred *cr,
1083 1084 caller_context_t *ct)
1084 1085 {
1085 1086 int error;
1086 1087 struct snode *sp = VTOS(vp);
1087 1088 dev_t dev = sp->s_dev;
1088 1089 size_t n;
1089 1090 ulong_t on;
1090 1091 u_offset_t bdevsize;
1091 1092 offset_t maxoff;
1092 1093 offset_t off;
1093 1094 struct vnode *blkvp;
1094 1095
1095 1096 ASSERT(vp->v_type == VCHR || vp->v_type == VBLK);
1096 1097
1097 1098 if (vp->v_stream) {
1098 1099 ASSERT(vp->v_type == VCHR);
1099 1100 smark(sp, SUPD);
1100 1101 return (strwrite(vp, uiop, cr));
1101 1102 }
1102 1103
1103 1104 /*
1104 1105 * Plain old character devices that set D_U64BIT can have
1105 1106 * unrestricted offsets.
1106 1107 */
1107 1108 maxoff = spec_maxoffset(vp);
1108 1109 ASSERT(maxoff != -1 || vp->v_type == VCHR);
1109 1110
1110 1111 if (maxoff != -1 && (uiop->uio_loffset < 0 ||
1111 1112 uiop->uio_loffset + uiop->uio_resid > maxoff))
1112 1113 return (EINVAL);
1113 1114
1114 1115 if (vp->v_type == VCHR) {
1115 1116 smark(sp, SUPD);
1116 1117 ASSERT(vp->v_stream == NULL);
1117 1118 return (cdev_write(dev, uiop, cr));
1118 1119 }
1119 1120
1120 1121 if (uiop->uio_resid == 0)
1121 1122 return (0);
1122 1123
1123 1124 error = 0;
1124 1125 blkvp = sp->s_commonvp;
1125 1126 bdevsize = SPEC_SIZE(VTOS(blkvp));
1126 1127
1127 1128 do {
1128 1129 int pagecreate;
1129 1130 int newpage;
1130 1131 caddr_t base;
1131 1132 offset_t diff;
1132 1133
1133 1134 off = uiop->uio_loffset & (offset_t)MAXBMASK;
1134 1135 on = (ulong_t)(uiop->uio_loffset & MAXBOFFSET);
1135 1136 n = (size_t)MIN(MAXBSIZE - on, uiop->uio_resid);
1136 1137 pagecreate = 0;
1137 1138
1138 1139 diff = bdevsize - uiop->uio_loffset;
1139 1140 if (diff <= 0) {
1140 1141 error = ENXIO;
1141 1142 break;
1142 1143 }
1143 1144 if (diff < n)
1144 1145 n = (size_t)diff;
1145 1146
1146 1147 /*
1147 1148 * Check to see if we can skip reading in the page
1148 1149 * and just allocate the memory. We can do this
1149 1150 * if we are going to rewrite the entire mapping
1150 1151 * or if we are going to write to the end of the device
1151 1152 * from the beginning of the mapping.
1152 1153 */
1153 1154 if (n == MAXBSIZE || (on == 0 && (off + n) == bdevsize))
1154 1155 pagecreate = 1;
1155 1156
1156 1157 newpage = 0;
1157 1158
1158 1159 /*
1159 1160 * Touch the page and fault it in if it is not in core
1160 1161 * before segmap_getmapflt or vpm_data_copy can lock it.
1161 1162 * This is to avoid a deadlock when the buffer being written
1162 1163 * is mapped to the same file through mmap.
1163 1164 */
1164 1165 uio_prefaultpages((long)n, uiop);
1165 1166
1166 1167 if (vpm_enable) {
1167 1168 error = vpm_data_copy(blkvp, (u_offset_t)(off + on),
1168 1169 n, uiop, !pagecreate, NULL, 0, S_WRITE);
1169 1170 } else {
1170 1171 base = segmap_getmapflt(segkmap, blkvp,
1171 1172 (u_offset_t)(off + on), n, !pagecreate, S_WRITE);
1172 1173
1173 1174 /*
1174 1175 * segmap_pagecreate() returns 1 if it calls
1175 1176 * page_create_va() to allocate any pages.
1176 1177 */
1177 1178
1178 1179 if (pagecreate)
1179 1180 newpage = segmap_pagecreate(segkmap, base + on,
1180 1181 n, 0);
1181 1182
1182 1183 error = uiomove(base + on, n, UIO_WRITE, uiop);
1183 1184 }
1184 1185
1185 1186 if (!vpm_enable && pagecreate &&
1186 1187 uiop->uio_loffset <
1187 1188 P2ROUNDUP_TYPED(off + on + n, PAGESIZE, offset_t)) {
1188 1189 /*
1189 1190 * We created pages w/o initializing them completely,
1190 1191 * thus we need to zero the part that wasn't set up.
1191 1192 * This can happen if we write to the end of the device
1192 1193 * or if we had some sort of error during the uiomove.
1193 1194 */
1194 1195 long nzero;
1195 1196 offset_t nmoved;
1196 1197
1197 1198 nmoved = (uiop->uio_loffset - (off + on));
1198 1199 if (nmoved < 0 || nmoved > n) {
1199 1200 panic("spec_write: nmoved bogus");
1200 1201 /*NOTREACHED*/
1201 1202 }
1202 1203 nzero = (long)P2ROUNDUP(on + n, PAGESIZE) -
1203 1204 (on + nmoved);
1204 1205 if (nzero < 0 || (on + nmoved + nzero > MAXBSIZE)) {
1205 1206 panic("spec_write: nzero bogus");
1206 1207 /*NOTREACHED*/
1207 1208 }
1208 1209 (void) kzero(base + on + nmoved, (size_t)nzero);
1209 1210 }
1210 1211
1211 1212 /*
1212 1213 * Unlock the pages which have been allocated by
1213 1214 * page_create_va() in segmap_pagecreate().
1214 1215 */
1215 1216 if (!vpm_enable && newpage)
1216 1217 segmap_pageunlock(segkmap, base + on,
1217 1218 (size_t)n, S_WRITE);
1218 1219
1219 1220 if (error == 0) {
1220 1221 int flags = 0;
1221 1222
1222 1223 /*
1223 1224 * Force write back for synchronous write cases.
1224 1225 */
1225 1226 if (ioflag & (FSYNC|FDSYNC))
1226 1227 flags = SM_WRITE;
1227 1228 else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
1228 1229 /*
1229 1230 * Have written a whole block.
1230 1231 * Start an asynchronous write and
1231 1232 * mark the buffer to indicate that
1232 1233 * it won't be needed again soon.
1233 1234 * Push swap files here, since it
1234 1235 * won't happen anywhere else.
1235 1236 */
1236 1237 flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
1237 1238 }
1238 1239 smark(sp, SUPD|SCHG);
1239 1240 if (vpm_enable) {
1240 1241 error = vpm_sync_pages(blkvp, off, n, flags);
1241 1242 } else {
1242 1243 error = segmap_release(segkmap, base, flags);
1243 1244 }
1244 1245 } else {
1245 1246 if (vpm_enable) {
1246 1247 (void) vpm_sync_pages(blkvp, off, n, SM_INVAL);
1247 1248 } else {
1248 1249 (void) segmap_release(segkmap, base, SM_INVAL);
1249 1250 }
1250 1251 }
1251 1252
1252 1253 } while (error == 0 && uiop->uio_resid > 0 && n != 0);
1253 1254
1254 1255 return (error);
1255 1256 }
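
A worked example (editorial; assumes PAGESIZE 4096) of the zero-fill arithmetic in the pagecreate path above. Suppose on = 0, n = 6000 (a write that ends at the device size), and uiomove() completes:

	nmoved = uio_loffset - (off + on);		/* 6000 */
	nzero  = P2ROUNDUP(on + n, PAGESIZE) - (on + nmoved);
							/* 8192 - 6000 = 2192 */

kzero() then clears the 2192 bytes of the second created page that uiomove() never wrote.
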
1256 1257
1257 1258 /*ARGSUSED6*/
1258 1259 static int
1259 1260 spec_ioctl(struct vnode *vp, int cmd, intptr_t arg, int mode, struct cred *cr,
1260 1261 int *rvalp, caller_context_t *ct)
1261 1262 {
1262 1263 struct snode *sp;
1263 1264 dev_t dev;
1264 1265 int error;
1265 1266
1266 1267 if (vp->v_type != VCHR)
1267 1268 return (ENOTTY);
1268 1269
1269 1270 /*
1270 1271 * allow ioctls() to go through even for fenced snodes, as they
1271 1272 * may include unconfiguration operations, for example popping of
1272 1273 * STREAMS modules.
1273 1274 */
1274 1275
1275 1276 sp = VTOS(vp);
1276 1277 dev = sp->s_dev;
1277 1278 if (vp->v_stream) {
1278 1279 error = strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp);
1279 1280 } else {
1280 1281 error = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1281 1282 }
1282 1283 return (error);
1283 1284 }
1284 1285
1285 1286 static int
1286 1287 spec_getattr(
1287 1288 struct vnode *vp,
1288 1289 struct vattr *vap,
1289 1290 int flags,
1290 1291 struct cred *cr,
1291 1292 caller_context_t *ct)
1292 1293 {
1293 1294 int error;
1294 1295 struct snode *sp;
1295 1296 struct vnode *realvp;
1296 1297
1297 1298 /* With ATTR_COMM we will not get attributes from realvp */
1298 1299 if (flags & ATTR_COMM) {
1299 1300 sp = VTOS(vp);
1300 1301 vp = sp->s_commonvp;
1301 1302 }
1302 1303 sp = VTOS(vp);
1303 1304
1304 1305 /* we want stat() to fail with ENXIO if the device is fenced off */
1305 1306 if (S_ISFENCED(sp))
1306 1307 return (ENXIO);
1307 1308
1308 1309 realvp = sp->s_realvp;
1309 1310
1310 1311 if (realvp == NULL) {
1311 1312 static int snode_shift = 0;
1312 1313
1313 1314 /*
1314 1315 * Calculate the amount of bitshift to a snode pointer which
1315 1316 * will still keep it unique. See below.
1316 1317 */
1317 1318 if (snode_shift == 0)
1318 1319 snode_shift = highbit(sizeof (struct snode));
1319 1320 ASSERT(snode_shift > 0);
1320 1321
1321 1322 /*
1322 1323 * No real vnode behind this one. Fill in the fields
1323 1324 * from the snode.
1324 1325 *
1325 1326 * This code should be refined to return only the
1326 1327 * attributes asked for instead of all of them.
1327 1328 */
1328 1329 vap->va_type = vp->v_type;
1329 1330 vap->va_mode = 0;
1330 1331 vap->va_uid = vap->va_gid = 0;
1331 1332 vap->va_fsid = sp->s_fsid;
1332 1333
1333 1334 /*
1334 1335 * If the va_nodeid is > MAX_USHORT, then i386 stats might
1335 1336 * fail. So we shift down the snode pointer to try to get
1336 1337 * the most uniqueness into 16 bits.
1337 1338 */
1338 1339 vap->va_nodeid = ((ino64_t)(uintptr_t)sp >> snode_shift) &
1339 1340 0xFFFF;
1340 1341 vap->va_nlink = 0;
1341 1342 vap->va_rdev = sp->s_dev;
1342 1343
1343 1344 /*
1344 1345 * va_nblocks is the number of 512 byte blocks used to store
1345 1346 * the mknod for the device, not the number of blocks on the
1346 1347 * device itself. This is typically zero since the mknod is
1347 1348 * represented directly in the inode itself.
1348 1349 */
1349 1350 vap->va_nblocks = 0;
1350 1351 } else {
1351 1352 error = VOP_GETATTR(realvp, vap, flags, cr, ct);
1352 1353 if (error != 0)
1353 1354 return (error);
1354 1355 }
1355 1356
1356 1357 /* set the size from the snode */
1357 1358 vap->va_size = SPEC_SIZE(VTOS(sp->s_commonvp));
1358 1359 vap->va_blksize = MAXBSIZE;
1359 1360
1360 1361 mutex_enter(&sp->s_lock);
1361 1362 vap->va_atime.tv_sec = sp->s_atime;
1362 1363 vap->va_mtime.tv_sec = sp->s_mtime;
1363 1364 vap->va_ctime.tv_sec = sp->s_ctime;
1364 1365 mutex_exit(&sp->s_lock);
1365 1366
1366 1367 vap->va_atime.tv_nsec = 0;
1367 1368 vap->va_mtime.tv_nsec = 0;
1368 1369 vap->va_ctime.tv_nsec = 0;
1369 1370 vap->va_seq = 0;
1370 1371
1371 1372 return (0);
1372 1373 }
1373 1374
1374 1375 static int
1375 1376 spec_setattr(
1376 1377 struct vnode *vp,
1377 1378 struct vattr *vap,
1378 1379 int flags,
1379 1380 struct cred *cr,
1380 1381 caller_context_t *ct)
1381 1382 {
1382 1383 struct snode *sp = VTOS(vp);
1383 1384 struct vnode *realvp;
1384 1385 int error;
1385 1386
1386 1387 /* fail with ENXIO if the device is fenced off */
1387 1388 if (S_ISFENCED(sp))
1388 1389 return (ENXIO);
1389 1390
1390 1391 if (vp->v_type == VCHR && vp->v_stream && (vap->va_mask & AT_SIZE)) {
1391 1392 /*
1392 1393 * 1135080: O_TRUNC should have no effect on
1393 1394 * named pipes and terminal devices.
1394 1395 */
1395 1396 ASSERT(vap->va_mask == AT_SIZE);
1396 1397 return (0);
1397 1398 }
1398 1399
1399 1400 if ((realvp = sp->s_realvp) == NULL)
1400 1401 error = 0; /* no real vnode to update */
1401 1402 else
1402 1403 error = VOP_SETATTR(realvp, vap, flags, cr, ct);
1403 1404 if (error == 0) {
1404 1405 /*
1405 1406 * If times were changed, update snode.
1406 1407 */
1407 1408 mutex_enter(&sp->s_lock);
1408 1409 if (vap->va_mask & AT_ATIME)
1409 1410 sp->s_atime = vap->va_atime.tv_sec;
1410 1411 if (vap->va_mask & AT_MTIME) {
1411 1412 sp->s_mtime = vap->va_mtime.tv_sec;
1412 1413 sp->s_ctime = gethrestime_sec();
1413 1414 }
1414 1415 mutex_exit(&sp->s_lock);
1415 1416 }
1416 1417 return (error);
1417 1418 }
1418 1419
1419 1420 static int
1420 1421 spec_access(
1421 1422 struct vnode *vp,
1422 1423 int mode,
1423 1424 int flags,
1424 1425 struct cred *cr,
1425 1426 caller_context_t *ct)
1426 1427 {
1427 1428 struct vnode *realvp;
1428 1429 struct snode *sp = VTOS(vp);
1429 1430
1430 1431 /* fail with ENXIO if the device is fenced off */
1431 1432 if (S_ISFENCED(sp))
1432 1433 return (ENXIO);
1433 1434
1434 1435 if ((realvp = sp->s_realvp) != NULL)
1435 1436 return (VOP_ACCESS(realvp, mode, flags, cr, ct));
1436 1437 else
1437 1438 return (0); /* Allow all access. */
1438 1439 }
1439 1440
1440 1441 /*
1441 1442 * This can be called if creat or an open with O_CREAT is done on the root
1442 1443 * of a lofs mount where the mounted entity is a special file.
1443 1444 */
1444 1445 /*ARGSUSED*/
1445 1446 static int
1446 1447 spec_create(
1447 1448 struct vnode *dvp,
1448 1449 char *name,
1449 1450 vattr_t *vap,
1450 1451 enum vcexcl excl,
1451 1452 int mode,
1452 1453 struct vnode **vpp,
1453 1454 struct cred *cr,
1454 1455 int flag,
1455 1456 caller_context_t *ct,
1456 1457 vsecattr_t *vsecp)
1457 1458 {
1458 1459 int error;
1459 1460 struct snode *sp = VTOS(dvp);
1460 1461
1461 1462 /* fail with ENXIO if the device is fenced off */
1462 1463 if (S_ISFENCED(sp))
1463 1464 return (ENXIO);
1464 1465
1465 1466 ASSERT(dvp && (dvp->v_flag & VROOT) && *name == '\0');
1466 1467 if (excl == NONEXCL) {
1467 1468 if (mode && (error = spec_access(dvp, mode, 0, cr, ct)))
1468 1469 return (error);
1469 1470 VN_HOLD(dvp);
1470 1471 return (0);
1471 1472 }
1472 1473 return (EEXIST);
1473 1474 }
1474 1475
1475 1476 /*
1476 1477 * In order to sync out the snode times without multi-client problems,
1477 1478 * make sure the times written out are never earlier than the times
1478 1479 * already set in the vnode.
1479 1480 */
1480 1481 static int
1481 1482 spec_fsync(
1482 1483 struct vnode *vp,
1483 1484 int syncflag,
1484 1485 struct cred *cr,
1485 1486 caller_context_t *ct)
1486 1487 {
1487 1488 struct snode *sp = VTOS(vp);
1488 1489 struct vnode *realvp;
1489 1490 struct vnode *cvp;
1490 1491 struct vattr va, vatmp;
1491 1492
1492 1493 /* allow syncing even if device is fenced off */
1493 1494
1494 1495 /* If times didn't change, don't flush anything. */
1495 1496 mutex_enter(&sp->s_lock);
1496 1497 if ((sp->s_flag & (SACC|SUPD|SCHG)) == 0 && vp->v_type != VBLK) {
1497 1498 mutex_exit(&sp->s_lock);
1498 1499 return (0);
1499 1500 }
1500 1501 sp->s_flag &= ~(SACC|SUPD|SCHG);
1501 1502 mutex_exit(&sp->s_lock);
1502 1503 cvp = sp->s_commonvp;
1503 1504 realvp = sp->s_realvp;
1504 1505
1505 1506 if (vp->v_type == VBLK && cvp != vp && vn_has_cached_data(cvp) &&
1506 1507 (cvp->v_flag & VISSWAP) == 0)
1507 1508 (void) VOP_PUTPAGE(cvp, (offset_t)0, 0, 0, cr, ct);
1508 1509
1509 1510 /*
1510 1511 * For devices that support it, force write cache to stable storage.
1511 1512 * We don't need the lock to check s_flag since we can treat
1512 1513 * SNOFLUSH as a hint.
1513 1514 */
1514 1515 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
1515 1516 !(sp->s_flag & SNOFLUSH)) {
1516 1517 int rval, rc;
1517 1518 struct dk_callback spec_callback;
1518 1519
1519 1520 spec_callback.dkc_flag = FLUSH_VOLATILE;
1520 1521 spec_callback.dkc_callback = NULL;
1521 1522
1522 1523 /* synchronous flush on volatile cache */
1523 1524 rc = cdev_ioctl(vp->v_rdev, DKIOCFLUSHWRITECACHE,
1524 1525 (intptr_t)&spec_callback, FNATIVE|FKIOCTL, cr, &rval);
1525 1526
1526 1527 if (rc == ENOTSUP || rc == ENOTTY) {
1527 1528 mutex_enter(&sp->s_lock);
1528 1529 sp->s_flag |= SNOFLUSH;
1529 1530 mutex_exit(&sp->s_lock);
1530 1531 }
1531 1532 }
1532 1533
1533 1534 /*
1534 1535 * If no real vnode to update, don't flush anything.
1535 1536 */
1536 1537 if (realvp == NULL)
1537 1538 return (0);
1538 1539
1539 1540 vatmp.va_mask = AT_ATIME|AT_MTIME;
1540 1541 if (VOP_GETATTR(realvp, &vatmp, 0, cr, ct) == 0) {
1541 1542
1542 1543 mutex_enter(&sp->s_lock);
1543 1544 if (vatmp.va_atime.tv_sec > sp->s_atime)
1544 1545 va.va_atime = vatmp.va_atime;
1545 1546 else {
1546 1547 va.va_atime.tv_sec = sp->s_atime;
1547 1548 va.va_atime.tv_nsec = 0;
1548 1549 }
1549 1550 if (vatmp.va_mtime.tv_sec > sp->s_mtime)
1550 1551 va.va_mtime = vatmp.va_mtime;
1551 1552 else {
1552 1553 va.va_mtime.tv_sec = sp->s_mtime;
1553 1554 va.va_mtime.tv_nsec = 0;
1554 1555 }
1555 1556 mutex_exit(&sp->s_lock);
1556 1557
1557 1558 va.va_mask = AT_ATIME|AT_MTIME;
1558 1559 (void) VOP_SETATTR(realvp, &va, 0, cr, ct);
1559 1560 }
1560 1561 (void) VOP_FSYNC(realvp, syncflag, cr, ct);
1561 1562 return (0);
1562 1563 }
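
A short worked example of the time merge above: for each of atime and mtime, the value pushed to realvp is effectively max(realvp time, snode time), so a stale realvp never rolls the times backward. If realvp's atime is 1000 while s_atime is 1500, va.va_atime.tv_sec becomes 1500; in the opposite case the realvp value is kept.
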
1563 1564
1564 1565 /*ARGSUSED*/
1565 1566 static void
1566 1567 spec_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ct)
1567 1568 {
1568 1569 struct snode *sp = VTOS(vp);
1569 1570 struct vnode *cvp;
1570 1571 struct vnode *rvp;
1571 1572
1572 1573 /*
1573 1574 * If no one has reclaimed the vnode, remove it from the
1574 1575 * cache now.
1575 1576 */
1576 1577 if (vp->v_count < 1) {
1577 1578 panic("spec_inactive: Bad v_count");
1578 1579 /*NOTREACHED*/
1579 1580 }
1580 1581 mutex_enter(&stable_lock);
1581 1582
1582 1583 mutex_enter(&vp->v_lock);
1583 1584 /*
1584 1585 * Drop the temporary hold by vn_rele now
1585 1586 */
1586 1587 if (--vp->v_count != 0) {
1587 1588 mutex_exit(&vp->v_lock);
1588 1589 mutex_exit(&stable_lock);
1589 1590 return;
1590 1591 }
1591 1592 mutex_exit(&vp->v_lock);
1592 1593
1593 1594 sdelete(sp);
1594 1595 mutex_exit(&stable_lock);
1595 1596
1596 1597 /* We are the sole owner of sp now */
1597 1598 cvp = sp->s_commonvp;
1598 1599 rvp = sp->s_realvp;
1599 1600
1600 1601 if (rvp) {
1601 1602 /*
1602 1603 * If the snode times changed, then update the times
1603 1604 * associated with the "realvp".
1604 1605 */
1605 1606 if ((sp->s_flag & (SACC|SUPD|SCHG)) != 0) {
1606 1607
1607 1608 struct vattr va, vatmp;
1608 1609
1609 1610 mutex_enter(&sp->s_lock);
1610 1611 sp->s_flag &= ~(SACC|SUPD|SCHG);
1611 1612 mutex_exit(&sp->s_lock);
1612 1613 vatmp.va_mask = AT_ATIME|AT_MTIME;
1613 1614 /*
1614 1615 * The user may not own the device, but we
1615 1616 * want to update the attributes anyway.
1616 1617 */
1617 1618 if (VOP_GETATTR(rvp, &vatmp, 0, kcred, ct) == 0) {
1618 1619 if (vatmp.va_atime.tv_sec > sp->s_atime)
1619 1620 va.va_atime = vatmp.va_atime;
1620 1621 else {
1621 1622 va.va_atime.tv_sec = sp->s_atime;
1622 1623 va.va_atime.tv_nsec = 0;
1623 1624 }
1624 1625 if (vatmp.va_mtime.tv_sec > sp->s_mtime)
1625 1626 va.va_mtime = vatmp.va_mtime;
1626 1627 else {
1627 1628 va.va_mtime.tv_sec = sp->s_mtime;
1628 1629 va.va_mtime.tv_nsec = 0;
1629 1630 }
1630 1631
1631 1632 va.va_mask = AT_ATIME|AT_MTIME;
1632 1633 (void) VOP_SETATTR(rvp, &va, 0, kcred, ct);
1633 1634 }
1634 1635 }
1635 1636 }
1636 1637 ASSERT(!vn_has_cached_data(vp));
1637 1638 vn_invalid(vp);
1638 1639
1639 1640 /* if we are sharing another file system's vfs, release it */
1640 1641 if (vp->v_vfsp && (vp->v_vfsp != &spec_vfs))
1641 1642 VFS_RELE(vp->v_vfsp);
1642 1643
1643 1644 /* if we have a realvp, release the realvp */
1644 1645 if (rvp)
1645 1646 VN_RELE(rvp);
1646 1647
1647 1648 /* if we have a common, release the common */
1648 1649 if (cvp && (cvp != vp)) {
1649 1650 VN_RELE(cvp);
1650 1651 #ifdef DEBUG
1651 1652 } else if (cvp) {
1652 1653 /*
1653 1654 * if this is the last reference to a common vnode, any
1654 1655 * associated stream had better have been closed
1655 1656 */
1656 1657 ASSERT(cvp == vp);
1657 1658 ASSERT(cvp->v_stream == NULL);
1658 1659 #endif /* DEBUG */
1659 1660 }
1660 1661
1661 1662 /*
1662 1663 * if we have a hold on a devinfo node (established by
1663 1664 * spec_assoc_vp_with_devi), release the hold
1664 1665 */
1665 1666 if (sp->s_dip)
1666 1667 ddi_release_devi(sp->s_dip);
1667 1668
1668 1669 /*
1669 1670 * If we have an associated device policy, release it.
1670 1671 */
1671 1672 if (sp->s_plcy != NULL)
1672 1673 dpfree(sp->s_plcy);
1673 1674
1674 1675 /*
1675 1676 * If all holds on the devinfo node are through specfs/devfs
1676 1677 * and we just destroyed the last specfs node associated with the
1677 1678 * device, then the devinfo node reference count should now be
1678 1679 * zero. We can't check this because there may be other holds
1679 1680 * on the node from non file system sources: ddi_hold_devi_by_instance
1680 1681 * for example.
1681 1682 */
1682 1683 kmem_cache_free(snode_cache, sp);
1683 1684 }
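
A note on the timestamp merge above: spec_inactive() pushes to the realvp whichever of the cached snode time and the backing vnode's current time is newer, zeroing the nanoseconds when the seconds-only snode value wins. A minimal userland sketch of that rule, with hypothetical values and the VOP calls stubbed away:

#include <stdio.h>

/*
 * Newer time wins; nanoseconds are zeroed when the seconds-only
 * snode value is used, mirroring the merge in spec_inactive().
 */
static void
merge_time(long disk_sec, long disk_nsec, long snode_sec,
    long *out_sec, long *out_nsec)
{
	if (disk_sec > snode_sec) {
		*out_sec = disk_sec;
		*out_nsec = disk_nsec;
	} else {
		*out_sec = snode_sec;
		*out_nsec = 0;
	}
}

int
main(void)
{
	long sec, nsec;

	merge_time(1000, 500, 2000, &sec, &nsec);
	printf("merged: %ld.%09ld\n", sec, nsec);	/* 2000.000000000 */
	return (0);
}
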
1684 1685
1685 1686 static int
1686 1687 spec_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
1687 1688 {
1688 1689 struct vnode *realvp;
1689 1690 struct snode *sp = VTOS(vp);
1690 1691
1691 1692 if ((realvp = sp->s_realvp) != NULL)
1692 1693 return (VOP_FID(realvp, fidp, ct));
1693 1694 else
1694 1695 return (EINVAL);
1695 1696 }
1696 1697
1697 1698 /*ARGSUSED1*/
1698 1699 static int
1699 1700 spec_seek(
1700 1701 struct vnode *vp,
1701 1702 offset_t ooff,
1702 1703 offset_t *noffp,
1703 1704 caller_context_t *ct)
1704 1705 {
1705 1706 offset_t maxoff = spec_maxoffset(vp);
1706 1707
1707 1708 if (maxoff == -1 || *noffp <= maxoff)
1708 1709 return (0);
1709 1710 else
1710 1711 return (EINVAL);
1711 1712 }
1712 1713
1713 1714 static int
1714 1715 spec_frlock(
1715 1716 struct vnode *vp,
1716 1717 int cmd,
1717 1718 struct flock64 *bfp,
1718 1719 int flag,
1719 1720 offset_t offset,
1720 1721 struct flk_callback *flk_cbp,
1721 1722 struct cred *cr,
1722 1723 caller_context_t *ct)
1723 1724 {
1724 1725 struct snode *sp = VTOS(vp);
1725 1726 struct snode *csp;
1726 1727
1727 1728 csp = VTOS(sp->s_commonvp);
1728 1729 /*
1729 1730 * If file is being mapped, disallow frlock.
1730 1731 */
1731 1732 if (csp->s_mapcnt > 0)
1732 1733 return (EAGAIN);
1733 1734
1734 1735 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
1735 1736 }
1736 1737
1737 1738 static int
1738 1739 spec_realvp(struct vnode *vp, struct vnode **vpp, caller_context_t *ct)
1739 1740 {
1740 1741 struct vnode *rvp;
1741 1742
1742 1743 if ((rvp = VTOS(vp)->s_realvp) != NULL) {
1743 1744 vp = rvp;
1744 1745 if (VOP_REALVP(vp, &rvp, ct) == 0)
1745 1746 vp = rvp;
1746 1747 }
1747 1748
1748 1749 *vpp = vp;
1749 1750 return (0);
1750 1751 }
1751 1752
1752 1753 /*
1753 1754 * Return all the pages from [off..off + len] in block
1754 1755 * or character device.
1755 1756 */
1756 1757 /*ARGSUSED*/
1757 1758 static int
1758 1759 spec_getpage(
1759 1760 struct vnode *vp,
1760 1761 offset_t off,
1761 1762 size_t len,
1762 1763 uint_t *protp,
1763 1764 page_t *pl[],
1764 1765 size_t plsz,
1765 1766 struct seg *seg,
1766 1767 caddr_t addr,
1767 1768 enum seg_rw rw,
1768 1769 struct cred *cr,
1769 1770 caller_context_t *ct)
1770 1771 {
1771 1772 struct snode *sp = VTOS(vp);
1772 1773 int err;
1773 1774
1774 1775 ASSERT(sp->s_commonvp == vp);
1775 1776
1776 1777 /*
1777 1778 * XXX Given the above assertion, this might not do
1778 1779 * what is wanted here.
1779 1780 */
1780 1781 if (vp->v_flag & VNOMAP)
1781 1782 return (ENOSYS);
1782 1783 TRACE_4(TR_FAC_SPECFS, TR_SPECFS_GETPAGE,
1783 1784 "specfs getpage:vp %p off %llx len %ld snode %p",
1784 1785 vp, off, len, sp);
1785 1786
1786 1787 switch (vp->v_type) {
1787 1788 case VBLK:
1788 1789 if (protp != NULL)
1789 1790 *protp = PROT_ALL;
1790 1791
1791 1792 if (((u_offset_t)off + len) > (SPEC_SIZE(sp) + PAGEOFFSET))
1792 1793 return (EFAULT); /* beyond EOF */
1793 1794
1794 - if (len <= PAGESIZE)
1795 - err = spec_getapage(vp, (u_offset_t)off, len, protp, pl,
1796 - plsz, seg, addr, rw, cr);
1797 - else
1798 - err = pvn_getpages(spec_getapage, vp, (u_offset_t)off,
1799 - len, protp, pl, plsz, seg, addr, rw, cr);
1795 + err = pvn_getpages(spec_getapage, vp, (u_offset_t)off, len,
1796 + protp, pl, plsz, seg, addr, rw, cr);
1800 1797 break;
1801 1798
1802 1799 case VCHR:
1803 1800 cmn_err(CE_NOTE, "spec_getpage called for character device. "
1804 1801 "Check any non-ON consolidation drivers");
1805 1802 err = 0;
1806 1803 pl[0] = (page_t *)0;
1807 1804 break;
1808 1805
1809 1806 default:
1810 1807 panic("spec_getpage: bad v_type 0x%x", vp->v_type);
1811 1808 /*NOTREACHED*/
1812 1809 }
1813 1810
1814 1811 return (err);
1815 1812 }
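
The VBLK hunk above is the substance of this change: the len <= PAGESIZE special case is gone because pvn_getpages() degenerates to a single spec_getapage() call for a one-page request. A rough userland sketch of that loop shape (a model, not the actual vm/pvn.c code) shows why the branch was redundant:

#include <stdio.h>

#define	PAGESIZE	4096ULL

/* Stand-in for a per-page getapage routine such as spec_getapage(). */
static void
getapage_stub(unsigned long long off)
{
	printf("getapage: off %llu\n", off);
}

/*
 * Sketch of a pvn_getpages()-style wrapper: one iteration per page,
 * so a request with len <= PAGESIZE makes exactly one getapage call.
 */
static void
getpages_sketch(unsigned long long off, unsigned long long len)
{
	unsigned long long eoff = off + len;

	for (; off < eoff; off += PAGESIZE)
		getapage_stub(off);
}

int
main(void)
{
	getpages_sketch(0, PAGESIZE);		/* one call */
	getpages_sketch(0, 3 * PAGESIZE);	/* three calls */
	return (0);
}
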
1816 1813
1817 1814 extern int klustsize; /* set in machdep.c */
1818 1815
1819 1816 int spec_ra = 1;
1820 1817 int spec_lostpage; /* number of times we lost original page */
1821 1818
1822 1819 /*ARGSUSED2*/
1823 1820 static int
1824 1821 spec_getapage(
1825 1822 struct vnode *vp,
1826 1823 u_offset_t off,
1827 1824 size_t len,
1828 1825 uint_t *protp,
1829 1826 page_t *pl[],
1830 1827 size_t plsz,
1831 1828 struct seg *seg,
1832 1829 caddr_t addr,
1833 1830 enum seg_rw rw,
1834 1831 struct cred *cr)
1835 1832 {
1836 1833 struct snode *sp;
1837 1834 struct buf *bp;
1838 1835 page_t *pp, *pp2;
1839 1836 u_offset_t io_off1, io_off2;
1840 1837 size_t io_len1;
1841 1838 size_t io_len2;
1842 1839 size_t blksz;
1843 1840 u_offset_t blkoff;
1844 1841 int dora, err;
1845 1842 page_t *pagefound;
1846 1843 uint_t xlen;
1847 1844 size_t adj_klustsize;
1848 1845 u_offset_t size;
1849 1846 u_offset_t tmpoff;
1850 1847
1851 1848 sp = VTOS(vp);
1852 1849 TRACE_3(TR_FAC_SPECFS, TR_SPECFS_GETAPAGE,
1853 1850 "specfs getapage:vp %p off %llx snode %p", vp, off, sp);
1854 1851 reread:
1855 1852
1856 1853 err = 0;
1857 1854 bp = NULL;
1858 1855 pp = NULL;
1859 1856 pp2 = NULL;
1860 1857
1861 1858 if (pl != NULL)
1862 1859 pl[0] = NULL;
1863 1860
1864 1861 size = SPEC_SIZE(VTOS(sp->s_commonvp));
1865 1862
1866 1863 if (spec_ra && sp->s_nextr == off)
1867 1864 dora = 1;
1868 1865 else
1869 1866 dora = 0;
1870 1867
1871 1868 if (size == UNKNOWN_SIZE) {
1872 1869 dora = 0;
1873 1870 adj_klustsize = PAGESIZE;
1874 1871 } else {
1875 1872 adj_klustsize = dora ? klustsize : PAGESIZE;
1876 1873 }
1877 1874
1878 1875 again:
1879 1876 if ((pagefound = page_exists(vp, off)) == NULL) {
1880 1877 if (rw == S_CREATE) {
1881 1878 /*
1882 1879 			 * We're allocating a swap slot and its
1883 1880 * associated page was not found, so allocate
1884 1881 * and return it.
1885 1882 */
1886 1883 if ((pp = page_create_va(vp, off,
1887 1884 PAGESIZE, PG_WAIT, seg, addr)) == NULL) {
1888 1885 panic("spec_getapage: page_create");
1889 1886 /*NOTREACHED*/
1890 1887 }
1891 1888 io_len1 = PAGESIZE;
1892 1889 sp->s_nextr = off + PAGESIZE;
1893 1890 } else {
1894 1891 /*
1895 1892 * Need to really do disk I/O to get the page(s).
1896 1893 */
1897 1894 blkoff = (off / adj_klustsize) * adj_klustsize;
1898 1895 if (size == UNKNOWN_SIZE) {
1899 1896 blksz = PAGESIZE;
1900 1897 } else {
1901 1898 if (blkoff + adj_klustsize <= size)
1902 1899 blksz = adj_klustsize;
1903 1900 else
1904 1901 blksz =
1905 1902 MIN(size - blkoff, adj_klustsize);
1906 1903 }
1907 1904
1908 1905 pp = pvn_read_kluster(vp, off, seg, addr, &tmpoff,
1909 1906 &io_len1, blkoff, blksz, 0);
1910 1907 io_off1 = tmpoff;
1911 1908 /*
1912 1909 			 * Make sure the page didn't sneak into the
1913 1910 * cache while we blocked in pvn_read_kluster.
1914 1911 */
1915 1912 if (pp == NULL)
1916 1913 goto again;
1917 1914
1918 1915 /*
1919 1916 * Zero part of page which we are not
1920 1917 * going to be reading from disk now.
1921 1918 */
1922 1919 xlen = (uint_t)(io_len1 & PAGEOFFSET);
1923 1920 if (xlen != 0)
1924 1921 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
1925 1922
1926 1923 bp = spec_startio(vp, pp, io_off1, io_len1,
1927 1924 pl == NULL ? (B_ASYNC | B_READ) : B_READ);
1928 1925 sp->s_nextr = io_off1 + io_len1;
1929 1926 }
1930 1927 }
1931 1928
1932 1929 if (dora && rw != S_CREATE) {
1933 1930 u_offset_t off2;
1934 1931 caddr_t addr2;
1935 1932
1936 1933 off2 = ((off / adj_klustsize) + 1) * adj_klustsize;
1937 1934 addr2 = addr + (off2 - off);
1938 1935
1939 1936 pp2 = NULL;
1940 1937 /*
1941 1938 * If we are past EOF then don't bother trying
1942 1939 * with read-ahead.
1943 1940 */
1944 1941 if (off2 >= size)
1945 1942 pp2 = NULL;
1946 1943 else {
1947 1944 if (off2 + adj_klustsize <= size)
1948 1945 blksz = adj_klustsize;
1949 1946 else
1950 1947 blksz = MIN(size - off2, adj_klustsize);
1951 1948
1952 1949 pp2 = pvn_read_kluster(vp, off2, seg, addr2, &tmpoff,
1953 1950 &io_len2, off2, blksz, 1);
1954 1951 io_off2 = tmpoff;
1955 1952 }
1956 1953
1957 1954 if (pp2 != NULL) {
1958 1955 /*
1959 1956 * Zero part of page which we are not
1960 1957 * going to be reading from disk now.
1961 1958 */
1962 1959 xlen = (uint_t)(io_len2 & PAGEOFFSET);
1963 1960 if (xlen != 0)
1964 1961 pagezero(pp2->p_prev, xlen, PAGESIZE - xlen);
1965 1962
1966 1963 (void) spec_startio(vp, pp2, io_off2, io_len2,
1967 1964 B_READ | B_ASYNC);
1968 1965 }
1969 1966 }
1970 1967
1971 1968 if (pl == NULL)
1972 1969 return (err);
1973 1970
1974 1971 if (bp != NULL) {
1975 1972 err = biowait(bp);
1976 1973 pageio_done(bp);
1977 1974
1978 1975 if (err) {
1979 1976 if (pp != NULL)
1980 1977 pvn_read_done(pp, B_ERROR);
1981 1978 return (err);
1982 1979 }
1983 1980 }
1984 1981
1985 1982 if (pagefound) {
1986 1983 se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
1987 1984 /*
1988 1985 * Page exists in the cache, acquire the appropriate
1989 1986 * lock. If this fails, start all over again.
1990 1987 */
1991 1988
1992 1989 if ((pp = page_lookup(vp, off, se)) == NULL) {
1993 1990 spec_lostpage++;
1994 1991 goto reread;
1995 1992 }
1996 1993 pl[0] = pp;
1997 1994 pl[1] = NULL;
1998 1995
1999 1996 sp->s_nextr = off + PAGESIZE;
2000 1997 return (0);
2001 1998 }
2002 1999
2003 2000 if (pp != NULL)
2004 2001 pvn_plist_init(pp, pl, plsz, off, io_len1, rw);
2005 2002 return (0);
2006 2003 }
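
spec_getapage() rounds the faulting offset down to a kluster boundary and clamps the transfer so it never runs past the known device size before calling pvn_read_kluster(). The arithmetic in isolation, as a compilable sketch with assumed sizes:

#include <stdio.h>

#define	PAGESIZE	4096ULL

int
main(void)
{
	/* Assumed values, for illustration only. */
	unsigned long long off = 150000;		/* faulting offset */
	unsigned long long size = 160000;		/* device size */
	unsigned long long adj_klustsize = 16 * PAGESIZE;
	unsigned long long blkoff, blksz;

	/* Round down to a kluster boundary... */
	blkoff = (off / adj_klustsize) * adj_klustsize;

	/* ...and never read past the known end of the device. */
	if (blkoff + adj_klustsize <= size)
		blksz = adj_klustsize;
	else
		blksz = (size - blkoff < adj_klustsize) ?
		    size - blkoff : adj_klustsize;

	printf("blkoff %llu, blksz %llu\n", blkoff, blksz);
	return (0);
}
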
2007 2004
2008 2005 /*
2009 2006  * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED, B_FORCE}.
2010 2007 * If len == 0, do from off to EOF.
2011 2008 *
2012 2009 * The normal cases should be len == 0 & off == 0 (entire vp list),
2013 2010 * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
2014 2011 * (from pageout).
2015 2012 */
2016 2013 /*ARGSUSED5*/
2017 2014 int
2018 2015 spec_putpage(
2019 2016 struct vnode *vp,
2020 2017 offset_t off,
2021 2018 size_t len,
2022 2019 int flags,
2023 2020 struct cred *cr,
2024 2021 caller_context_t *ct)
2025 2022 {
2026 2023 struct snode *sp = VTOS(vp);
2027 2024 struct vnode *cvp;
2028 2025 page_t *pp;
2029 2026 u_offset_t io_off;
2030 2027 size_t io_len = 0; /* for lint */
2031 2028 int err = 0;
2032 2029 u_offset_t size;
2033 2030 u_offset_t tmpoff;
2034 2031
2035 2032 ASSERT(vp->v_count != 0);
2036 2033
2037 2034 if (vp->v_flag & VNOMAP)
2038 2035 return (ENOSYS);
2039 2036
2040 2037 cvp = sp->s_commonvp;
2041 2038 size = SPEC_SIZE(VTOS(cvp));
2042 2039
2043 2040 if (!vn_has_cached_data(vp) || off >= size)
2044 2041 return (0);
2045 2042
2046 2043 ASSERT(vp->v_type == VBLK && cvp == vp);
2047 2044 TRACE_4(TR_FAC_SPECFS, TR_SPECFS_PUTPAGE,
2048 2045 "specfs putpage:vp %p off %llx len %ld snode %p",
2049 2046 vp, off, len, sp);
2050 2047
2051 2048 if (len == 0) {
2052 2049 /*
2053 2050 * Search the entire vp list for pages >= off.
2054 2051 */
2055 2052 err = pvn_vplist_dirty(vp, off, spec_putapage,
2056 2053 flags, cr);
2057 2054 } else {
2058 2055 u_offset_t eoff;
2059 2056
2060 2057 /*
2061 2058 * Loop over all offsets in the range [off...off + len]
2062 2059 * looking for pages to deal with. We set limits so
2063 2060 * that we kluster to klustsize boundaries.
2064 2061 */
2065 2062 eoff = off + len;
2066 2063 for (io_off = off; io_off < eoff && io_off < size;
2067 2064 io_off += io_len) {
2068 2065 /*
2069 2066 			 * If we are not invalidating, synchronously
2070 2067 			 * freeing, or writing pages, use the routine
2071 2068 			 * page_lookup_nowait() to prevent reclaiming
2072 2069 * them from the free list.
2073 2070 */
2074 2071 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
2075 2072 pp = page_lookup(vp, io_off,
2076 2073 (flags & (B_INVAL | B_FREE)) ?
2077 2074 SE_EXCL : SE_SHARED);
2078 2075 } else {
2079 2076 pp = page_lookup_nowait(vp, io_off,
2080 2077 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
2081 2078 }
2082 2079
2083 2080 if (pp == NULL || pvn_getdirty(pp, flags) == 0)
2084 2081 io_len = PAGESIZE;
2085 2082 else {
2086 2083 err = spec_putapage(vp, pp, &tmpoff, &io_len,
2087 2084 flags, cr);
2088 2085 io_off = tmpoff;
2089 2086 if (err != 0)
2090 2087 break;
2091 2088 /*
2092 2089 * "io_off" and "io_len" are returned as
2093 2090 * the range of pages we actually wrote.
2094 2091 * This allows us to skip ahead more quickly
2095 2092 * since several pages may've been dealt
2096 2093 * with by this iteration of the loop.
2097 2094 */
2098 2095 }
2099 2096 }
2100 2097 }
2101 2098 return (err);
2102 2099 }
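
In the len != 0 arm, the loop advances io_off by whatever io_len spec_putapage() reports back, so a single iteration can retire an entire kluster of dirty pages. A sketch of that skip-ahead pattern, with a stub writer that pretends to push four pages per call:

#include <stdio.h>

#define	PAGESIZE	4096ULL

/*
 * Stub writer: pretend each call writes a four-page kluster starting
 * at io_off (assumed behavior, for illustration only).
 */
static unsigned long long
putapage_stub(unsigned long long io_off)
{
	printf("wrote %llu..%llu\n", io_off, io_off + 4 * PAGESIZE);
	return (4 * PAGESIZE);		/* io_len actually written */
}

int
main(void)
{
	unsigned long long off = 0, len = 16 * PAGESIZE;
	unsigned long long io_off, io_len = 0;

	/* Advance by the returned io_len, not blindly by PAGESIZE. */
	for (io_off = off; io_off < off + len; io_off += io_len)
		io_len = putapage_stub(io_off);
	return (0);
}
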
2103 2100
2104 2101
2105 2102 /*
2106 2103 * Write out a single page, possibly klustering adjacent
2107 2104 * dirty pages.
2108 2105 */
2109 2106 /*ARGSUSED5*/
2110 2107 static int
2111 2108 spec_putapage(
2112 2109 struct vnode *vp,
2113 2110 page_t *pp,
2114 2111 u_offset_t *offp, /* return value */
2115 2112 size_t *lenp, /* return value */
2116 2113 int flags,
2117 2114 struct cred *cr)
2118 2115 {
2119 2116 struct snode *sp = VTOS(vp);
2120 2117 u_offset_t io_off;
2121 2118 size_t io_len;
2122 2119 size_t blksz;
2123 2120 u_offset_t blkoff;
2124 2121 int err = 0;
2125 2122 struct buf *bp;
2126 2123 u_offset_t size;
2127 2124 size_t adj_klustsize;
2128 2125 u_offset_t tmpoff;
2129 2126
2130 2127 /*
2131 2128 * Destroy read ahead value since we are really going to write.
2132 2129 */
2133 2130 sp->s_nextr = 0;
2134 2131 size = SPEC_SIZE(VTOS(sp->s_commonvp));
2135 2132
2136 2133 adj_klustsize = klustsize;
2137 2134
2138 2135 blkoff = (pp->p_offset / adj_klustsize) * adj_klustsize;
2139 2136
2140 2137 if (blkoff + adj_klustsize <= size)
2141 2138 blksz = adj_klustsize;
2142 2139 else
2143 2140 blksz = size - blkoff;
2144 2141
2145 2142 /*
2146 2143 * Find a kluster that fits in one contiguous chunk.
2147 2144 */
2148 2145 pp = pvn_write_kluster(vp, pp, &tmpoff, &io_len, blkoff,
2149 2146 blksz, flags);
2150 2147 io_off = tmpoff;
2151 2148
2152 2149 /*
2153 2150 * Check for page length rounding problems
2154 2151 * XXX - Is this necessary?
2155 2152 */
2156 2153 if (io_off + io_len > size) {
2157 2154 ASSERT((io_off + io_len) - size < PAGESIZE);
2158 2155 io_len = size - io_off;
2159 2156 }
2160 2157
2161 2158 bp = spec_startio(vp, pp, io_off, io_len, B_WRITE | flags);
2162 2159
2163 2160 /*
2164 2161 * Wait for i/o to complete if the request is not B_ASYNC.
2165 2162 */
2166 2163 if ((flags & B_ASYNC) == 0) {
2167 2164 err = biowait(bp);
2168 2165 pageio_done(bp);
2169 2166 pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2170 2167 }
2171 2168
2172 2169 if (offp)
2173 2170 *offp = io_off;
2174 2171 if (lenp)
2175 2172 *lenp = io_len;
2176 2173 TRACE_4(TR_FAC_SPECFS, TR_SPECFS_PUTAPAGE,
2177 2174 "specfs putapage:vp %p offp %p snode %p err %d",
2178 2175 vp, offp, sp, err);
2179 2176 return (err);
2180 2177 }
2181 2178
2182 2179 /*
2183 2180 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
2184 2181 */
2185 2182 static struct buf *
2186 2183 spec_startio(
2187 2184 struct vnode *vp,
2188 2185 page_t *pp,
2189 2186 u_offset_t io_off,
2190 2187 size_t io_len,
2191 2188 int flags)
2192 2189 {
2193 2190 struct buf *bp;
2194 2191
2195 2192 bp = pageio_setup(pp, io_len, vp, flags);
2196 2193
2197 2194 bp->b_edev = vp->v_rdev;
2198 2195 bp->b_dev = cmpdev(vp->v_rdev);
2199 2196 bp->b_blkno = btodt(io_off);
2200 2197 bp->b_un.b_addr = (caddr_t)0;
2201 2198
2202 2199 (void) bdev_strategy(bp);
2203 2200
2204 2201 if (flags & B_READ)
2205 2202 lwp_stat_update(LWP_STAT_INBLK, 1);
2206 2203 else
2207 2204 lwp_stat_update(LWP_STAT_OUBLK, 1);
2208 2205
2209 2206 return (bp);
2210 2207 }
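
spec_startio() converts the byte offset into a device block number for b_blkno via btodt(). Assuming the conventional 512-byte DEV_BSIZE (and noting that page-aligned offsets make the rounding direction moot), the conversion reduces to a shift:

#include <stdio.h>

/* Assumed: 512-byte device blocks (DEV_BSIZE), hence a shift of 9. */
#define	DEV_BSHIFT	9

int
main(void)
{
	unsigned long long io_off = 8192;	/* page-aligned offset */

	/* 8192 bytes => block 16 with 512-byte blocks. */
	printf("b_blkno %llu\n", io_off >> DEV_BSHIFT);
	return (0);
}
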
2211 2208
2212 2209 static int
2213 2210 spec_poll(
2214 2211 struct vnode *vp,
2215 2212 short events,
2216 2213 int anyyet,
2217 2214 short *reventsp,
2218 2215 struct pollhead **phpp,
2219 2216 caller_context_t *ct)
2220 2217 {
2221 2218 dev_t dev;
2222 2219 int error;
2223 2220
2224 2221 if (vp->v_type == VBLK)
2225 2222 error = fs_poll(vp, events, anyyet, reventsp, phpp, ct);
2226 2223 else {
2227 2224 ASSERT(vp->v_type == VCHR);
2228 2225 dev = vp->v_rdev;
2229 2226 if (vp->v_stream) {
2230 2227 ASSERT(vp->v_stream != NULL);
2231 2228 error = strpoll(vp->v_stream, events, anyyet,
2232 2229 reventsp, phpp);
2233 2230 } else if (devopsp[getmajor(dev)]->devo_cb_ops->cb_chpoll) {
2234 2231 error = cdev_poll(dev, events, anyyet, reventsp, phpp);
2235 2232 } else {
2236 2233 error = fs_poll(vp, events, anyyet, reventsp, phpp, ct);
2237 2234 }
2238 2235 }
2239 2236 return (error);
2240 2237 }
2241 2238
2242 2239 /*
2243 2240 * This routine is called through the cdevsw[] table to handle
2244 2241 * traditional mmap'able devices that support a d_mmap function.
2245 2242 */
2246 2243 /*ARGSUSED8*/
2247 2244 int
2248 2245 spec_segmap(
2249 2246 dev_t dev,
2250 2247 off_t off,
2251 2248 struct as *as,
2252 2249 caddr_t *addrp,
2253 2250 off_t len,
2254 2251 uint_t prot,
2255 2252 uint_t maxprot,
2256 2253 uint_t flags,
2257 2254 struct cred *cred)
2258 2255 {
2259 2256 struct segdev_crargs dev_a;
2260 2257 int (*mapfunc)(dev_t dev, off_t off, int prot);
2261 2258 size_t i;
2262 2259 int error;
2263 2260
2264 2261 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2265 2262 return (ENODEV);
2266 2263 TRACE_4(TR_FAC_SPECFS, TR_SPECFS_SEGMAP,
2267 2264 "specfs segmap:dev %x as %p len %lx prot %x",
2268 2265 dev, as, len, prot);
2269 2266
2270 2267 /*
2271 2268 * Character devices that support the d_mmap
2272 2269 * interface can only be mmap'ed shared.
2273 2270 */
2274 2271 if ((flags & MAP_TYPE) != MAP_SHARED)
2275 2272 return (EINVAL);
2276 2273
2277 2274 /*
2278 2275 * Check to ensure that the entire range is
2279 2276 * legal and we are not trying to map in
2280 2277 * more than the device will let us.
2281 2278 */
2282 2279 for (i = 0; i < len; i += PAGESIZE) {
2283 2280 if (cdev_mmap(mapfunc, dev, off + i, maxprot) == -1)
2284 2281 return (ENXIO);
2285 2282 }
2286 2283
2287 2284 as_rangelock(as);
2288 2285 /* Pick an address w/o worrying about any vac alignment constraints. */
2289 2286 error = choose_addr(as, addrp, len, off, ADDR_NOVACALIGN, flags);
2290 2287 if (error != 0) {
2291 2288 as_rangeunlock(as);
2292 2289 return (error);
2293 2290 }
2294 2291
2295 2292 dev_a.mapfunc = mapfunc;
2296 2293 dev_a.dev = dev;
2297 2294 dev_a.offset = off;
2298 2295 dev_a.prot = (uchar_t)prot;
2299 2296 dev_a.maxprot = (uchar_t)maxprot;
2300 2297 dev_a.hat_flags = 0;
2301 2298 dev_a.hat_attr = 0;
2302 2299 dev_a.devmap_data = NULL;
2303 2300
2304 2301 error = as_map(as, *addrp, len, segdev_create, &dev_a);
2305 2302 as_rangeunlock(as);
2306 2303 return (error);
2307 2304 }
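
Before building the segdev mapping, spec_segmap() probes every page of the requested range through the driver's d_mmap entry point, which signals an unmappable offset by returning -1. A generic sketch of that validation loop, with a hypothetical probe callback standing in for cdev_mmap():

#include <stddef.h>

#define	PAGESIZE	4096UL

/*
 * Walk [off, off + len) a page at a time; reject the whole mapping
 * if the probe fails anywhere in the range (ENXIO in the real code).
 */
static int
range_ok(long (*probe)(unsigned long), unsigned long off, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += PAGESIZE) {
		if (probe(off + i) == -1)
			return (0);
	}
	return (1);
}

/* Example probe: only the first 64 KB of the device is mappable. */
static long
probe_64k(unsigned long off)
{
	return (off < 65536 ? 0 : -1);
}

int
main(void)
{
	return (range_ok(probe_64k, 0, 16 * PAGESIZE) ? 0 : 1);
}
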
2308 2305
2309 2306 int
2310 2307 spec_char_map(
2311 2308 dev_t dev,
2312 2309 offset_t off,
2313 2310 struct as *as,
2314 2311 caddr_t *addrp,
2315 2312 size_t len,
2316 2313 uchar_t prot,
2317 2314 uchar_t maxprot,
2318 2315 uint_t flags,
2319 2316 struct cred *cred)
2320 2317 {
2321 2318 int error = 0;
2322 2319 major_t maj = getmajor(dev);
2323 2320 int map_flag;
2324 2321 int (*segmap)(dev_t, off_t, struct as *,
2325 2322 caddr_t *, off_t, uint_t, uint_t, uint_t, cred_t *);
2326 2323 int (*devmap)(dev_t, devmap_cookie_t, offset_t,
2327 2324 size_t, size_t *, uint_t);
2328 2325 int (*mmap)(dev_t dev, off_t off, int prot);
2329 2326
2330 2327 /*
2331 2328 * Character device: let the device driver
2332 2329 * pick the appropriate segment driver.
2333 2330 *
2334 2331 * 4.x compat.: allow 'NULL' cb_segmap => spec_segmap
2335 2332 * Kindness: allow 'nulldev' cb_segmap => spec_segmap
2336 2333 */
2337 2334 segmap = devopsp[maj]->devo_cb_ops->cb_segmap;
2338 2335 if (segmap == NULL || segmap == nulldev || segmap == nodev) {
2339 2336 mmap = devopsp[maj]->devo_cb_ops->cb_mmap;
2340 2337 map_flag = devopsp[maj]->devo_cb_ops->cb_flag;
2341 2338
2342 2339 /*
2343 2340 * Use old mmap framework if the driver has both mmap
2344 2341 * and devmap entry points. This is to prevent the
2345 2342 		 * system from calling an invalid devmap entry point
2346 2343 * for some drivers that might have put garbage in the
2347 2344 * devmap entry point.
2348 2345 */
2349 2346 if ((map_flag & D_DEVMAP) || mmap == NULL ||
2350 2347 mmap == nulldev || mmap == nodev) {
2351 2348 devmap = devopsp[maj]->devo_cb_ops->cb_devmap;
2352 2349
2353 2350 /*
2354 2351 * If driver provides devmap entry point in
2355 2352 * cb_ops but not xx_segmap(9E), call
2356 2353 * devmap_setup with default settings
2357 2354 * (NULL) for callback_ops and driver
2358 2355 * callback private data
2359 2356 */
2360 2357 if (devmap == nodev || devmap == NULL ||
2361 2358 devmap == nulldev)
2362 2359 return (ENODEV);
2363 2360
2364 2361 error = devmap_setup(dev, off, as, addrp,
2365 2362 len, prot, maxprot, flags, cred);
2366 2363
2367 2364 return (error);
2368 2365 } else
2369 2366 segmap = spec_segmap;
2370 2367 } else
2371 2368 segmap = cdev_segmap;
2372 2369
2373 2370 return ((*segmap)(dev, (off_t)off, as, addrp, len, prot,
2374 2371 maxprot, flags, cred));
2375 2372 }
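
The entry-point selection in spec_char_map() reads as a small decision tree: a usable cb_segmap wins outright; otherwise a driver flagged D_DEVMAP, or one without a usable cb_mmap, must supply cb_devmap or get ENODEV; anything else falls back to the old d_mmap framework via spec_segmap. A sketch of just that dispatch, with stubbed cb_ops fields and an assumed flag value:

#include <stdio.h>

#define	D_DEVMAP	0x10		/* assumed flag value */

/* Stubbed cb_ops: nonzero fields mean "usable entry point". */
struct cb_stub {
	int	has_segmap;
	int	has_mmap;
	int	has_devmap;
	int	flags;
};

static const char *
pick_path(const struct cb_stub *cb)
{
	if (cb->has_segmap)
		return ("cdev_segmap");
	if ((cb->flags & D_DEVMAP) || !cb->has_mmap)
		return (cb->has_devmap ? "devmap_setup" : "ENODEV");
	return ("spec_segmap");		/* old d_mmap framework */
}

int
main(void)
{
	struct cb_stub legacy = { 0, 1, 0, 0 };
	struct cb_stub devmap = { 0, 1, 1, D_DEVMAP };

	printf("%s\n", pick_path(&legacy));	/* spec_segmap */
	printf("%s\n", pick_path(&devmap));	/* devmap_setup */
	return (0);
}
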
2376 2373
2377 2374 /*ARGSUSED9*/
2378 2375 static int
2379 2376 spec_map(
2380 2377 struct vnode *vp,
2381 2378 offset_t off,
2382 2379 struct as *as,
2383 2380 caddr_t *addrp,
2384 2381 size_t len,
2385 2382 uchar_t prot,
2386 2383 uchar_t maxprot,
2387 2384 uint_t flags,
2388 2385 struct cred *cred,
2389 2386 caller_context_t *ct)
2390 2387 {
2391 2388 int error = 0;
2392 2389 struct snode *sp = VTOS(vp);
2393 2390
2394 2391 if (vp->v_flag & VNOMAP)
2395 2392 return (ENOSYS);
2396 2393
2397 2394 /* fail map with ENXIO if the device is fenced off */
2398 2395 if (S_ISFENCED(sp))
2399 2396 return (ENXIO);
2400 2397
2401 2398 /*
2402 2399 * If file is locked, fail mapping attempt.
2403 2400 */
2404 2401 if (vn_has_flocks(vp))
2405 2402 return (EAGAIN);
2406 2403
2407 2404 if (vp->v_type == VCHR) {
2408 2405 return (spec_char_map(vp->v_rdev, off, as, addrp, len, prot,
2409 2406 maxprot, flags, cred));
2410 2407 } else if (vp->v_type == VBLK) {
2411 2408 struct segvn_crargs vn_a;
2412 2409 struct vnode *cvp;
2413 2410 struct snode *sp;
2414 2411
2415 2412 /*
2416 2413 * Block device, use segvn mapping to the underlying commonvp
2417 2414 * for pages.
2418 2415 */
2419 2416 if (off > spec_maxoffset(vp))
2420 2417 return (ENXIO);
2421 2418
2422 2419 sp = VTOS(vp);
2423 2420 cvp = sp->s_commonvp;
2424 2421 ASSERT(cvp != NULL);
2425 2422
2426 2423 if (off < 0 || ((offset_t)(off + len) < 0))
2427 2424 return (ENXIO);
2428 2425
2429 2426 as_rangelock(as);
2430 2427 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
2431 2428 if (error != 0) {
2432 2429 as_rangeunlock(as);
2433 2430 return (error);
2434 2431 }
2435 2432
2436 2433 vn_a.vp = cvp;
2437 2434 vn_a.offset = off;
2438 2435 vn_a.type = flags & MAP_TYPE;
2439 2436 vn_a.prot = (uchar_t)prot;
2440 2437 vn_a.maxprot = (uchar_t)maxprot;
2441 2438 vn_a.flags = flags & ~MAP_TYPE;
2442 2439 vn_a.cred = cred;
2443 2440 vn_a.amp = NULL;
2444 2441 vn_a.szc = 0;
2445 2442 vn_a.lgrp_mem_policy_flags = 0;
2446 2443
2447 2444 error = as_map(as, *addrp, len, segvn_create, &vn_a);
2448 2445 as_rangeunlock(as);
2449 2446 } else
2450 2447 return (ENODEV);
2451 2448
2452 2449 return (error);
2453 2450 }
2454 2451
2455 2452 /*ARGSUSED1*/
2456 2453 static int
2457 2454 spec_addmap(
2458 2455 struct vnode *vp, /* the common vnode */
2459 2456 offset_t off,
2460 2457 struct as *as,
2461 2458 caddr_t addr,
2462 2459 size_t len, /* how many bytes to add */
2463 2460 uchar_t prot,
2464 2461 uchar_t maxprot,
2465 2462 uint_t flags,
2466 2463 struct cred *cred,
2467 2464 caller_context_t *ct)
2468 2465 {
2469 2466 int error = 0;
2470 2467 struct snode *csp = VTOS(vp);
2471 2468 ulong_t npages;
2472 2469
2473 2470 ASSERT(vp != NULL && VTOS(vp)->s_commonvp == vp);
2474 2471
2475 2472 /*
2476 2473 * XXX Given the above assertion, this might not
2477 2474 * be a particularly sensible thing to test.
2478 2475 */
2479 2476 if (vp->v_flag & VNOMAP)
2480 2477 return (ENOSYS);
2481 2478
2482 2479 /* fail with EIO if the device is fenced off */
2483 2480 if (S_ISFENCED(csp))
2484 2481 return (EIO);
2485 2482
2486 2483 npages = btopr(len);
2487 2484 LOCK_CSP(csp);
2488 2485 csp->s_mapcnt += npages;
2489 2486
2490 2487 UNLOCK_CSP(csp);
2491 2488 return (error);
2492 2489 }
2493 2490
2494 2491 /*ARGSUSED1*/
2495 2492 static int
2496 2493 spec_delmap(
2497 2494 struct vnode *vp, /* the common vnode */
2498 2495 offset_t off,
2499 2496 struct as *as,
2500 2497 caddr_t addr,
2501 2498 size_t len, /* how many bytes to take away */
2502 2499 uint_t prot,
2503 2500 uint_t maxprot,
2504 2501 uint_t flags,
2505 2502 struct cred *cred,
2506 2503 caller_context_t *ct)
2507 2504 {
2508 2505 struct snode *csp = VTOS(vp);
2509 2506 ulong_t npages;
2510 2507 long mcnt;
2511 2508
2512 2509 /* segdev passes us the common vp */
2513 2510
2514 2511 ASSERT(vp != NULL && VTOS(vp)->s_commonvp == vp);
2515 2512
2516 2513 /* allow delmap to succeed even if device fenced off */
2517 2514
2518 2515 /*
2519 2516 * XXX Given the above assertion, this might not
2520 2517 * be a particularly sensible thing to test..
2521 2518 	 * be a particularly sensible thing to test.
2522 2519 if (vp->v_flag & VNOMAP)
2523 2520 return (ENOSYS);
2524 2521
2525 2522 npages = btopr(len);
2526 2523
2527 2524 LOCK_CSP(csp);
2528 2525 mutex_enter(&csp->s_lock);
2529 2526 mcnt = (csp->s_mapcnt -= npages);
2530 2527
2531 2528 if (mcnt == 0) {
2532 2529 /*
2533 2530 * Call the close routine when the last reference of any
2534 2531 * kind through any [s, v]node goes away. The s_dip hold
2535 2532 * on the devinfo node is released when the vnode is
2536 2533 * destroyed.
2537 2534 */
2538 2535 if (csp->s_count == 0) {
2539 2536 csp->s_flag &= ~(SNEEDCLOSE | SSIZEVALID);
2540 2537
2541 2538 /* See comment in spec_close() */
2542 2539 if (csp->s_flag & (SCLONE | SSELFCLONE))
2543 2540 csp->s_flag &= ~SDIPSET;
2544 2541
2545 2542 mutex_exit(&csp->s_lock);
2546 2543
2547 2544 (void) device_close(vp, 0, cred);
2548 2545 } else
2549 2546 mutex_exit(&csp->s_lock);
2550 2547
2551 2548 mutex_enter(&csp->s_lock);
2552 2549 }
2553 2550 ASSERT(mcnt >= 0);
2554 2551
2555 2552 UNLOCK_CSP_LOCK_HELD(csp);
2556 2553 mutex_exit(&csp->s_lock);
2557 2554
2558 2555 return (0);
2559 2556 }
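
spec_delmap() decrements the per-common-snode map count and, once both it and the open count (s_count) are zero, drives the deferred device_close(). The accounting decision in miniature (snode field names borrowed for flavor; locking and the SNEEDCLOSE details omitted):

#include <stdio.h>

/* Simplified, lock-free model of the last-reference close decision. */
struct csp_stub {
	long	s_mapcnt;	/* pages mapped via segdev */
	long	s_count;	/* open-style references */
};

static void
delmap_stub(struct csp_stub *csp, long npages)
{
	csp->s_mapcnt -= npages;
	if (csp->s_mapcnt == 0 && csp->s_count == 0)
		printf("last reference gone: device_close()\n");
}

int
main(void)
{
	struct csp_stub csp = { 8, 0 };

	delmap_stub(&csp, 4);	/* still mapped, no close */
	delmap_stub(&csp, 4);	/* triggers the close */
	return (0);
}
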
2560 2557
2561 2558 /*ARGSUSED4*/
2562 2559 static int
2563 2560 spec_dump(
2564 2561 struct vnode *vp,
2565 2562 caddr_t addr,
2566 2563 offset_t bn,
2567 2564 offset_t count,
2568 2565 caller_context_t *ct)
2569 2566 {
2570 2567 /* allow dump to succeed even if device fenced off */
2571 2568
2572 2569 ASSERT(vp->v_type == VBLK);
2573 2570 return (bdev_dump(vp->v_rdev, addr, (daddr_t)bn, (int)count));
2574 2571 }
2575 2572
2576 2573
2577 2574 /*
2578 2575 * Do i/o on the given page list from/to vp, io_off for io_len.
2579 2576 * Flags are composed of:
2580 2577 * {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED, B_READ, B_WRITE}
2581 2578 * If B_ASYNC is not set i/o is waited for.
2582 2579 */
2583 2580 /*ARGSUSED5*/
2584 2581 static int
2585 2582 spec_pageio(
2586 2583 struct vnode *vp,
2587 2584 page_t *pp,
2588 2585 u_offset_t io_off,
2589 2586 size_t io_len,
2590 2587 int flags,
2591 2588 cred_t *cr,
2592 2589 caller_context_t *ct)
2593 2590 {
2594 2591 struct buf *bp = NULL;
2595 2592 int err = 0;
2596 2593
2597 2594 if (pp == NULL)
2598 2595 return (EINVAL);
2599 2596
2600 2597 bp = spec_startio(vp, pp, io_off, io_len, flags);
2601 2598
2602 2599 /*
2603 2600 * Wait for i/o to complete if the request is not B_ASYNC.
2604 2601 */
2605 2602 if ((flags & B_ASYNC) == 0) {
2606 2603 err = biowait(bp);
2607 2604 pageio_done(bp);
2608 2605 }
2609 2606 return (err);
2610 2607 }
2611 2608
2612 2609 /*
2613 2610 * Set ACL on underlying vnode if one exists, or return ENOSYS otherwise.
2614 2611 */
2615 2612 int
2616 2613 spec_setsecattr(
2617 2614 struct vnode *vp,
2618 2615 vsecattr_t *vsap,
2619 2616 int flag,
2620 2617 struct cred *cr,
2621 2618 caller_context_t *ct)
2622 2619 {
2623 2620 struct vnode *realvp;
2624 2621 struct snode *sp = VTOS(vp);
2625 2622 int error;
2626 2623
2627 2624 /* fail with ENXIO if the device is fenced off */
2628 2625 if (S_ISFENCED(sp))
2629 2626 return (ENXIO);
2630 2627
2631 2628 /*
2632 2629 * The acl(2) system calls VOP_RWLOCK on the file before setting an
2633 2630 * ACL, but since specfs does not serialize reads and writes, this
2634 2631 * VOP does not do anything. However, some backing file systems may
2635 2632 * expect the lock to be held before setting an ACL, so it is taken
2636 2633 * here privately to avoid serializing specfs reads and writes.
2637 2634 */
2638 2635 if ((realvp = sp->s_realvp) != NULL) {
2639 2636 (void) VOP_RWLOCK(realvp, V_WRITELOCK_TRUE, ct);
2640 2637 error = VOP_SETSECATTR(realvp, vsap, flag, cr, ct);
2641 2638 (void) VOP_RWUNLOCK(realvp, V_WRITELOCK_TRUE, ct);
2642 2639 return (error);
2643 2640 } else
2644 2641 return (fs_nosys());
2645 2642 }
2646 2643
2647 2644 /*
2648 2645 * Get ACL from underlying vnode if one exists, or fabricate it from
2649 2646 * the permissions returned by spec_getattr() otherwise.
2650 2647 */
2651 2648 int
2652 2649 spec_getsecattr(
2653 2650 struct vnode *vp,
2654 2651 vsecattr_t *vsap,
2655 2652 int flag,
2656 2653 struct cred *cr,
2657 2654 caller_context_t *ct)
2658 2655 {
2659 2656 struct vnode *realvp;
2660 2657 struct snode *sp = VTOS(vp);
2661 2658
2662 2659 /* fail with ENXIO if the device is fenced off */
2663 2660 if (S_ISFENCED(sp))
2664 2661 return (ENXIO);
2665 2662
2666 2663 if ((realvp = sp->s_realvp) != NULL)
2667 2664 return (VOP_GETSECATTR(realvp, vsap, flag, cr, ct));
2668 2665 else
2669 2666 return (fs_fab_acl(vp, vsap, flag, cr, ct));
2670 2667 }
2671 2668
2672 2669 int
2673 2670 spec_pathconf(
2674 2671 vnode_t *vp,
2675 2672 int cmd,
2676 2673 ulong_t *valp,
2677 2674 cred_t *cr,
2678 2675 caller_context_t *ct)
2679 2676 {
2680 2677 vnode_t *realvp;
2681 2678 struct snode *sp = VTOS(vp);
2682 2679
2683 2680 /* fail with ENXIO if the device is fenced off */
2684 2681 if (S_ISFENCED(sp))
2685 2682 return (ENXIO);
2686 2683
2687 2684 if ((realvp = sp->s_realvp) != NULL)
2688 2685 return (VOP_PATHCONF(realvp, cmd, valp, cr, ct));
2689 2686 else
2690 2687 return (fs_pathconf(vp, cmd, valp, cr, ct));
2691 2688 }