6149 use NULL capable segop as a shorthand for no-capabilities
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2015, Joyent, Inc. All rights reserved.
24 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - shared or copy-on-write from a vnode/anonymous memory.
42 42 */
43 43
44 44 #include <sys/types.h>
45 45 #include <sys/param.h>
46 46 #include <sys/t_lock.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/vmsystm.h>
53 53 #include <sys/tuneable.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/swap.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/vtrace.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/callb.h>
61 61 #include <sys/vm.h>
62 62 #include <sys/dumphdr.h>
63 63 #include <sys/lgrp.h>
64 64
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_vn.h>
69 69 #include <vm/pvn.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/vpage.h>
73 73 #include <sys/proc.h>
74 74 #include <sys/task.h>
75 75 #include <sys/project.h>
76 76 #include <sys/zone.h>
77 77 #include <sys/shm_impl.h>
78 78
79 79 /*
80 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all
81 81 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
82 82 * it can. In the rare case when this page list is not large enough, it
83 83 * goes and gets a large enough array from kmem.
84 84 *
85 85 * This small page list array covers either 8 pages or 64kB worth of pages -
86 86 * whichever is smaller.
87 87 */
88 88 #define PVN_MAX_GETPAGE_SZ 0x10000
89 89 #define PVN_MAX_GETPAGE_NUM 0x8
90 90
91 91 #if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
92 92 #define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM)
93 93 #define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM
94 94 #else
95 95 #define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ
96 96 #define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ)
97 97 #endif
98 98
99 99 /*
100 100 * Private seg op routines.
101 101 */
102 102 static int segvn_dup(struct seg *seg, struct seg *newseg);
103 103 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
104 104 static void segvn_free(struct seg *seg);
105 105 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
106 106 caddr_t addr, size_t len, enum fault_type type,
107 107 enum seg_rw rw);
108 108 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
109 109 static int segvn_setprot(struct seg *seg, caddr_t addr,
110 110 size_t len, uint_t prot);
111 111 static int segvn_checkprot(struct seg *seg, caddr_t addr,
112 112 size_t len, uint_t prot);
113 113 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
114 114 static size_t segvn_swapout(struct seg *seg);
115 115 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
116 116 int attr, uint_t flags);
117 117 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
118 118 char *vec);
119 119 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
120 120 int attr, int op, ulong_t *lockmap, size_t pos);
121 121 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
122 122 uint_t *protv);
123 123 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
124 124 static int segvn_gettype(struct seg *seg, caddr_t addr);
125 125 static int segvn_getvp(struct seg *seg, caddr_t addr,
126 126 struct vnode **vpp);
127 127 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
128 128 uint_t behav);
129 129 static void segvn_dump(struct seg *seg);
130 130 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
131 131 struct page ***ppp, enum lock_type type, enum seg_rw rw);
132 132 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
133 133 uint_t szc);
134 134 static int segvn_getmemid(struct seg *seg, caddr_t addr,
135 135 memid_t *memidp);
136 136 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
137 -static int segvn_capable(struct seg *seg, segcapability_t capable);
138 137 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
139 138
140 139 struct seg_ops segvn_ops = {
141 140 .dup = segvn_dup,
142 141 .unmap = segvn_unmap,
143 142 .free = segvn_free,
144 143 .fault = segvn_fault,
145 144 .faulta = segvn_faulta,
146 145 .setprot = segvn_setprot,
147 146 .checkprot = segvn_checkprot,
148 147 .kluster = segvn_kluster,
149 148 .swapout = segvn_swapout,
150 149 .sync = segvn_sync,
151 150 .incore = segvn_incore,
152 151 .lockop = segvn_lockop,
153 152 .getprot = segvn_getprot,
154 153 .getoffset = segvn_getoffset,
155 154 .gettype = segvn_gettype,
156 155 .getvp = segvn_getvp,
157 156 .advise = segvn_advise,
158 157 .dump = segvn_dump,
159 158 .pagelock = segvn_pagelock,
160 159 .setpagesize = segvn_setpagesize,
161 160 .getmemid = segvn_getmemid,
162 161 .getpolicy = segvn_getpolicy,
163 - .capable = segvn_capable,
164 162 .inherit = segvn_inherit,
165 163 };
166 164
167 165 /*
168 166 * Common zfod structures, provided as a shorthand for others to use.
169 167 */
170 168 static segvn_crargs_t zfod_segvn_crargs =
171 169 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
172 170 static segvn_crargs_t kzfod_segvn_crargs =
173 171 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
174 172 PROT_ALL & ~PROT_USER);
175 173 static segvn_crargs_t stack_noexec_crargs =
176 174 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
177 175
178 176 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
179 177 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
180 178 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
181 179 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
182 180
183 181 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
184 182
185 183 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
186 184
187 185 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
188 186 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
189 187 uint_t segvn_pglock_comb_bshift;
190 188 size_t segvn_pglock_comb_palign;
191 189
192 190 static int segvn_concat(struct seg *, struct seg *, int);
193 191 static int segvn_extend_prev(struct seg *, struct seg *,
194 192 struct segvn_crargs *, size_t);
195 193 static int segvn_extend_next(struct seg *, struct seg *,
196 194 struct segvn_crargs *, size_t);
197 195 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
198 196 static void segvn_pagelist_rele(page_t **);
199 197 static void segvn_setvnode_mpss(vnode_t *);
200 198 static void segvn_relocate_pages(page_t **, page_t *);
201 199 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
202 200 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
203 201 uint_t, page_t **, page_t **, uint_t *, int *);
204 202 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
205 203 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
206 204 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
207 205 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
208 206 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
209 207 u_offset_t, struct vpage *, page_t **, uint_t,
210 208 enum fault_type, enum seg_rw, int);
211 209 static void segvn_vpage(struct seg *);
212 210 static size_t segvn_count_swap_by_vpages(struct seg *);
213 211
214 212 static void segvn_purge(struct seg *seg);
215 213 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
216 214 enum seg_rw, int);
217 215 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
218 216 enum seg_rw, int);
219 217
220 218 static int sameprot(struct seg *, caddr_t, size_t);
221 219
222 220 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
223 221 static int segvn_clrszc(struct seg *);
224 222 static struct seg *segvn_split_seg(struct seg *, caddr_t);
225 223 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
226 224 ulong_t, uint_t);
227 225
228 226 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
229 227 size_t, void *, u_offset_t);
230 228
231 229 static struct kmem_cache *segvn_cache;
232 230 static struct kmem_cache **segvn_szc_cache;
233 231
234 232 #ifdef VM_STATS
235 233 static struct segvnvmstats_str {
236 234 ulong_t fill_vp_pages[31];
237 235 ulong_t fltvnpages[49];
238 236 ulong_t fullszcpages[10];
239 237 ulong_t relocatepages[3];
240 238 ulong_t fltanpages[17];
241 239 ulong_t pagelock[2];
242 240 ulong_t demoterange[3];
243 241 } segvnvmstats;
244 242 #endif /* VM_STATS */
245 243
246 244 #define SDR_RANGE 1 /* demote entire range */
247 245 #define SDR_END 2 /* demote non aligned ends only */
248 246
249 247 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
250 248 if ((len) != 0) { \
251 249 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
252 250 ASSERT(lpgaddr >= (seg)->s_base); \
253 251 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
254 252 (len)), pgsz); \
255 253 ASSERT(lpgeaddr > lpgaddr); \
256 254 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
257 255 } else { \
258 256 lpgeaddr = lpgaddr = (addr); \
259 257 } \
260 258 }
261 259
262 260 /*ARGSUSED*/
263 261 static int
264 262 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
265 263 {
266 264 struct segvn_data *svd = buf;
267 265
268 266 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
269 267 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
270 268 svd->svn_trnext = svd->svn_trprev = NULL;
271 269 return (0);
272 270 }
273 271
274 272 /*ARGSUSED1*/
275 273 static void
276 274 segvn_cache_destructor(void *buf, void *cdrarg)
277 275 {
278 276 struct segvn_data *svd = buf;
279 277
280 278 rw_destroy(&svd->lock);
281 279 mutex_destroy(&svd->segfree_syncmtx);
282 280 }
283 281
284 282 /*ARGSUSED*/
285 283 static int
286 284 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
287 285 {
288 286 bzero(buf, sizeof (svntr_t));
289 287 return (0);
290 288 }
291 289
292 290 /*
293 291 * Patching this variable to non-zero allows the system to run with
294 292 * stacks marked as "not executable". It's a bit of a kludge, but is
295 293 * provided as a tweakable for platforms that export those ABIs
296 294 * (e.g. sparc V8) that have executable stacks enabled by default.
297 295 * There are also some restrictions for platforms that don't actually
298 296 * implement 'noexec' protections.
299 297 *
300 298 * Once enabled, the system is (therefore) unable to provide a fully
301 299 * ABI-compliant execution environment, though practically speaking,
302 300 * most everything works. The exceptions are generally some interpreters
303 301 * and debuggers that create executable code on the stack and jump
304 302 * into it (without explicitly mprotecting the address range to include
305 303 * PROT_EXEC).
306 304 *
307 305 * One important class of applications that is disabled is those
308 306 * that have been transformed into malicious agents using one of the
309 307 * numerous "buffer overflow" attacks. See 4007890.
310 308 */
311 309 int noexec_user_stack = 0;
312 310 int noexec_user_stack_log = 1;
313 311
314 312 int segvn_lpg_disable = 0;
315 313 uint_t segvn_maxpgszc = 0;
316 314
317 315 ulong_t segvn_vmpss_clrszc_cnt;
318 316 ulong_t segvn_vmpss_clrszc_err;
319 317 ulong_t segvn_fltvnpages_clrszc_cnt;
320 318 ulong_t segvn_fltvnpages_clrszc_err;
321 319 ulong_t segvn_setpgsz_align_err;
322 320 ulong_t segvn_setpgsz_anon_align_err;
323 321 ulong_t segvn_setpgsz_getattr_err;
324 322 ulong_t segvn_setpgsz_eof_err;
325 323 ulong_t segvn_faultvnmpss_align_err1;
326 324 ulong_t segvn_faultvnmpss_align_err2;
327 325 ulong_t segvn_faultvnmpss_align_err3;
328 326 ulong_t segvn_faultvnmpss_align_err4;
329 327 ulong_t segvn_faultvnmpss_align_err5;
330 328 ulong_t segvn_vmpss_pageio_deadlk_err;
331 329
332 330 int segvn_use_regions = 1;
333 331
334 332 /*
335 333 * Segvn supports text replication optimization for NUMA platforms. Text
336 334 * replicas are represented by anon maps (amp). There's one amp per text file
337 335 * region per lgroup. A process chooses the amp for each of its text mappings
338 336 * based on the lgroup assignment of its main thread (t_tid = 1). All
339 337 * processes that want a replica on a particular lgroup for the same text file
340 338 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
341 339 * with vp,off,size,szc used as a key. Text replication segments are read only
342 340 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
343 341 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
344 342 * pages. Replication amp is assigned to a segment when it gets its first
345 343 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
346 344 * rechecks periodically if the process still maps an amp local to the main
347 345 * thread. If not, the async thread forces the process to remap to an amp in the new
348 346 * home lgroup of the main thread. Current text replication implementation
349 347 * only provides the benefit to workloads that do most of their work in the
350 348 * main thread of a process or all the threads of a process run in the same
351 349 * lgroup. To extend text replication benefit to different types of
352 350 * multithreaded workloads further work would be needed in the hat layer to
353 351 * allow the same virtual address in the same hat to simultaneously map
354 352 * different physical addresses (i.e. page table replication would be needed
355 353 * for x86).
356 354 *
357 355 * amp pages are used instead of vnode pages as long as segment has a very
358 356 * simple life cycle. It's created via segvn_create(), handles S_EXEC
359 357 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
360 358 * happens such as protection is changed, real COW fault happens, pagesize is
361 359 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
362 360 * text replication by converting the segment back to vnode only segment
363 361 * (unmap segment's address range and set svd->amp to NULL).
364 362 *
365 363 * The original file can be changed after amp is inserted into
366 364 * svntr_hashtab. Processes that are launched after the file is already
367 365 * changed can't use the replicas created prior to the file change. To
368 366 * implement this functionality, hash entries are timestamped. Replicas can
369 367 * only be used if the current file modification time is the same as the timestamp
370 368 * saved when the hash entry was created. However, timestamps alone are not
371 369 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
372 370 * deal with file changes via MAP_SHARED mappings differently. When writable
373 371 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
374 372 * existing replicas for this vnode as not usable for future text
375 373 * mappings. And we don't create new replicas for files that currently have
376 374 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
377 375 * true).
378 376 */
379 377
380 378 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
381 379 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
382 380
383 381 static ulong_t svntr_hashtab_sz = 512;
384 382 static svntr_bucket_t *svntr_hashtab = NULL;
385 383 static struct kmem_cache *svntr_cache;
386 384 static svntr_stats_t *segvn_textrepl_stats;
387 385 static ksema_t segvn_trasync_sem;
388 386
389 387 int segvn_disable_textrepl = 1;
390 388 size_t textrepl_size_thresh = (size_t)-1;
391 389 size_t segvn_textrepl_bytes = 0;
392 390 size_t segvn_textrepl_max_bytes = 0;
393 391 clock_t segvn_update_textrepl_interval = 0;
394 392 int segvn_update_tr_time = 10;
395 393 int segvn_disable_textrepl_update = 0;
396 394
397 395 static void segvn_textrepl(struct seg *);
398 396 static void segvn_textunrepl(struct seg *, int);
399 397 static void segvn_inval_trcache(vnode_t *);
400 398 static void segvn_trasync_thread(void);
401 399 static void segvn_trupdate_wakeup(void *);
402 400 static void segvn_trupdate(void);
403 401 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
404 402 ulong_t);
405 403
406 404 /*
407 405 * Initialize segvn data structures
408 406 */
409 407 void
410 408 segvn_init(void)
411 409 {
412 410 uint_t maxszc;
413 411 uint_t szc;
414 412 size_t pgsz;
415 413
416 414 segvn_cache = kmem_cache_create("segvn_cache",
417 415 sizeof (struct segvn_data), 0,
418 416 segvn_cache_constructor, segvn_cache_destructor, NULL,
419 417 NULL, NULL, 0);
420 418
421 419 if (segvn_lpg_disable == 0) {
422 420 szc = maxszc = page_num_pagesizes() - 1;
423 421 if (szc == 0) {
424 422 segvn_lpg_disable = 1;
425 423 }
426 424 if (page_get_pagesize(0) != PAGESIZE) {
427 425 panic("segvn_init: bad szc 0");
428 426 /*NOTREACHED*/
429 427 }
430 428 while (szc != 0) {
431 429 pgsz = page_get_pagesize(szc);
432 430 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
433 431 panic("segvn_init: bad szc %d", szc);
434 432 /*NOTREACHED*/
435 433 }
436 434 szc--;
437 435 }
438 436 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
439 437 segvn_maxpgszc = maxszc;
440 438 }
441 439
442 440 if (segvn_maxpgszc) {
443 441 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
444 442 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
445 443 KM_SLEEP);
446 444 }
447 445
448 446 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
449 447 char str[32];
450 448
451 449 (void) sprintf(str, "segvn_szc_cache%d", szc);
452 450 segvn_szc_cache[szc] = kmem_cache_create(str,
453 451 page_get_pagecnt(szc) * sizeof (page_t *), 0,
454 452 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
455 453 }
456 454
457 455
458 456 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
459 457 segvn_use_regions = 0;
460 458
461 459 /*
462 460 * For now shared regions and text replication segvn support
463 461 * are mutually exclusive. This is acceptable because
464 462 * currently significant benefit from text replication was
465 463 * only observed on AMD64 NUMA platforms (due to relatively
466 464 * small L2$ size) and currently we don't support shared
467 465 * regions on x86.
468 466 */
469 467 if (segvn_use_regions && !segvn_disable_textrepl) {
470 468 segvn_disable_textrepl = 1;
471 469 }
472 470
473 471 #if defined(_LP64)
474 472 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
475 473 !segvn_disable_textrepl) {
476 474 ulong_t i;
477 475 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
478 476
479 477 svntr_cache = kmem_cache_create("svntr_cache",
480 478 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
481 479 NULL, NULL, NULL, 0);
482 480 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
483 481 for (i = 0; i < svntr_hashtab_sz; i++) {
484 482 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
485 483 MUTEX_DEFAULT, NULL);
486 484 }
487 485 segvn_textrepl_max_bytes = ptob(physmem) /
488 486 segvn_textrepl_max_bytes_factor;
489 487 segvn_textrepl_stats = kmem_zalloc(NCPU *
490 488 sizeof (svntr_stats_t), KM_SLEEP);
491 489 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
492 490 (void) thread_create(NULL, 0, segvn_trasync_thread,
493 491 NULL, 0, &p0, TS_RUN, minclsyspri);
494 492 }
495 493 #endif
496 494
497 495 if (!ISP2(segvn_pglock_comb_balign) ||
498 496 segvn_pglock_comb_balign < PAGESIZE) {
499 497 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
500 498 }
501 499 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
502 500 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
503 501 }
504 502
505 503 #define SEGVN_PAGEIO ((void *)0x1)
506 504 #define SEGVN_NOPAGEIO ((void *)0x2)
507 505
508 506 static void
509 507 segvn_setvnode_mpss(vnode_t *vp)
510 508 {
511 509 int err;
512 510
513 511 ASSERT(vp->v_mpssdata == NULL ||
514 512 vp->v_mpssdata == SEGVN_PAGEIO ||
515 513 vp->v_mpssdata == SEGVN_NOPAGEIO);
516 514
517 515 if (vp->v_mpssdata == NULL) {
518 516 if (vn_vmpss_usepageio(vp)) {
519 517 err = VOP_PAGEIO(vp, (page_t *)NULL,
520 518 (u_offset_t)0, 0, 0, CRED(), NULL);
521 519 } else {
522 520 err = ENOSYS;
523 521 }
524 522 /*
525 523 * set v_mpssdata just once per vnode life
526 524 * so that it never changes.
527 525 */
528 526 mutex_enter(&vp->v_lock);
529 527 if (vp->v_mpssdata == NULL) {
530 528 if (err == EINVAL) {
531 529 vp->v_mpssdata = SEGVN_PAGEIO;
532 530 } else {
533 531 vp->v_mpssdata = SEGVN_NOPAGEIO;
534 532 }
535 533 }
536 534 mutex_exit(&vp->v_lock);
537 535 }
538 536 }
539 537
540 538 int
541 539 segvn_create(struct seg *seg, void *argsp)
542 540 {
543 541 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
544 542 struct segvn_data *svd;
545 543 size_t swresv = 0;
546 544 struct cred *cred;
547 545 struct anon_map *amp;
548 546 int error = 0;
549 547 size_t pgsz;
550 548 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
551 549 int use_rgn = 0;
552 550 int trok = 0;
553 551
554 552 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
555 553
556 554 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
557 555 panic("segvn_create type");
558 556 /*NOTREACHED*/
559 557 }
560 558
561 559 /*
562 560 * Check arguments. If a shared anon structure is given then
563 561 * it is illegal to also specify a vp.
564 562 */
565 563 if (a->amp != NULL && a->vp != NULL) {
566 564 panic("segvn_create anon_map");
567 565 /*NOTREACHED*/
568 566 }
569 567
570 568 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
571 569 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
572 570 segvn_use_regions) {
573 571 use_rgn = 1;
574 572 }
575 573
576 574 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
577 575 if (a->type == MAP_SHARED)
578 576 a->flags &= ~MAP_NORESERVE;
579 577
580 578 if (a->szc != 0) {
581 579 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
582 580 (a->amp != NULL && a->type == MAP_PRIVATE) ||
583 581 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
584 582 a->szc = 0;
585 583 } else {
586 584 if (a->szc > segvn_maxpgszc)
587 585 a->szc = segvn_maxpgszc;
588 586 pgsz = page_get_pagesize(a->szc);
589 587 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
590 588 !IS_P2ALIGNED(seg->s_size, pgsz)) {
591 589 a->szc = 0;
592 590 } else if (a->vp != NULL) {
593 591 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
594 592 /*
595 593 * paranoid check.
596 594 * hat_page_demote() is not supported
597 595 * on swapfs pages.
598 596 */
599 597 a->szc = 0;
600 598 } else if (map_addr_vacalign_check(seg->s_base,
601 599 a->offset & PAGEMASK)) {
602 600 a->szc = 0;
603 601 }
604 602 } else if (a->amp != NULL) {
605 603 pgcnt_t anum = btopr(a->offset);
606 604 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
607 605 if (!IS_P2ALIGNED(anum, pgcnt)) {
608 606 a->szc = 0;
609 607 }
610 608 }
611 609 }
612 610 }
613 611
614 612 /*
615 613 * If segment may need private pages, reserve them now.
616 614 */
617 615 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
618 616 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
619 617 if (anon_resv_zone(seg->s_size,
620 618 seg->s_as->a_proc->p_zone) == 0)
621 619 return (EAGAIN);
622 620 swresv = seg->s_size;
623 621 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
624 622 seg, swresv, 1);
625 623 }
626 624
627 625 /*
628 626 * Reserve any mapping structures that may be required.
629 627 *
630 628 * Don't do it for segments that may use regions. It's currently a
631 629 * noop in the hat implementations anyway.
632 630 */
633 631 if (!use_rgn) {
634 632 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
635 633 }
636 634
637 635 if (a->cred) {
638 636 cred = a->cred;
639 637 crhold(cred);
640 638 } else {
641 639 crhold(cred = CRED());
642 640 }
643 641
644 642 /* Inform the vnode of the new mapping */
645 643 if (a->vp != NULL) {
646 644 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
647 645 seg->s_as, seg->s_base, seg->s_size, a->prot,
648 646 a->maxprot, a->type, cred, NULL);
649 647 if (error) {
650 648 if (swresv != 0) {
651 649 anon_unresv_zone(swresv,
652 650 seg->s_as->a_proc->p_zone);
653 651 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
654 652 "anon proc:%p %lu %u", seg, swresv, 0);
655 653 }
656 654 crfree(cred);
657 655 if (!use_rgn) {
658 656 hat_unload(seg->s_as->a_hat, seg->s_base,
659 657 seg->s_size, HAT_UNLOAD_UNMAP);
660 658 }
661 659 return (error);
662 660 }
663 661 /*
664 662 * svntr_hashtab will be NULL if we support shared regions.
665 663 */
666 664 trok = ((a->flags & MAP_TEXT) &&
667 665 (seg->s_size > textrepl_size_thresh ||
668 666 (a->flags & _MAP_TEXTREPL)) &&
669 667 lgrp_optimizations() && svntr_hashtab != NULL &&
670 668 a->type == MAP_PRIVATE && swresv == 0 &&
671 669 !(a->flags & MAP_NORESERVE) &&
672 670 seg->s_as != &kas && a->vp->v_type == VREG);
673 671
674 672 ASSERT(!trok || !use_rgn);
675 673 }
676 674
677 675 /*
678 676 * MAP_NORESERVE mappings don't count towards the VSZ of a process
679 677 * until we fault the pages in.
680 678 */
681 679 if ((a->vp == NULL || a->vp->v_type != VREG) &&
682 680 a->flags & MAP_NORESERVE) {
683 681 seg->s_as->a_resvsize -= seg->s_size;
684 682 }
685 683
686 684 /*
687 685 * If there is more than one segment in the address space and they're adjacent
688 686 * virtually, try to concatenate them. Don't concatenate if an
689 687 * explicit anon_map structure was supplied (e.g., SystemV shared
690 688 * memory) or if we'll use text replication for this segment.
691 689 */
692 690 if (a->amp == NULL && !use_rgn && !trok) {
693 691 struct seg *pseg, *nseg;
694 692 struct segvn_data *psvd, *nsvd;
695 693 lgrp_mem_policy_t ppolicy, npolicy;
696 694 uint_t lgrp_mem_policy_flags = 0;
697 695 extern lgrp_mem_policy_t lgrp_mem_default_policy;
698 696
699 697 /*
700 698 * Memory policy flags (lgrp_mem_policy_flags) are valid when
701 699 * extending stack/heap segments.
702 700 */
703 701 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
704 702 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
705 703 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
706 704 } else {
707 705 /*
708 706 * Get policy when not extending it from another segment
709 707 */
710 708 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
711 709 }
712 710
713 711 /*
714 712 * First, try to concatenate the previous and new segments
715 713 */
716 714 pseg = AS_SEGPREV(seg->s_as, seg);
717 715 if (pseg != NULL &&
718 716 pseg->s_base + pseg->s_size == seg->s_base &&
719 717 pseg->s_ops == &segvn_ops) {
720 718 /*
721 719 * Get memory allocation policy from previous segment.
722 720 * When extension is specified (e.g. for heap) apply
723 721 * this policy to the new segment regardless of the
724 722 * outcome of segment concatenation. Extension occurs
725 723 * for a non-default policy; otherwise the default policy is
726 724 * used and is based on the extended segment size.
727 725 */
728 726 psvd = (struct segvn_data *)pseg->s_data;
729 727 ppolicy = psvd->policy_info.mem_policy;
730 728 if (lgrp_mem_policy_flags ==
731 729 LGRP_MP_FLAG_EXTEND_UP) {
732 730 if (ppolicy != lgrp_mem_default_policy) {
733 731 mpolicy = ppolicy;
734 732 } else {
735 733 mpolicy = lgrp_mem_policy_default(
736 734 pseg->s_size + seg->s_size,
737 735 a->type);
738 736 }
739 737 }
740 738
741 739 if (mpolicy == ppolicy &&
742 740 (pseg->s_size + seg->s_size <=
743 741 segvn_comb_thrshld || psvd->amp == NULL) &&
744 742 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
745 743 /*
746 744 * success! now try to concatenate
747 745 * with following seg
748 746 */
749 747 crfree(cred);
750 748 nseg = AS_SEGNEXT(pseg->s_as, pseg);
751 749 if (nseg != NULL &&
752 750 nseg != pseg &&
753 751 nseg->s_ops == &segvn_ops &&
754 752 pseg->s_base + pseg->s_size ==
755 753 nseg->s_base)
756 754 (void) segvn_concat(pseg, nseg, 0);
757 755 ASSERT(pseg->s_szc == 0 ||
758 756 (a->szc == pseg->s_szc &&
759 757 IS_P2ALIGNED(pseg->s_base, pgsz) &&
760 758 IS_P2ALIGNED(pseg->s_size, pgsz)));
761 759 return (0);
762 760 }
763 761 }
764 762
765 763 /*
766 764 * Failed, so try to concatenate with following seg
767 765 */
768 766 nseg = AS_SEGNEXT(seg->s_as, seg);
769 767 if (nseg != NULL &&
770 768 seg->s_base + seg->s_size == nseg->s_base &&
771 769 nseg->s_ops == &segvn_ops) {
772 770 /*
773 771 * Get memory allocation policy from next segment.
774 772 * When extension is specified (e.g. for stack) apply
775 773 * this policy to the new segment regardless of the
776 774 * outcome of segment concatenation. Extension occurs
777 775 * for a non-default policy; otherwise the default policy is
778 776 * used and is based on the extended segment size.
779 777 */
780 778 nsvd = (struct segvn_data *)nseg->s_data;
781 779 npolicy = nsvd->policy_info.mem_policy;
782 780 if (lgrp_mem_policy_flags ==
783 781 LGRP_MP_FLAG_EXTEND_DOWN) {
784 782 if (npolicy != lgrp_mem_default_policy) {
785 783 mpolicy = npolicy;
786 784 } else {
787 785 mpolicy = lgrp_mem_policy_default(
788 786 nseg->s_size + seg->s_size,
789 787 a->type);
790 788 }
791 789 }
792 790
793 791 if (mpolicy == npolicy &&
794 792 segvn_extend_next(seg, nseg, a, swresv) == 0) {
795 793 crfree(cred);
796 794 ASSERT(nseg->s_szc == 0 ||
797 795 (a->szc == nseg->s_szc &&
798 796 IS_P2ALIGNED(nseg->s_base, pgsz) &&
799 797 IS_P2ALIGNED(nseg->s_size, pgsz)));
800 798 return (0);
801 799 }
802 800 }
803 801 }
804 802
805 803 if (a->vp != NULL) {
806 804 VN_HOLD(a->vp);
807 805 if (a->type == MAP_SHARED)
808 806 lgrp_shm_policy_init(NULL, a->vp);
809 807 }
810 808 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
811 809
812 810 seg->s_ops = &segvn_ops;
813 811 seg->s_data = (void *)svd;
814 812 seg->s_szc = a->szc;
815 813
816 814 svd->seg = seg;
817 815 svd->vp = a->vp;
818 816 /*
819 817 * Anonymous mappings have no backing file so the offset is meaningless.
820 818 */
821 819 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
822 820 svd->prot = a->prot;
823 821 svd->maxprot = a->maxprot;
824 822 svd->pageprot = 0;
825 823 svd->type = a->type;
826 824 svd->vpage = NULL;
827 825 svd->cred = cred;
828 826 svd->advice = MADV_NORMAL;
829 827 svd->pageadvice = 0;
830 828 svd->flags = (ushort_t)a->flags;
831 829 svd->softlockcnt = 0;
832 830 svd->softlockcnt_sbase = 0;
833 831 svd->softlockcnt_send = 0;
834 832 svd->svn_inz = 0;
835 833 svd->rcookie = HAT_INVALID_REGION_COOKIE;
836 834 svd->pageswap = 0;
837 835
838 836 if (a->szc != 0 && a->vp != NULL) {
839 837 segvn_setvnode_mpss(a->vp);
840 838 }
841 839 if (svd->type == MAP_SHARED && svd->vp != NULL &&
842 840 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
843 841 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
844 842 segvn_inval_trcache(svd->vp);
845 843 }
846 844
847 845 amp = a->amp;
848 846 if ((svd->amp = amp) == NULL) {
849 847 svd->anon_index = 0;
850 848 if (svd->type == MAP_SHARED) {
851 849 svd->swresv = 0;
852 850 /*
853 851 * Shared mappings to a vp need no other setup.
854 852 * If we have a shared mapping to an anon_map object
855 853 * which hasn't been allocated yet, allocate the
856 854 * struct now so that it will be properly shared
857 855 * by remembering the swap reservation there.
858 856 */
859 857 if (a->vp == NULL) {
860 858 svd->amp = anonmap_alloc(seg->s_size, swresv,
861 859 ANON_SLEEP);
862 860 svd->amp->a_szc = seg->s_szc;
863 861 }
864 862 } else {
865 863 /*
866 864 * Private mapping (with or without a vp).
867 865 * Allocate anon_map when needed.
868 866 */
869 867 svd->swresv = swresv;
870 868 }
871 869 } else {
872 870 pgcnt_t anon_num;
873 871
874 872 /*
875 873 * Mapping to an existing anon_map structure without a vp.
876 874 * For now we will ensure that the segment size isn't larger
877 875 * than the size - offset gives us. Later on we may wish to
878 876 * have the anon array dynamically allocated itself so that
879 877 * we don't always have to allocate all the anon pointer slots.
880 878 * This of course involves adding extra code to check that we
881 879 * aren't trying to use an anon pointer slot beyond the end
882 880 * of the currently allocated anon array.
883 881 */
884 882 if ((amp->size - a->offset) < seg->s_size) {
885 883 panic("segvn_create anon_map size");
886 884 /*NOTREACHED*/
887 885 }
888 886
889 887 anon_num = btopr(a->offset);
890 888
891 889 if (a->type == MAP_SHARED) {
892 890 /*
893 891 * SHARED mapping to a given anon_map.
894 892 */
895 893 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
896 894 amp->refcnt++;
897 895 if (a->szc > amp->a_szc) {
898 896 amp->a_szc = a->szc;
899 897 }
900 898 ANON_LOCK_EXIT(&amp->a_rwlock);
901 899 svd->anon_index = anon_num;
902 900 svd->swresv = 0;
903 901 } else {
904 902 /*
905 903 * PRIVATE mapping to a given anon_map.
906 904 * Make sure that all the needed anon
907 905 * structures are created (so that we will
908 906 * share the underlying pages if nothing
909 907 * is written by this mapping) and then
910 908 * duplicate the anon array as is done
911 909 * when a privately mapped segment is dup'ed.
912 910 */
913 911 struct anon *ap;
914 912 caddr_t addr;
915 913 caddr_t eaddr;
916 914 ulong_t anon_idx;
917 915 int hat_flag = HAT_LOAD;
918 916
919 917 if (svd->flags & MAP_TEXT) {
920 918 hat_flag |= HAT_LOAD_TEXT;
921 919 }
922 920
923 921 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
924 922 svd->amp->a_szc = seg->s_szc;
925 923 svd->anon_index = 0;
926 924 svd->swresv = swresv;
927 925
928 926 /*
929 927 * Prevent 2 threads from allocating anon
930 928 * slots simultaneously.
931 929 */
932 930 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
933 931 eaddr = seg->s_base + seg->s_size;
934 932
935 933 for (anon_idx = anon_num, addr = seg->s_base;
936 934 addr < eaddr; addr += PAGESIZE, anon_idx++) {
937 935 page_t *pp;
938 936
939 937 if ((ap = anon_get_ptr(amp->ahp,
940 938 anon_idx)) != NULL)
941 939 continue;
942 940
943 941 /*
944 942 * Allocate the anon struct now.
945 943 * Might as well load up translation
946 944 * to the page while we're at it...
947 945 */
948 946 pp = anon_zero(seg, addr, &ap, cred);
949 947 if (ap == NULL || pp == NULL) {
950 948 panic("segvn_create anon_zero");
951 949 /*NOTREACHED*/
952 950 }
953 951
954 952 /*
955 953 * Re-acquire the anon_map lock and
956 954 * initialize the anon array entry.
957 955 */
958 956 ASSERT(anon_get_ptr(amp->ahp,
959 957 anon_idx) == NULL);
960 958 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
961 959 ANON_SLEEP);
962 960
963 961 ASSERT(seg->s_szc == 0);
964 962 ASSERT(!IS_VMODSORT(pp->p_vnode));
965 963
966 964 ASSERT(use_rgn == 0);
967 965 hat_memload(seg->s_as->a_hat, addr, pp,
968 966 svd->prot & ~PROT_WRITE, hat_flag);
969 967
970 968 page_unlock(pp);
971 969 }
972 970 ASSERT(seg->s_szc == 0);
973 971 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
974 972 0, seg->s_size);
975 973 ANON_LOCK_EXIT(&amp->a_rwlock);
976 974 }
977 975 }
978 976
979 977 /*
980 978 * Set default memory allocation policy for segment
981 979 *
982 980 * Always set policy for private memory at least for initialization
983 981 * even if this is a shared memory segment
984 982 */
985 983 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
986 984
987 985 if (svd->type == MAP_SHARED)
988 986 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
989 987 svd->vp, svd->offset, seg->s_size);
990 988
991 989 if (use_rgn) {
992 990 ASSERT(!trok);
993 991 ASSERT(svd->amp == NULL);
994 992 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
995 993 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
996 994 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
997 995 HAT_REGION_TEXT);
998 996 }
999 997
1000 998 ASSERT(!trok || !(svd->prot & PROT_WRITE));
1001 999 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
1002 1000
1003 1001 return (0);
1004 1002 }
1005 1003
1006 1004 /*
1007 1005 * Concatenate two existing segments, if possible.
1008 1006 * Return 0 on success, -1 if two segments are not compatible
1009 1007 * or -2 on memory allocation failure.
1010 1008 * If amp_cat == 1 then try to concatenate segments with anon maps.
1011 1009 */
1012 1010 static int
1013 1011 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
1014 1012 {
1015 1013 struct segvn_data *svd1 = seg1->s_data;
1016 1014 struct segvn_data *svd2 = seg2->s_data;
1017 1015 struct anon_map *amp1 = svd1->amp;
1018 1016 struct anon_map *amp2 = svd2->amp;
1019 1017 struct vpage *vpage1 = svd1->vpage;
1020 1018 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
1021 1019 size_t size, nvpsize;
1022 1020 pgcnt_t npages1, npages2;
1023 1021
1024 1022 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
1025 1023 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1026 1024 ASSERT(seg1->s_ops == seg2->s_ops);
1027 1025
1028 1026 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1029 1027 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1030 1028 return (-1);
1031 1029 }
1032 1030
1033 1031 /* both segments exist, try to merge them */
1034 1032 #define incompat(x) (svd1->x != svd2->x)
1035 1033 if (incompat(vp) || incompat(maxprot) ||
1036 1034 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1037 1035 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1038 1036 incompat(type) || incompat(cred) || incompat(flags) ||
1039 1037 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1040 1038 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1041 1039 return (-1);
1042 1040 #undef incompat
1043 1041
1044 1042 /*
1045 1043 * vp == NULL implies zfod, offset doesn't matter
1046 1044 */
1047 1045 if (svd1->vp != NULL &&
1048 1046 svd1->offset + seg1->s_size != svd2->offset) {
1049 1047 return (-1);
1050 1048 }
1051 1049
1052 1050 /*
1053 1051 * Don't concatenate if either segment uses text replication.
1054 1052 */
1055 1053 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1056 1054 return (-1);
1057 1055 }
1058 1056
1059 1057 /*
1060 1058 * Fail early if we're not supposed to concatenate
1061 1059 * segments with a non-NULL amp.
1062 1060 */
1063 1061 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1064 1062 return (-1);
1065 1063 }
1066 1064
1067 1065 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1068 1066 if (amp1 != amp2) {
1069 1067 return (-1);
1070 1068 }
1071 1069 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1072 1070 svd2->anon_index) {
1073 1071 return (-1);
1074 1072 }
1075 1073 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1076 1074 }
1077 1075
1078 1076 /*
1079 1077 * If either seg has vpages, create a new merged vpage array.
1080 1078 */
1081 1079 if (vpage1 != NULL || vpage2 != NULL) {
1082 1080 struct vpage *vp, *evp;
1083 1081
1084 1082 npages1 = seg_pages(seg1);
1085 1083 npages2 = seg_pages(seg2);
1086 1084 nvpsize = vpgtob(npages1 + npages2);
1087 1085
1088 1086 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1089 1087 return (-2);
1090 1088 }
1091 1089
1092 1090 if (vpage1 != NULL) {
1093 1091 bcopy(vpage1, nvpage, vpgtob(npages1));
1094 1092 } else {
1095 1093 evp = nvpage + npages1;
1096 1094 for (vp = nvpage; vp < evp; vp++) {
1097 1095 VPP_SETPROT(vp, svd1->prot);
1098 1096 VPP_SETADVICE(vp, svd1->advice);
1099 1097 }
1100 1098 }
1101 1099
1102 1100 if (vpage2 != NULL) {
1103 1101 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1104 1102 } else {
1105 1103 evp = nvpage + npages1 + npages2;
1106 1104 for (vp = nvpage + npages1; vp < evp; vp++) {
1107 1105 VPP_SETPROT(vp, svd2->prot);
1108 1106 VPP_SETADVICE(vp, svd2->advice);
1109 1107 }
1110 1108 }
1111 1109
1112 1110 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1113 1111 ASSERT(svd1->swresv == seg1->s_size);
1114 1112 ASSERT(!(svd1->flags & MAP_NORESERVE));
1115 1113 ASSERT(!(svd2->flags & MAP_NORESERVE));
1116 1114 evp = nvpage + npages1;
1117 1115 for (vp = nvpage; vp < evp; vp++) {
1118 1116 VPP_SETSWAPRES(vp);
1119 1117 }
1120 1118 }
1121 1119
1122 1120 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1123 1121 ASSERT(svd2->swresv == seg2->s_size);
1124 1122 ASSERT(!(svd1->flags & MAP_NORESERVE));
1125 1123 ASSERT(!(svd2->flags & MAP_NORESERVE));
1126 1124 vp = nvpage + npages1;
1127 1125 evp = vp + npages2;
1128 1126 for (; vp < evp; vp++) {
1129 1127 VPP_SETSWAPRES(vp);
1130 1128 }
1131 1129 }
1132 1130 }
1133 1131 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1134 1132 (svd1->pageswap == 0 && svd2->pageswap == 0));
1135 1133
1136 1134 /*
1137 1135 * If either segment has private pages, create a new merged anon
1138 1136 * array. If merging shared anon segments, just decrement the anon map's
1139 1137 * refcnt.
1140 1138 */
1141 1139 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1142 1140 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1143 1141 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1144 1142 ASSERT(amp1->refcnt >= 2);
1145 1143 amp1->refcnt--;
1146 1144 ANON_LOCK_EXIT(&amp1->a_rwlock);
1147 1145 svd2->amp = NULL;
1148 1146 } else if (amp1 != NULL || amp2 != NULL) {
1149 1147 struct anon_hdr *nahp;
1150 1148 struct anon_map *namp = NULL;
1151 1149 size_t asize;
1152 1150
1153 1151 ASSERT(svd1->type == MAP_PRIVATE);
1154 1152
1155 1153 asize = seg1->s_size + seg2->s_size;
1156 1154 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1157 1155 if (nvpage != NULL) {
1158 1156 kmem_free(nvpage, nvpsize);
1159 1157 }
1160 1158 return (-2);
1161 1159 }
1162 1160 if (amp1 != NULL) {
1163 1161 /*
1164 1162 * XXX anon rwlock is not really needed because
1165 1163 * this is a private segment and we are writers.
1166 1164 */
1167 1165 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1168 1166 ASSERT(amp1->refcnt == 1);
1169 1167 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1170 1168 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1171 1169 anon_release(nahp, btop(asize));
1172 1170 ANON_LOCK_EXIT(&amp1->a_rwlock);
1173 1171 if (nvpage != NULL) {
1174 1172 kmem_free(nvpage, nvpsize);
1175 1173 }
1176 1174 return (-2);
1177 1175 }
1178 1176 }
1179 1177 if (amp2 != NULL) {
1180 1178 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1181 1179 ASSERT(amp2->refcnt == 1);
1182 1180 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1183 1181 nahp, btop(seg1->s_size), btop(seg2->s_size),
1184 1182 ANON_NOSLEEP)) {
1185 1183 anon_release(nahp, btop(asize));
1186 1184 ANON_LOCK_EXIT(&amp2->a_rwlock);
1187 1185 if (amp1 != NULL) {
1188 1186 ANON_LOCK_EXIT(&amp1->a_rwlock);
1189 1187 }
1190 1188 if (nvpage != NULL) {
1191 1189 kmem_free(nvpage, nvpsize);
1192 1190 }
1193 1191 return (-2);
1194 1192 }
1195 1193 }
1196 1194 if (amp1 != NULL) {
1197 1195 namp = amp1;
1198 1196 anon_release(amp1->ahp, btop(amp1->size));
1199 1197 }
1200 1198 if (amp2 != NULL) {
1201 1199 if (namp == NULL) {
1202 1200 ASSERT(amp1 == NULL);
1203 1201 namp = amp2;
1204 1202 anon_release(amp2->ahp, btop(amp2->size));
1205 1203 } else {
1206 1204 amp2->refcnt--;
1207 1205 ANON_LOCK_EXIT(&amp2->a_rwlock);
1208 1206 anonmap_free(amp2);
1209 1207 }
1210 1208 svd2->amp = NULL; /* needed for seg_free */
1211 1209 }
1212 1210 namp->ahp = nahp;
1213 1211 namp->size = asize;
1214 1212 svd1->amp = namp;
1215 1213 svd1->anon_index = 0;
1216 1214 ANON_LOCK_EXIT(&namp->a_rwlock);
1217 1215 }
1218 1216 /*
1219 1217 * Now free the old vpage structures.
1220 1218 */
1221 1219 if (nvpage != NULL) {
1222 1220 if (vpage1 != NULL) {
1223 1221 kmem_free(vpage1, vpgtob(npages1));
1224 1222 }
1225 1223 if (vpage2 != NULL) {
1226 1224 svd2->vpage = NULL;
1227 1225 kmem_free(vpage2, vpgtob(npages2));
1228 1226 }
1229 1227 if (svd2->pageprot) {
1230 1228 svd1->pageprot = 1;
1231 1229 }
1232 1230 if (svd2->pageadvice) {
1233 1231 svd1->pageadvice = 1;
1234 1232 }
1235 1233 if (svd2->pageswap) {
1236 1234 svd1->pageswap = 1;
1237 1235 }
1238 1236 svd1->vpage = nvpage;
1239 1237 }
1240 1238
1241 1239 /* all looks ok, merge segments */
1242 1240 svd1->swresv += svd2->swresv;
1243 1241 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1244 1242 size = seg2->s_size;
1245 1243 seg_free(seg2);
1246 1244 seg1->s_size += size;
1247 1245 return (0);
1248 1246 }
1249 1247
1250 1248 /*
1251 1249 * Extend the previous segment (seg1) to include the
1252 1250 * new segment (seg2 + a), if possible.
1253 1251 * Return 0 on success.
1254 1252 */
1255 1253 static int
1256 1254 segvn_extend_prev(seg1, seg2, a, swresv)
1257 1255 struct seg *seg1, *seg2;
1258 1256 struct segvn_crargs *a;
1259 1257 size_t swresv;
1260 1258 {
1261 1259 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1262 1260 size_t size;
1263 1261 struct anon_map *amp1;
1264 1262 struct vpage *new_vpage;
1265 1263
1266 1264 /*
1267 1265 * We don't need any segment level locks for "segvn" data
1268 1266 * since the address space is "write" locked.
1269 1267 */
1270 1268 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1271 1269
1272 1270 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1273 1271 return (-1);
1274 1272 }
1275 1273
1276 1274 /* second segment is new, try to extend first */
1277 1275 /* XXX - should also check cred */
1278 1276 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1279 1277 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1280 1278 svd1->type != a->type || svd1->flags != a->flags ||
1281 1279 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1282 1280 return (-1);
1283 1281
1284 1282 /* vp == NULL implies zfod, offset doesn't matter */
1285 1283 if (svd1->vp != NULL &&
1286 1284 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1287 1285 return (-1);
1288 1286
1289 1287 if (svd1->tr_state != SEGVN_TR_OFF) {
1290 1288 return (-1);
1291 1289 }
1292 1290
1293 1291 amp1 = svd1->amp;
1294 1292 if (amp1) {
1295 1293 pgcnt_t newpgs;
1296 1294
1297 1295 /*
1298 1296 * Segment has private pages, can data structures
1299 1297 * be expanded?
1300 1298 *
1301 1299 * Acquire the anon_map lock to prevent it from changing,
1302 1300 * if it is shared. This ensures that the anon_map
1303 1301 * will not change while a thread which has a read/write
1304 1302 * lock on an address space references it.
1305 1303 * XXX - Don't need the anon_map lock at all if "refcnt"
1306 1304 * is 1.
1307 1305 *
1308 1306 * Can't grow a MAP_SHARED segment with an anonmap because
1309 1307 * there may be existing anon slots where we want to extend
1310 1308 * the segment and we wouldn't know what to do with them
1311 1309 * (e.g., for tmpfs the right thing is to just leave them there,
1312 1310 * for /dev/zero they should be cleared out).
1313 1311 */
1314 1312 if (svd1->type == MAP_SHARED)
1315 1313 return (-1);
1316 1314
1317 1315 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1318 1316 if (amp1->refcnt > 1) {
1319 1317 ANON_LOCK_EXIT(&amp1->a_rwlock);
1320 1318 return (-1);
1321 1319 }
1322 1320 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1323 1321 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1324 1322
1325 1323 if (newpgs == 0) {
1326 1324 ANON_LOCK_EXIT(&amp1->a_rwlock);
1327 1325 return (-1);
1328 1326 }
1329 1327 amp1->size = ptob(newpgs);
1330 1328 ANON_LOCK_EXIT(&amp1->a_rwlock);
1331 1329 }
1332 1330 if (svd1->vpage != NULL) {
1333 1331 struct vpage *vp, *evp;
1334 1332 new_vpage =
1335 1333 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1336 1334 KM_NOSLEEP);
1337 1335 if (new_vpage == NULL)
1338 1336 return (-1);
1339 1337 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1340 1338 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1341 1339 svd1->vpage = new_vpage;
1342 1340
1343 1341 vp = new_vpage + seg_pages(seg1);
1344 1342 evp = vp + seg_pages(seg2);
1345 1343 for (; vp < evp; vp++)
1346 1344 VPP_SETPROT(vp, a->prot);
1347 1345 if (svd1->pageswap && swresv) {
1348 1346 ASSERT(!(svd1->flags & MAP_NORESERVE));
1349 1347 ASSERT(swresv == seg2->s_size);
1350 1348 vp = new_vpage + seg_pages(seg1);
1351 1349 for (; vp < evp; vp++) {
1352 1350 VPP_SETSWAPRES(vp);
1353 1351 }
1354 1352 }
1355 1353 }
1356 1354 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1357 1355 size = seg2->s_size;
1358 1356 seg_free(seg2);
1359 1357 seg1->s_size += size;
1360 1358 svd1->swresv += swresv;
1361 1359 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1362 1360 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1363 1361 (svd1->vp->v_flag & VVMEXEC)) {
1364 1362 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1365 1363 segvn_inval_trcache(svd1->vp);
1366 1364 }
1367 1365 return (0);
1368 1366 }
1369 1367
1370 1368 /*
1371 1369 * Extend the next segment (seg2) to include the
1372 1370 * new segment (seg1 + a), if possible.
1373 1371 * Return 0 on success.
1374 1372 */
1375 1373 static int
1376 1374 segvn_extend_next(
1377 1375 struct seg *seg1,
1378 1376 struct seg *seg2,
1379 1377 struct segvn_crargs *a,
1380 1378 size_t swresv)
1381 1379 {
1382 1380 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1383 1381 size_t size;
1384 1382 struct anon_map *amp2;
1385 1383 struct vpage *new_vpage;
1386 1384
1387 1385 /*
1388 1386 * We don't need any segment level locks for "segvn" data
1389 1387 * since the address space is "write" locked.
1390 1388 */
1391 1389 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1392 1390
1393 1391 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1394 1392 return (-1);
1395 1393 }
1396 1394
1397 1395 /* first segment is new, try to extend second */
1398 1396 /* XXX - should also check cred */
1399 1397 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1400 1398 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1401 1399 svd2->type != a->type || svd2->flags != a->flags ||
1402 1400 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1403 1401 return (-1);
1404 1402 /* vp == NULL implies zfod, offset doesn't matter */
1405 1403 if (svd2->vp != NULL &&
1406 1404 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1407 1405 return (-1);
1408 1406
1409 1407 if (svd2->tr_state != SEGVN_TR_OFF) {
1410 1408 return (-1);
1411 1409 }
1412 1410
1413 1411 amp2 = svd2->amp;
1414 1412 if (amp2) {
1415 1413 pgcnt_t newpgs;
1416 1414
1417 1415 /*
1418 1416 * Segment has private pages, can data structures
1419 1417 * be expanded?
1420 1418 *
1421 1419 * Acquire the anon_map lock to prevent it from changing,
1422 1420 * if it is shared. This ensures that the anon_map
1423 1421 * will not change while a thread which has a read/write
1424 1422 * lock on an address space references it.
1425 1423 *
1426 1424 * XXX - Don't need the anon_map lock at all if "refcnt"
1427 1425 * is 1.
1428 1426 */
1429 1427 if (svd2->type == MAP_SHARED)
1430 1428 return (-1);
1431 1429
1432 1430 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1433 1431 if (amp2->refcnt > 1) {
1434 1432 ANON_LOCK_EXIT(&amp2->a_rwlock);
1435 1433 return (-1);
1436 1434 }
1437 1435 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1438 1436 btop(seg2->s_size), btop(seg1->s_size),
1439 1437 ANON_NOSLEEP | ANON_GROWDOWN);
1440 1438
1441 1439 if (newpgs == 0) {
1442 1440 ANON_LOCK_EXIT(&amp2->a_rwlock);
1443 1441 return (-1);
1444 1442 }
1445 1443 amp2->size = ptob(newpgs);
1446 1444 ANON_LOCK_EXIT(&amp2->a_rwlock);
1447 1445 }
1448 1446 if (svd2->vpage != NULL) {
1449 1447 struct vpage *vp, *evp;
1450 1448 new_vpage =
1451 1449 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1452 1450 KM_NOSLEEP);
1453 1451 if (new_vpage == NULL) {
1454 1452 /* Not merging segments so adjust anon_index back */
1455 1453 if (amp2)
1456 1454 svd2->anon_index += seg_pages(seg1);
1457 1455 return (-1);
1458 1456 }
1459 1457 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1460 1458 vpgtob(seg_pages(seg2)));
1461 1459 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1462 1460 svd2->vpage = new_vpage;
1463 1461
1464 1462 vp = new_vpage;
1465 1463 evp = vp + seg_pages(seg1);
1466 1464 for (; vp < evp; vp++)
1467 1465 VPP_SETPROT(vp, a->prot);
1468 1466 if (svd2->pageswap && swresv) {
1469 1467 ASSERT(!(svd2->flags & MAP_NORESERVE));
1470 1468 ASSERT(swresv == seg1->s_size);
1471 1469 vp = new_vpage;
1472 1470 for (; vp < evp; vp++) {
1473 1471 VPP_SETSWAPRES(vp);
1474 1472 }
1475 1473 }
1476 1474 }
1477 1475 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1478 1476 size = seg1->s_size;
1479 1477 seg_free(seg1);
1480 1478 seg2->s_size += size;
1481 1479 seg2->s_base -= size;
1482 1480 svd2->offset -= size;
1483 1481 svd2->swresv += swresv;
1484 1482 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1485 1483 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1486 1484 (svd2->vp->v_flag & VVMEXEC)) {
1487 1485 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1488 1486 segvn_inval_trcache(svd2->vp);
1489 1487 }
1490 1488 return (0);
1491 1489 }
1492 1490
1493 1491 /*
1494 1492 * Duplicate all the pages in the segment. This may break COW sharing for a
1495 1493 * given page. If the page is marked with inherit zero set, then instead of
1496 1494 * duplicating the page, we zero the page.
1497 1495 */
1498 1496 static int
1499 1497 segvn_dup_pages(struct seg *seg, struct seg *newseg)
1500 1498 {
1501 1499 int error;
1502 1500 uint_t prot;
1503 1501 page_t *pp;
1504 1502 struct anon *ap, *newap;
1505 1503 size_t i;
1506 1504 caddr_t addr;
1507 1505
1508 1506 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1509 1507 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
1510 1508 ulong_t old_idx = svd->anon_index;
1511 1509 ulong_t new_idx = 0;
1512 1510
1513 1511 i = btopr(seg->s_size);
1514 1512 addr = seg->s_base;
1515 1513
1516 1514 /*
1517 1515 * XXX break cow sharing using PAGESIZE
1518 1516 * pages. They will be relocated into larger
1519 1517 * pages at fault time.
1520 1518 */
1521 1519 while (i-- > 0) {
1522 1520 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1523 1521 struct vpage *vpp;
1524 1522
1525 1523 vpp = &svd->vpage[seg_page(seg, addr)];
1526 1524
1527 1525 /*
1528 1526 * prot need not be computed below 'cause anon_private
1529 1527 * is going to ignore it anyway as child doesn't inherit
1530 1528 * pagelock from parent.
1531 1529 */
1532 1530 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1533 1531
1534 1532 /*
1535 1533 * Check whether we should zero this or dup it.
1536 1534 */
1537 1535 if (svd->svn_inz == SEGVN_INZ_ALL ||
1538 1536 (svd->svn_inz == SEGVN_INZ_VPP &&
1539 1537 VPP_ISINHZERO(vpp))) {
1540 1538 pp = anon_zero(newseg, addr, &newap,
1541 1539 newsvd->cred);
1542 1540 } else {
1543 1541 page_t *anon_pl[1+1];
1544 1542 uint_t vpprot;
1545 1543 error = anon_getpage(&ap, &vpprot, anon_pl,
1546 1544 PAGESIZE, seg, addr, S_READ, svd->cred);
1547 1545 if (error != 0)
1548 1546 return (error);
1549 1547
1550 1548 pp = anon_private(&newap, newseg, addr, prot,
1551 1549 anon_pl[0], 0, newsvd->cred);
1552 1550 }
1553 1551 if (pp == NULL) {
1554 1552 return (ENOMEM);
1555 1553 }
1556 1554 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
1557 1555 ANON_SLEEP);
1558 1556 page_unlock(pp);
1559 1557 }
1560 1558 addr += PAGESIZE;
1561 1559 old_idx++;
1562 1560 new_idx++;
1563 1561 }
1564 1562
1565 1563 return (0);
1566 1564 }
1567 1565
1568 1566 static int
1569 1567 segvn_dup(struct seg *seg, struct seg *newseg)
1570 1568 {
1571 1569 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1572 1570 struct segvn_data *newsvd;
1573 1571 pgcnt_t npages = seg_pages(seg);
1574 1572 int error = 0;
1575 1573 size_t len;
1576 1574 struct anon_map *amp;
1577 1575
1578 1576 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1579 1577 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1580 1578
1581 1579 /*
1582 1580 * If segment has anon reserved, reserve more for the new seg.
1583 1581 * For a MAP_NORESERVE segment swresv will be a count of all the
1584 1582 * allocated anon slots; thus we reserve for the child as many slots
1585 1583 * as the parent has allocated. This semantic prevents the child or
1586 1584 * parent from dying during a copy-on-write fault caused by trying
1587 1585 * to write a shared pre-existing anon page.
1588 1586 */
1589 1587 if ((len = svd->swresv) != 0) {
1590 1588 if (anon_resv(svd->swresv) == 0)
1591 1589 return (ENOMEM);
1592 1590
1593 1591 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1594 1592 seg, len, 0);
1595 1593 }
1596 1594
1597 1595 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1598 1596
1599 1597 newseg->s_ops = &segvn_ops;
1600 1598 newseg->s_data = (void *)newsvd;
1601 1599 newseg->s_szc = seg->s_szc;
1602 1600
1603 1601 newsvd->seg = newseg;
1604 1602 if ((newsvd->vp = svd->vp) != NULL) {
1605 1603 VN_HOLD(svd->vp);
1606 1604 if (svd->type == MAP_SHARED)
1607 1605 lgrp_shm_policy_init(NULL, svd->vp);
1608 1606 }
1609 1607 newsvd->offset = svd->offset;
1610 1608 newsvd->prot = svd->prot;
1611 1609 newsvd->maxprot = svd->maxprot;
1612 1610 newsvd->pageprot = svd->pageprot;
1613 1611 newsvd->type = svd->type;
1614 1612 newsvd->cred = svd->cred;
1615 1613 crhold(newsvd->cred);
1616 1614 newsvd->advice = svd->advice;
1617 1615 newsvd->pageadvice = svd->pageadvice;
1618 1616 newsvd->svn_inz = svd->svn_inz;
1619 1617 newsvd->swresv = svd->swresv;
1620 1618 newsvd->pageswap = svd->pageswap;
1621 1619 newsvd->flags = svd->flags;
1622 1620 newsvd->softlockcnt = 0;
1623 1621 newsvd->softlockcnt_sbase = 0;
1624 1622 newsvd->softlockcnt_send = 0;
1625 1623 newsvd->policy_info = svd->policy_info;
1626 1624 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1627 1625
1628 1626 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1629 1627 /*
1630 1628 * Not attaching to a shared anon object.
1631 1629 */
1632 1630 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1633 1631 svd->tr_state == SEGVN_TR_OFF);
1634 1632 if (svd->tr_state == SEGVN_TR_ON) {
1635 1633 ASSERT(newsvd->vp != NULL && amp != NULL);
1636 1634 newsvd->tr_state = SEGVN_TR_INIT;
1637 1635 } else {
1638 1636 newsvd->tr_state = svd->tr_state;
1639 1637 }
1640 1638 newsvd->amp = NULL;
1641 1639 newsvd->anon_index = 0;
1642 1640 } else {
1643 1641 /* regions for now are only used on pure vnode segments */
1644 1642 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1645 1643 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1646 1644 newsvd->tr_state = SEGVN_TR_OFF;
1647 1645 if (svd->type == MAP_SHARED) {
1648 1646 ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1649 1647 newsvd->amp = amp;
1650 1648 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1651 1649 amp->refcnt++;
1652 1650 ANON_LOCK_EXIT(&amp->a_rwlock);
1653 1651 newsvd->anon_index = svd->anon_index;
1654 1652 } else {
1655 1653 int reclaim = 1;
1656 1654
1657 1655 /*
1658 1656 * Allocate and initialize new anon_map structure.
1659 1657 */
1660 1658 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1661 1659 ANON_SLEEP);
1662 1660 newsvd->amp->a_szc = newseg->s_szc;
1663 1661 newsvd->anon_index = 0;
1664 1662 ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1665 1663 svd->svn_inz == SEGVN_INZ_ALL ||
1666 1664 svd->svn_inz == SEGVN_INZ_VPP);
1667 1665
1668 1666 /*
1669 1667 * We don't have to acquire the anon_map lock
1670 1668 * for the new segment (since it belongs to an
1671 1669 * address space that is still not associated
1672 1670 * with any process), or the segment in the old
1673 1671 * address space (since all threads in it
1674 1672 * are stopped while duplicating the address space).
1675 1673 */
1676 1674
1677 1675 /*
1678 1676 * The goal of the following code is to make sure that
1679 1677 * softlocked pages do not end up as copy on write
1680 1678 * pages. This would cause problems where one
1681 1679 * thread writes to a page that is COW and a different
1682 1680 * thread in the same process has softlocked it. The
1683 1681 * softlock lock would move away from this process
1684 1682 * because the write would cause this process to get
1685 1683 * a copy (without the softlock).
1686 1684 *
1687 1685 * The strategy here is to just break the
1688 1686 * sharing on pages that could possibly be
1689 1687 * softlocked.
1690 1688 *
1691 1689 * In addition, if any pages have been marked that they
1692 1690 * should be inherited as zero, then we immediately go
1693 1691 * ahead and break COW and zero them. In the case of a
1694 1692 * softlocked page that should be inherited zero, we
1695 1693 * break COW and just get a zero page.
1696 1694 */
1697 1695 retry:
1698 1696 if (svd->softlockcnt ||
1699 1697 svd->svn_inz != SEGVN_INZ_NONE) {
1700 1698 /*
1701 1699 * The softlock count might be non zero
1702 1700 * because some pages are still stuck in the
1703 1701 * cache for lazy reclaim. Flush the cache
1704 1702 * now. This should drop the count to zero.
1705 1703 * [or there is really I/O going on to these
1706 1704 * pages]. Note, we have the writers lock so
1707 1705 * nothing gets inserted during the flush.
1708 1706 */
1709 1707 if (svd->softlockcnt && reclaim == 1) {
1710 1708 segvn_purge(seg);
1711 1709 reclaim = 0;
1712 1710 goto retry;
1713 1711 }
1714 1712
1715 1713 error = segvn_dup_pages(seg, newseg);
1716 1714 if (error != 0) {
1717 1715 newsvd->vpage = NULL;
1718 1716 goto out;
1719 1717 }
1720 1718 } else { /* common case */
1721 1719 if (seg->s_szc != 0) {
1722 1720 /*
1723 1721 * If at least one of anon slots of a
1724 1722 * large page exists then make sure
1725 1723 * all anon slots of a large page
1726 1724 * exist to avoid partial cow sharing
1727 1725 * of a large page in the future.
1728 1726 */
1729 1727 anon_dup_fill_holes(amp->ahp,
1730 1728 svd->anon_index, newsvd->amp->ahp,
1731 1729 0, seg->s_size, seg->s_szc,
1732 1730 svd->vp != NULL);
1733 1731 } else {
1734 1732 anon_dup(amp->ahp, svd->anon_index,
1735 1733 newsvd->amp->ahp, 0, seg->s_size);
1736 1734 }
1737 1735
1738 1736 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1739 1737 seg->s_size, PROT_WRITE);
1740 1738 }
1741 1739 }
1742 1740 }
1743 1741 /*
1744 1742 * If necessary, create a vpage structure for the new segment.
1745 1743 * Do not copy any page lock indications.
1746 1744 */
1747 1745 if (svd->vpage != NULL) {
1748 1746 uint_t i;
1749 1747 struct vpage *ovp = svd->vpage;
1750 1748 struct vpage *nvp;
1751 1749
1752 1750 nvp = newsvd->vpage =
1753 1751 kmem_alloc(vpgtob(npages), KM_SLEEP);
1754 1752 for (i = 0; i < npages; i++) {
1755 1753 *nvp = *ovp++;
1756 1754 VPP_CLRPPLOCK(nvp++);
1757 1755 }
1758 1756 } else
1759 1757 newsvd->vpage = NULL;
1760 1758
1761 1759 /* Inform the vnode of the new mapping */
1762 1760 if (newsvd->vp != NULL) {
1763 1761 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1764 1762 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1765 1763 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1766 1764 }
1767 1765 out:
1768 1766 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1769 1767 ASSERT(newsvd->amp == NULL);
1770 1768 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1771 1769 newsvd->rcookie = svd->rcookie;
1772 1770 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1773 1771 }
1774 1772 return (error);
1775 1773 }
1776 1774
1777 1775
1778 1776 /*
1779 1777 * callback function to invoke free_vp_pages() for only those pages actually
1780 1778 * processed by the HAT when a shared region is destroyed.
1781 1779 */
1782 1780 extern int free_pages;
1783 1781
1784 1782 static void
1785 1783 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1786 1784 size_t r_size, void *r_obj, u_offset_t r_objoff)
1787 1785 {
1788 1786 u_offset_t off;
1789 1787 size_t len;
1790 1788 vnode_t *vp = (vnode_t *)r_obj;
1791 1789
1792 1790 ASSERT(eaddr > saddr);
1793 1791 ASSERT(saddr >= r_saddr);
1794 1792 ASSERT(saddr < r_saddr + r_size);
1795 1793 ASSERT(eaddr > r_saddr);
1796 1794 ASSERT(eaddr <= r_saddr + r_size);
1797 1795 ASSERT(vp != NULL);
1798 1796
1799 1797 if (!free_pages) {
1800 1798 return;
1801 1799 }
1802 1800
1803 1801 len = eaddr - saddr;
1804 1802 off = (saddr - r_saddr) + r_objoff;
1805 1803 free_vp_pages(vp, off, len);
1806 1804 }
1807 1805
1808 1806 /*
1809 1807 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1810 1808 * those pages actually processed by the HAT
1811 1809 */
1812 1810 static void
1813 1811 segvn_hat_unload_callback(hat_callback_t *cb)
1814 1812 {
1815 1813 struct seg *seg = cb->hcb_data;
1816 1814 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1817 1815 size_t len;
1818 1816 u_offset_t off;
1819 1817
1820 1818 ASSERT(svd->vp != NULL);
1821 1819 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1822 1820 ASSERT(cb->hcb_start_addr >= seg->s_base);
1823 1821
1824 1822 len = cb->hcb_end_addr - cb->hcb_start_addr;
1825 1823 off = cb->hcb_start_addr - seg->s_base;
1826 1824 free_vp_pages(svd->vp, svd->offset + off, len);
1827 1825 }
1828 1826
1829 1827 /*
1830 1828 * This function determines the number of bytes of swap reserved by
1831 1829 * a segment for which per-page accounting is present. It is used to
1832 1830 * calculate the correct value of a segvn_data's swresv.
1833 1831 */
1834 1832 static size_t
1835 1833 segvn_count_swap_by_vpages(struct seg *seg)
1836 1834 {
1837 1835 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1838 1836 struct vpage *vp, *evp;
1839 1837 size_t nswappages = 0;
1840 1838
1841 1839 ASSERT(svd->pageswap);
1842 1840 ASSERT(svd->vpage != NULL);
1843 1841
1844 1842 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1845 1843
1846 1844 for (vp = svd->vpage; vp < evp; vp++) {
1847 1845 if (VPP_ISSWAPRES(vp))
1848 1846 nswappages++;
1849 1847 }
1850 1848
1851 1849 return (nswappages << PAGESHIFT);
1852 1850 }
1853 1851
1854 1852 static int
1855 1853 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1856 1854 {
1857 1855 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1858 1856 struct segvn_data *nsvd;
1859 1857 struct seg *nseg;
1860 1858 struct anon_map *amp;
1861 1859 pgcnt_t opages; /* old segment size in pages */
1862 1860 pgcnt_t npages; /* new segment size in pages */
1863 1861 pgcnt_t dpages; /* pages being deleted (unmapped) */
1864 1862 hat_callback_t callback; /* used for free_vp_pages() */
1865 1863 hat_callback_t *cbp = NULL;
1866 1864 caddr_t nbase;
1867 1865 size_t nsize;
1868 1866 size_t oswresv;
1869 1867 int reclaim = 1;
1870 1868
1871 1869 /*
1872 1870 * We don't need any segment level locks for "segvn" data
1873 1871 * since the address space is "write" locked.
1874 1872 */
1875 1873 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1876 1874
1877 1875 /*
1878 1876 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1879 1877 * softlockcnt is protected from change by the as write lock.
1880 1878 */
1881 1879 retry:
1882 1880 if (svd->softlockcnt > 0) {
1883 1881 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1884 1882
1885 1883 /*
1886 1884 * If this is a shared segment, a non-zero softlockcnt
1887 1885 * means locked pages are still in use.
1888 1886 */
1889 1887 if (svd->type == MAP_SHARED) {
1890 1888 return (EAGAIN);
1891 1889 }
1892 1890
1893 1891 /*
1894 1892 * since we do have the writers lock nobody can fill
1895 1893 * the cache during the purge. The flush either succeeds
1896 1894 * or we still have pending I/Os.
1897 1895 */
1898 1896 if (reclaim == 1) {
1899 1897 segvn_purge(seg);
1900 1898 reclaim = 0;
1901 1899 goto retry;
1902 1900 }
1903 1901 return (EAGAIN);
1904 1902 }
1905 1903
1906 1904 /*
1907 1905 * Check for bad sizes
1908 1906 */
1909 1907 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1910 1908 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1911 1909 panic("segvn_unmap");
1912 1910 /*NOTREACHED*/
1913 1911 }
1914 1912
1915 1913 if (seg->s_szc != 0) {
1916 1914 size_t pgsz = page_get_pagesize(seg->s_szc);
1917 1915 int err;
1918 1916 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1919 1917 ASSERT(seg->s_base != addr || seg->s_size != len);
1920 1918 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1921 1919 ASSERT(svd->amp == NULL);
1922 1920 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1923 1921 hat_leave_region(seg->s_as->a_hat,
1924 1922 svd->rcookie, HAT_REGION_TEXT);
1925 1923 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1926 1924 /*
1927 1925 * could pass a flag to segvn_demote_range()
1928 1926 * below to tell it not to do any unloads but
1929 1927 * this case is rare enough to not bother for
1930 1928 * now.
1931 1929 */
1932 1930 } else if (svd->tr_state == SEGVN_TR_INIT) {
1933 1931 svd->tr_state = SEGVN_TR_OFF;
1934 1932 } else if (svd->tr_state == SEGVN_TR_ON) {
1935 1933 ASSERT(svd->amp != NULL);
1936 1934 segvn_textunrepl(seg, 1);
1937 1935 ASSERT(svd->amp == NULL);
1938 1936 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1939 1937 }
1940 1938 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1941 1939 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1942 1940 if (err == 0) {
1943 1941 return (IE_RETRY);
1944 1942 }
1945 1943 return (err);
1946 1944 }
1947 1945 }
1948 1946
1949 1947 /* Inform the vnode of the unmapping. */
1950 1948 if (svd->vp) {
1951 1949 int error;
1952 1950
1953 1951 error = VOP_DELMAP(svd->vp,
1954 1952 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1955 1953 seg->s_as, addr, len, svd->prot, svd->maxprot,
1956 1954 svd->type, svd->cred, NULL);
1957 1955
1958 1956 if (error == EAGAIN)
1959 1957 return (error);
1960 1958 }
1961 1959
1962 1960 /*
1963 1961 * Remove any page locks set through this mapping.
1964 1962 * If text replication is not off no page locks could have been
1965 1963 * established via this mapping.
1966 1964 */
1967 1965 if (svd->tr_state == SEGVN_TR_OFF) {
1968 1966 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1969 1967 }
1970 1968
1971 1969 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1972 1970 ASSERT(svd->amp == NULL);
1973 1971 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1974 1972 ASSERT(svd->type == MAP_PRIVATE);
1975 1973 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1976 1974 HAT_REGION_TEXT);
1977 1975 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1978 1976 } else if (svd->tr_state == SEGVN_TR_ON) {
1979 1977 ASSERT(svd->amp != NULL);
1980 1978 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1981 1979 segvn_textunrepl(seg, 1);
1982 1980 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1983 1981 } else {
1984 1982 if (svd->tr_state != SEGVN_TR_OFF) {
1985 1983 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1986 1984 svd->tr_state = SEGVN_TR_OFF;
1987 1985 }
1988 1986 /*
1989 1987 * Unload any hardware translations in the range to be taken
1990 1988 * out. Use a callback to invoke free_vp_pages() effectively.
1991 1989 */
1992 1990 if (svd->vp != NULL && free_pages != 0) {
1993 1991 callback.hcb_data = seg;
1994 1992 callback.hcb_function = segvn_hat_unload_callback;
1995 1993 cbp = &callback;
1996 1994 }
1997 1995 hat_unload_callback(seg->s_as->a_hat, addr, len,
1998 1996 HAT_UNLOAD_UNMAP, cbp);
1999 1997
2000 1998 if (svd->type == MAP_SHARED && svd->vp != NULL &&
2001 1999 (svd->vp->v_flag & VVMEXEC) &&
2002 2000 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
2003 2001 segvn_inval_trcache(svd->vp);
2004 2002 }
2005 2003 }
2006 2004
2007 2005 /*
2008 2006 * Check for entire segment
2009 2007 */
2010 2008 if (addr == seg->s_base && len == seg->s_size) {
2011 2009 seg_free(seg);
2012 2010 return (0);
2013 2011 }
2014 2012
2015 2013 opages = seg_pages(seg);
2016 2014 dpages = btop(len);
2017 2015 npages = opages - dpages;
2018 2016 amp = svd->amp;
2019 2017 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
2020 2018
2021 2019 /*
2022 2020 * Check for beginning of segment
2023 2021 */
2024 2022 if (addr == seg->s_base) {
2025 2023 if (svd->vpage != NULL) {
2026 2024 size_t nbytes;
2027 2025 struct vpage *ovpage;
2028 2026
2029 2027 ovpage = svd->vpage; /* keep pointer to vpage */
2030 2028
2031 2029 nbytes = vpgtob(npages);
2032 2030 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2033 2031 bcopy(&ovpage[dpages], svd->vpage, nbytes);
2034 2032
2035 2033 /* free up old vpage */
2036 2034 kmem_free(ovpage, vpgtob(opages));
2037 2035 }
2038 2036 if (amp != NULL) {
2039 2037 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2040 2038 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2041 2039 /*
2042 2040 * Shared anon map is no longer in use. Before
2043 2041 * freeing its pages purge all entries from
2044 2042 * pcache that belong to this amp.
2045 2043 */
2046 2044 if (svd->type == MAP_SHARED) {
2047 2045 ASSERT(amp->refcnt == 1);
2048 2046 ASSERT(svd->softlockcnt == 0);
2049 2047 anonmap_purge(amp);
2050 2048 }
2051 2049 /*
2052 2050 * Free up now unused parts of anon_map array.
2053 2051 */
2054 2052 if (amp->a_szc == seg->s_szc) {
2055 2053 if (seg->s_szc != 0) {
2056 2054 anon_free_pages(amp->ahp,
2057 2055 svd->anon_index, len,
2058 2056 seg->s_szc);
2059 2057 } else {
2060 2058 anon_free(amp->ahp,
2061 2059 svd->anon_index,
2062 2060 len);
2063 2061 }
2064 2062 } else {
2065 2063 ASSERT(svd->type == MAP_SHARED);
2066 2064 ASSERT(amp->a_szc > seg->s_szc);
2067 2065 anon_shmap_free_pages(amp,
2068 2066 svd->anon_index, len);
2069 2067 }
2070 2068
2071 2069 /*
2072 2070 * Unreserve swap space for the
2073 2071 * unmapped chunk of this segment in
2074 2072 * case it's MAP_SHARED
2075 2073 */
2076 2074 if (svd->type == MAP_SHARED) {
2077 2075 anon_unresv_zone(len,
2078 2076 seg->s_as->a_proc->p_zone);
2079 2077 amp->swresv -= len;
2080 2078 }
2081 2079 }
2082 2080 ANON_LOCK_EXIT(&amp->a_rwlock);
2083 2081 svd->anon_index += dpages;
2084 2082 }
2085 2083 if (svd->vp != NULL)
2086 2084 svd->offset += len;
2087 2085
2088 2086 seg->s_base += len;
2089 2087 seg->s_size -= len;
2090 2088
2091 2089 if (svd->swresv) {
2092 2090 if (svd->flags & MAP_NORESERVE) {
2093 2091 ASSERT(amp);
2094 2092 oswresv = svd->swresv;
2095 2093
2096 2094 svd->swresv = ptob(anon_pages(amp->ahp,
2097 2095 svd->anon_index, npages));
2098 2096 anon_unresv_zone(oswresv - svd->swresv,
2099 2097 seg->s_as->a_proc->p_zone);
2100 2098 if (SEG_IS_PARTIAL_RESV(seg))
2101 2099 seg->s_as->a_resvsize -= oswresv -
2102 2100 svd->swresv;
2103 2101 } else {
2104 2102 size_t unlen;
2105 2103
2106 2104 if (svd->pageswap) {
2107 2105 oswresv = svd->swresv;
2108 2106 svd->swresv =
2109 2107 segvn_count_swap_by_vpages(seg);
2110 2108 ASSERT(oswresv >= svd->swresv);
2111 2109 unlen = oswresv - svd->swresv;
2112 2110 } else {
2113 2111 svd->swresv -= len;
2114 2112 ASSERT(svd->swresv == seg->s_size);
2115 2113 unlen = len;
2116 2114 }
2117 2115 anon_unresv_zone(unlen,
2118 2116 seg->s_as->a_proc->p_zone);
2119 2117 }
2120 2118 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2121 2119 seg, len, 0);
2122 2120 }
2123 2121
2124 2122 return (0);
2125 2123 }
2126 2124
2127 2125 /*
2128 2126 * Check for end of segment
2129 2127 */
2130 2128 if (addr + len == seg->s_base + seg->s_size) {
2131 2129 if (svd->vpage != NULL) {
2132 2130 size_t nbytes;
2133 2131 struct vpage *ovpage;
2134 2132
2135 2133 ovpage = svd->vpage; /* keep pointer to vpage */
2136 2134
2137 2135 nbytes = vpgtob(npages);
2138 2136 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2139 2137 bcopy(ovpage, svd->vpage, nbytes);
2140 2138
2141 2139 /* free up old vpage */
2142 2140 kmem_free(ovpage, vpgtob(opages));
2143 2141
2144 2142 }
2145 2143 if (amp != NULL) {
2146 2144 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2147 2145 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2148 2146 /*
2149 2147 * Free up now unused parts of anon_map array.
2150 2148 */
2151 2149 ulong_t an_idx = svd->anon_index + npages;
2152 2150
2153 2151 /*
2154 2152 * Shared anon map is no longer in use. Before
2155 2153 * freeing its pages purge all entries from
2156 2154 * pcache that belong to this amp.
2157 2155 */
2158 2156 if (svd->type == MAP_SHARED) {
2159 2157 ASSERT(amp->refcnt == 1);
2160 2158 ASSERT(svd->softlockcnt == 0);
2161 2159 anonmap_purge(amp);
2162 2160 }
2163 2161
2164 2162 if (amp->a_szc == seg->s_szc) {
2165 2163 if (seg->s_szc != 0) {
2166 2164 anon_free_pages(amp->ahp,
2167 2165 an_idx, len,
2168 2166 seg->s_szc);
2169 2167 } else {
2170 2168 anon_free(amp->ahp, an_idx,
2171 2169 len);
2172 2170 }
2173 2171 } else {
2174 2172 ASSERT(svd->type == MAP_SHARED);
2175 2173 ASSERT(amp->a_szc > seg->s_szc);
2176 2174 anon_shmap_free_pages(amp,
2177 2175 an_idx, len);
2178 2176 }
2179 2177
2180 2178 /*
2181 2179 * Unreserve swap space for the
2182 2180 * unmapped chunk of this segment in
2183 2181 * case it's MAP_SHARED
2184 2182 */
2185 2183 if (svd->type == MAP_SHARED) {
2186 2184 anon_unresv_zone(len,
2187 2185 seg->s_as->a_proc->p_zone);
2188 2186 amp->swresv -= len;
2189 2187 }
2190 2188 }
2191 2189 ANON_LOCK_EXIT(&amp->a_rwlock);
2192 2190 }
2193 2191
2194 2192 seg->s_size -= len;
2195 2193
2196 2194 if (svd->swresv) {
2197 2195 if (svd->flags & MAP_NORESERVE) {
2198 2196 ASSERT(amp);
2199 2197 oswresv = svd->swresv;
2200 2198 svd->swresv = ptob(anon_pages(amp->ahp,
2201 2199 svd->anon_index, npages));
2202 2200 anon_unresv_zone(oswresv - svd->swresv,
2203 2201 seg->s_as->a_proc->p_zone);
2204 2202 if (SEG_IS_PARTIAL_RESV(seg))
2205 2203 seg->s_as->a_resvsize -= oswresv -
2206 2204 svd->swresv;
2207 2205 } else {
2208 2206 size_t unlen;
2209 2207
2210 2208 if (svd->pageswap) {
2211 2209 oswresv = svd->swresv;
2212 2210 svd->swresv =
2213 2211 segvn_count_swap_by_vpages(seg);
2214 2212 ASSERT(oswresv >= svd->swresv);
2215 2213 unlen = oswresv - svd->swresv;
2216 2214 } else {
2217 2215 svd->swresv -= len;
2218 2216 ASSERT(svd->swresv == seg->s_size);
2219 2217 unlen = len;
2220 2218 }
2221 2219 anon_unresv_zone(unlen,
2222 2220 seg->s_as->a_proc->p_zone);
2223 2221 }
2224 2222 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2225 2223 "anon proc:%p %lu %u", seg, len, 0);
2226 2224 }
2227 2225
2228 2226 return (0);
2229 2227 }
2230 2228
2231 2229 /*
2232 2230 * The section to go is in the middle of the segment,
2233 2231 * have to make it into two segments. nseg is made for
2234 2232 * the high end while seg is cut down at the low end.
2235 2233 */
2236 2234 nbase = addr + len; /* new seg base */
2237 2235 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2238 2236 seg->s_size = addr - seg->s_base; /* shrink old seg */
2239 2237 nseg = seg_alloc(seg->s_as, nbase, nsize);
2240 2238 if (nseg == NULL) {
2241 2239 panic("segvn_unmap seg_alloc");
2242 2240 /*NOTREACHED*/
2243 2241 }
2244 2242 nseg->s_ops = seg->s_ops;
2245 2243 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2246 2244 nseg->s_data = (void *)nsvd;
2247 2245 nseg->s_szc = seg->s_szc;
2248 2246 *nsvd = *svd;
2249 2247 nsvd->seg = nseg;
2250 2248 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2251 2249 nsvd->swresv = 0;
2252 2250 nsvd->softlockcnt = 0;
2253 2251 nsvd->softlockcnt_sbase = 0;
2254 2252 nsvd->softlockcnt_send = 0;
2255 2253 nsvd->svn_inz = svd->svn_inz;
2256 2254 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2257 2255
2258 2256 if (svd->vp != NULL) {
2259 2257 VN_HOLD(nsvd->vp);
2260 2258 if (nsvd->type == MAP_SHARED)
2261 2259 lgrp_shm_policy_init(NULL, nsvd->vp);
2262 2260 }
2263 2261 crhold(svd->cred);
2264 2262
2265 2263 if (svd->vpage == NULL) {
2266 2264 nsvd->vpage = NULL;
2267 2265 } else {
2268 2266 /* need to split vpage into two arrays */
2269 2267 size_t nbytes;
2270 2268 struct vpage *ovpage;
2271 2269
2272 2270 ovpage = svd->vpage; /* keep pointer to vpage */
2273 2271
2274 2272 npages = seg_pages(seg); /* seg has shrunk */
2275 2273 nbytes = vpgtob(npages);
2276 2274 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2277 2275
2278 2276 bcopy(ovpage, svd->vpage, nbytes);
2279 2277
2280 2278 npages = seg_pages(nseg);
2281 2279 nbytes = vpgtob(npages);
2282 2280 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2283 2281
2284 2282 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2285 2283
2286 2284 /* free up old vpage */
2287 2285 kmem_free(ovpage, vpgtob(opages));
2288 2286 }
2289 2287
2290 2288 if (amp == NULL) {
2291 2289 nsvd->amp = NULL;
2292 2290 nsvd->anon_index = 0;
2293 2291 } else {
2294 2292 /*
2295 2293 * Need to create a new anon map for the new segment.
2296 2294 * We'll also allocate a new smaller array for the old
2297 2295 * smaller segment to save space.
2298 2296 */
2299 2297 opages = btop((uintptr_t)(addr - seg->s_base));
2300 2298 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2301 2299 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2302 2300 /*
2303 2301 * Free up now unused parts of anon_map array.
2304 2302 */
2305 2303 ulong_t an_idx = svd->anon_index + opages;
2306 2304
2307 2305 /*
2308 2306 * Shared anon map is no longer in use. Before
2309 2307 * freeing its pages purge all entries from
2310 2308 * pcache that belong to this amp.
2311 2309 */
2312 2310 if (svd->type == MAP_SHARED) {
2313 2311 ASSERT(amp->refcnt == 1);
2314 2312 ASSERT(svd->softlockcnt == 0);
2315 2313 anonmap_purge(amp);
2316 2314 }
2317 2315
2318 2316 if (amp->a_szc == seg->s_szc) {
2319 2317 if (seg->s_szc != 0) {
2320 2318 anon_free_pages(amp->ahp, an_idx, len,
2321 2319 seg->s_szc);
2322 2320 } else {
2323 2321 anon_free(amp->ahp, an_idx,
2324 2322 len);
2325 2323 }
2326 2324 } else {
2327 2325 ASSERT(svd->type == MAP_SHARED);
2328 2326 ASSERT(amp->a_szc > seg->s_szc);
2329 2327 anon_shmap_free_pages(amp, an_idx, len);
2330 2328 }
2331 2329
2332 2330 /*
2333 2331 * Unreserve swap space for the
2334 2332 * unmapped chunk of this segment in
2335 2333 * case it's MAP_SHARED
2336 2334 */
2337 2335 if (svd->type == MAP_SHARED) {
2338 2336 anon_unresv_zone(len,
2339 2337 seg->s_as->a_proc->p_zone);
2340 2338 amp->swresv -= len;
2341 2339 }
2342 2340 }
2343 2341 nsvd->anon_index = svd->anon_index +
2344 2342 btop((uintptr_t)(nseg->s_base - seg->s_base));
2345 2343 if (svd->type == MAP_SHARED) {
2346 2344 amp->refcnt++;
2347 2345 nsvd->amp = amp;
2348 2346 } else {
2349 2347 struct anon_map *namp;
2350 2348 struct anon_hdr *nahp;
2351 2349
2352 2350 ASSERT(svd->type == MAP_PRIVATE);
2353 2351 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2354 2352 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2355 2353 namp->a_szc = seg->s_szc;
2356 2354 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2357 2355 0, btop(seg->s_size), ANON_SLEEP);
2358 2356 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2359 2357 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2360 2358 anon_release(amp->ahp, btop(amp->size));
2361 2359 svd->anon_index = 0;
2362 2360 nsvd->anon_index = 0;
2363 2361 amp->ahp = nahp;
2364 2362 amp->size = seg->s_size;
2365 2363 nsvd->amp = namp;
2366 2364 }
2367 2365 ANON_LOCK_EXIT(&amp->a_rwlock);
2368 2366 }
2369 2367 if (svd->swresv) {
2370 2368 if (svd->flags & MAP_NORESERVE) {
2371 2369 ASSERT(amp);
2372 2370 oswresv = svd->swresv;
2373 2371 svd->swresv = ptob(anon_pages(amp->ahp,
2374 2372 svd->anon_index, btop(seg->s_size)));
2375 2373 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2376 2374 nsvd->anon_index, btop(nseg->s_size)));
2377 2375 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2378 2376 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2379 2377 seg->s_as->a_proc->p_zone);
2380 2378 if (SEG_IS_PARTIAL_RESV(seg))
2381 2379 seg->s_as->a_resvsize -= oswresv -
2382 2380 (svd->swresv + nsvd->swresv);
2383 2381 } else {
2384 2382 size_t unlen;
2385 2383
2386 2384 if (svd->pageswap) {
2387 2385 oswresv = svd->swresv;
2388 2386 svd->swresv = segvn_count_swap_by_vpages(seg);
2389 2387 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2390 2388 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2391 2389 unlen = oswresv - (svd->swresv + nsvd->swresv);
2392 2390 } else {
2393 2391 if (seg->s_size + nseg->s_size + len !=
2394 2392 svd->swresv) {
2395 2393 panic("segvn_unmap: cannot split "
2396 2394 "swap reservation");
2397 2395 /*NOTREACHED*/
2398 2396 }
2399 2397 svd->swresv = seg->s_size;
2400 2398 nsvd->swresv = nseg->s_size;
2401 2399 unlen = len;
2402 2400 }
2403 2401 anon_unresv_zone(unlen,
2404 2402 seg->s_as->a_proc->p_zone);
2405 2403 }
2406 2404 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2407 2405 seg, len, 0);
2408 2406 }
2409 2407
2410 2408 return (0); /* I'm glad that's all over with! */
2411 2409 }
2412 2410
2413 2411 static void
2414 2412 segvn_free(struct seg *seg)
2415 2413 {
2416 2414 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2417 2415 pgcnt_t npages = seg_pages(seg);
2418 2416 struct anon_map *amp;
2419 2417 size_t len;
2420 2418
2421 2419 /*
2422 2420 * We don't need any segment level locks for "segvn" data
2423 2421 * since the address space is "write" locked.
2424 2422 */
2425 2423 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2426 2424 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2427 2425
2428 2426 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2429 2427
2430 2428 /*
2431 2429 * Be sure to unlock pages. XXX Why do things get free'ed instead
2432 2430 * of unmapped? XXX
2433 2431 */
2434 2432 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2435 2433 0, MC_UNLOCK, NULL, 0);
2436 2434
2437 2435 /*
2438 2436 * Deallocate the vpage and anon pointers if necessary and possible.
2439 2437 */
2440 2438 if (svd->vpage != NULL) {
2441 2439 kmem_free(svd->vpage, vpgtob(npages));
2442 2440 svd->vpage = NULL;
2443 2441 }
2444 2442 if ((amp = svd->amp) != NULL) {
2445 2443 /*
2446 2444 * If there are no more references to this anon_map
2447 2445 * structure, then deallocate the structure after freeing
2448 2446 * up all the anon slot pointers that we can.
2449 2447 */
2450 2448 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2451 2449 ASSERT(amp->a_szc >= seg->s_szc);
2452 2450 if (--amp->refcnt == 0) {
2453 2451 if (svd->type == MAP_PRIVATE) {
2454 2452 /*
2455 2453 * Private - we only need to anon_free
2456 2454 * the part that this segment refers to.
2457 2455 */
2458 2456 if (seg->s_szc != 0) {
2459 2457 anon_free_pages(amp->ahp,
2460 2458 svd->anon_index, seg->s_size,
2461 2459 seg->s_szc);
2462 2460 } else {
2463 2461 anon_free(amp->ahp, svd->anon_index,
2464 2462 seg->s_size);
2465 2463 }
2466 2464 } else {
2467 2465
2468 2466 /*
2469 2467 * Shared anon map is no longer in use. Before
2470 2468 * freeing its pages purge all entries from
2471 2469 * pcache that belong to this amp.
2472 2470 */
2473 2471 ASSERT(svd->softlockcnt == 0);
2474 2472 anonmap_purge(amp);
2475 2473
2476 2474 /*
2477 2475 * Shared - anon_free the entire
2478 2476 * anon_map's worth of stuff and
2479 2477 * release any swap reservation.
2480 2478 */
2481 2479 if (amp->a_szc != 0) {
2482 2480 anon_shmap_free_pages(amp, 0,
2483 2481 amp->size);
2484 2482 } else {
2485 2483 anon_free(amp->ahp, 0, amp->size);
2486 2484 }
2487 2485 if ((len = amp->swresv) != 0) {
2488 2486 anon_unresv_zone(len,
2489 2487 seg->s_as->a_proc->p_zone);
2490 2488 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2491 2489 "anon proc:%p %lu %u", seg, len, 0);
2492 2490 }
2493 2491 }
2494 2492 svd->amp = NULL;
2495 2493 ANON_LOCK_EXIT(&amp->a_rwlock);
2496 2494 anonmap_free(amp);
2497 2495 } else if (svd->type == MAP_PRIVATE) {
2498 2496 /*
2499 2497 * We had a private mapping which still has
2500 2498 * a held anon_map so just free up all the
2501 2499 * anon slot pointers that we were using.
2502 2500 */
2503 2501 if (seg->s_szc != 0) {
2504 2502 anon_free_pages(amp->ahp, svd->anon_index,
2505 2503 seg->s_size, seg->s_szc);
2506 2504 } else {
2507 2505 anon_free(amp->ahp, svd->anon_index,
2508 2506 seg->s_size);
2509 2507 }
2510 2508 ANON_LOCK_EXIT(&amp->a_rwlock);
2511 2509 } else {
2512 2510 ANON_LOCK_EXIT(&amp->a_rwlock);
2513 2511 }
2514 2512 }
2515 2513
2516 2514 /*
2517 2515 * Release swap reservation.
2518 2516 */
2519 2517 if ((len = svd->swresv) != 0) {
2520 2518 anon_unresv_zone(svd->swresv,
2521 2519 seg->s_as->a_proc->p_zone);
2522 2520 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2523 2521 seg, len, 0);
2524 2522 if (SEG_IS_PARTIAL_RESV(seg))
2525 2523 seg->s_as->a_resvsize -= svd->swresv;
2526 2524 svd->swresv = 0;
2527 2525 }
2528 2526 /*
2529 2527 * Release claim on vnode, credentials, and finally free the
2530 2528 * private data.
2531 2529 */
2532 2530 if (svd->vp != NULL) {
2533 2531 if (svd->type == MAP_SHARED)
2534 2532 lgrp_shm_policy_fini(NULL, svd->vp);
2535 2533 VN_RELE(svd->vp);
2536 2534 svd->vp = NULL;
2537 2535 }
2538 2536 crfree(svd->cred);
2539 2537 svd->pageprot = 0;
2540 2538 svd->pageadvice = 0;
2541 2539 svd->pageswap = 0;
2542 2540 svd->cred = NULL;
2543 2541
2544 2542 /*
2545 2543 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2546 2544 * still working with this segment without holding as lock (in case
2547 2545 * it's called by pcache async thread).
2548 2546 */
2549 2547 ASSERT(svd->softlockcnt == 0);
2550 2548 mutex_enter(&svd->segfree_syncmtx);
2551 2549 mutex_exit(&svd->segfree_syncmtx);
2552 2550
2553 2551 seg->s_data = NULL;
2554 2552 kmem_cache_free(segvn_cache, svd);
2555 2553 }
2556 2554
2557 2555 /*
2558 2556 * Do a F_SOFTUNLOCK call over the range requested. The range must have
2559 2557 * already been F_SOFTLOCK'ed.
2560 2558 * Caller must always match addr and len of a softunlock with a previous
2561 2559 * softlock with exactly the same addr and len.
2562 2560 */
2563 2561 static void
2564 2562 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2565 2563 {
2566 2564 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2567 2565 page_t *pp;
2568 2566 caddr_t adr;
2569 2567 struct vnode *vp;
2570 2568 u_offset_t offset;
2571 2569 ulong_t anon_index;
2572 2570 struct anon_map *amp;
2573 2571 struct anon *ap = NULL;
2574 2572
2575 2573 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2576 2574 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2577 2575
2578 2576 if ((amp = svd->amp) != NULL)
2579 2577 anon_index = svd->anon_index + seg_page(seg, addr);
2580 2578
2581 2579 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2582 2580 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2583 2581 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2584 2582 } else {
2585 2583 hat_unlock(seg->s_as->a_hat, addr, len);
2586 2584 }
2587 2585 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2588 2586 if (amp != NULL) {
2589 2587 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2590 2588 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2591 2589 != NULL) {
2592 2590 swap_xlate(ap, &vp, &offset);
2593 2591 } else {
2594 2592 vp = svd->vp;
2595 2593 offset = svd->offset +
2596 2594 (uintptr_t)(adr - seg->s_base);
2597 2595 }
2598 2596 ANON_LOCK_EXIT(&amp->a_rwlock);
2599 2597 } else {
2600 2598 vp = svd->vp;
2601 2599 offset = svd->offset +
2602 2600 (uintptr_t)(adr - seg->s_base);
2603 2601 }
2604 2602
2605 2603 /*
2606 2604 * Use page_find() instead of page_lookup() to
2607 2605 * find the page since we know that it is locked.
2608 2606 */
2609 2607 pp = page_find(vp, offset);
2610 2608 if (pp == NULL) {
2611 2609 panic(
2612 2610 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2613 2611 (void *)adr, (void *)ap, (void *)vp, offset);
2614 2612 /*NOTREACHED*/
2615 2613 }
2616 2614
2617 2615 if (rw == S_WRITE) {
2618 2616 hat_setrefmod(pp);
2619 2617 if (seg->s_as->a_vbits)
2620 2618 hat_setstat(seg->s_as, adr, PAGESIZE,
2621 2619 P_REF | P_MOD);
2622 2620 } else if (rw != S_OTHER) {
2623 2621 hat_setref(pp);
2624 2622 if (seg->s_as->a_vbits)
2625 2623 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2626 2624 }
2627 2625 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2628 2626 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2629 2627 page_unlock(pp);
2630 2628 }
2631 2629 ASSERT(svd->softlockcnt >= btop(len));
2632 2630 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2633 2631 /*
2634 2632 * All SOFTLOCKS are gone. Wakeup any waiting
2635 2633 * unmappers so they can try again to unmap.
2636 2634 * Check for waiters first without the mutex
2637 2635 * held so we don't always grab the mutex on
2638 2636 * softunlocks.
2639 2637 */
2640 2638 if (AS_ISUNMAPWAIT(seg->s_as)) {
2641 2639 mutex_enter(&seg->s_as->a_contents);
2642 2640 if (AS_ISUNMAPWAIT(seg->s_as)) {
2643 2641 AS_CLRUNMAPWAIT(seg->s_as);
2644 2642 cv_broadcast(&seg->s_as->a_cv);
2645 2643 }
2646 2644 mutex_exit(&seg->s_as->a_contents);
2647 2645 }
2648 2646 }
2649 2647 }
2650 2648
2651 2649 #define PAGE_HANDLED ((page_t *)-1)
2652 2650
2653 2651 /*
2654 2652 * Release all the pages in the NULL terminated ppp list
2655 2653 * which haven't already been converted to PAGE_HANDLED.
2656 2654 */
2657 2655 static void
2658 2656 segvn_pagelist_rele(page_t **ppp)
2659 2657 {
2660 2658 for (; *ppp != NULL; ppp++) {
2661 2659 if (*ppp != PAGE_HANDLED)
2662 2660 page_unlock(*ppp);
2663 2661 }
2664 2662 }
2665 2663
2666 2664 static int stealcow = 1;
2667 2665
2668 2666 /*
2669 2667 * Workaround for viking chip bug. See bug id 1220902.
2670 2668 * To fix this down in pagefault() would require importing so
2671 2669 * much of the as and segvn code as to be unmaintainable.
2672 2670 */
2673 2671 int enable_mbit_wa = 0;
2674 2672
2675 2673 /*
2676 2674 * Handles all the dirty work of getting the right
2677 2675 * anonymous pages and loading up the translations.
2678 2676 * This routine is called only from segvn_fault()
2679 2677 * when looping over the range of addresses requested.
2680 2678 *
2681 2679 * The basic algorithm here is:
2682 2680 * If this is an anon_zero case
2683 2681 * Call anon_zero to allocate page
2684 2682 * Load up translation
2685 2683 * Return
2686 2684 * endif
2687 2685 * If this is an anon page
2688 2686 * Use anon_getpage to get the page
2689 2687 * else
2690 2688 * Find page in pl[] list passed in
2691 2689 * endif
2692 2690 * If not a cow
2693 2691 * Load up the translation to the page
2694 2692 * return
2695 2693 * endif
2696 2694 * Call anon_private to handle cow
2697 2695 * Load up (writable) translation to new page
2698 2696 */
2699 2697 static faultcode_t
2700 2698 segvn_faultpage(
2701 2699 struct hat *hat, /* the hat to use for mapping */
2702 2700 struct seg *seg, /* seg_vn of interest */
2703 2701 caddr_t addr, /* address in as */
2704 2702 u_offset_t off, /* offset in vp */
2705 2703 struct vpage *vpage, /* pointer to vpage for vp, off */
2706 2704 page_t *pl[], /* object source page pointer */
2707 2705 uint_t vpprot, /* access allowed to object pages */
2708 2706 enum fault_type type, /* type of fault */
2709 2707 enum seg_rw rw, /* type of access at fault */
2710 2708 int brkcow) /* we may need to break cow */
2711 2709 {
2712 2710 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2713 2711 page_t *pp, **ppp;
2714 2712 uint_t pageflags = 0;
2715 2713 page_t *anon_pl[1 + 1];
2716 2714 page_t *opp = NULL; /* original page */
2717 2715 uint_t prot;
2718 2716 int err;
2719 2717 int cow;
2720 2718 int claim;
2721 2719 int steal = 0;
2722 2720 ulong_t anon_index;
2723 2721 struct anon *ap, *oldap;
2724 2722 struct anon_map *amp;
2725 2723 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2726 2724 int anon_lock = 0;
2727 2725 anon_sync_obj_t cookie;
2728 2726
2729 2727 if (svd->flags & MAP_TEXT) {
2730 2728 hat_flag |= HAT_LOAD_TEXT;
2731 2729 }
2732 2730
2733 2731 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2734 2732 ASSERT(seg->s_szc == 0);
2735 2733 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2736 2734
2737 2735 /*
2738 2736 * Initialize protection value for this page.
2739 2737 * If we have per page protection values check it now.
2740 2738 */
2741 2739 if (svd->pageprot) {
2742 2740 uint_t protchk;
2743 2741
2744 2742 switch (rw) {
2745 2743 case S_READ:
2746 2744 protchk = PROT_READ;
2747 2745 break;
2748 2746 case S_WRITE:
2749 2747 protchk = PROT_WRITE;
2750 2748 break;
2751 2749 case S_EXEC:
2752 2750 protchk = PROT_EXEC;
2753 2751 break;
2754 2752 case S_OTHER:
2755 2753 default:
2756 2754 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2757 2755 break;
2758 2756 }
2759 2757
2760 2758 prot = VPP_PROT(vpage);
2761 2759 if ((prot & protchk) == 0)
2762 2760 return (FC_PROT); /* illegal access type */
2763 2761 } else {
2764 2762 prot = svd->prot;
2765 2763 }
2766 2764
2767 2765 if (type == F_SOFTLOCK) {
2768 2766 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2769 2767 }
2770 2768
2771 2769 /*
2772 2770 * Always acquire the anon array lock to prevent 2 threads from
2773 2771 * allocating separate anon slots for the same "addr".
2774 2772 */
2775 2773
2776 2774 if ((amp = svd->amp) != NULL) {
2777 2775 ASSERT(RW_READ_HELD(&amp->a_rwlock));
2778 2776 anon_index = svd->anon_index + seg_page(seg, addr);
2779 2777 anon_array_enter(amp, anon_index, &cookie);
2780 2778 anon_lock = 1;
2781 2779 }
2782 2780
2783 2781 if (svd->vp == NULL && amp != NULL) {
2784 2782 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2785 2783 /*
2786 2784 * Allocate a (normally) writable anonymous page of
2787 2785 * zeroes. If no advance reservations, reserve now.
2788 2786 */
2789 2787 if (svd->flags & MAP_NORESERVE) {
2790 2788 if (anon_resv_zone(ptob(1),
2791 2789 seg->s_as->a_proc->p_zone)) {
2792 2790 atomic_add_long(&svd->swresv, ptob(1));
2793 2791 atomic_add_long(&seg->s_as->a_resvsize,
2794 2792 ptob(1));
2795 2793 } else {
2796 2794 err = ENOMEM;
2797 2795 goto out;
2798 2796 }
2799 2797 }
2800 2798 if ((pp = anon_zero(seg, addr, &ap,
2801 2799 svd->cred)) == NULL) {
2802 2800 err = ENOMEM;
2803 2801 goto out; /* out of swap space */
2804 2802 }
2805 2803 /*
2806 2804 * Re-acquire the anon_map lock and
2807 2805 * initialize the anon array entry.
2808 2806 */
2809 2807 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2810 2808 ANON_SLEEP);
2811 2809
2812 2810 ASSERT(pp->p_szc == 0);
2813 2811
2814 2812 /*
2815 2813 * Handle pages that have been marked for migration
2816 2814 */
2817 2815 if (lgrp_optimizations())
2818 2816 page_migrate(seg, addr, &pp, 1);
2819 2817
2820 2818 if (enable_mbit_wa) {
2821 2819 if (rw == S_WRITE)
2822 2820 hat_setmod(pp);
2823 2821 else if (!hat_ismod(pp))
2824 2822 prot &= ~PROT_WRITE;
2825 2823 }
2826 2824 /*
2827 2825 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2828 2826 * with MC_LOCKAS, MCL_FUTURE) and this is a
2829 2827 * MAP_NORESERVE segment, we may need to
2830 2828 * permanently lock the page as it is being faulted
2831 2829 * for the first time. The following text applies
2832 2830 * only to MAP_NORESERVE segments:
2833 2831 *
2834 2832 * As per memcntl(2), if this segment was created
2835 2833 * after MCL_FUTURE was applied (a "future"
2836 2834 * segment), its pages must be locked. If this
2837 2835 * segment existed at MCL_FUTURE application (a
2838 2836 * "past" segment), the interface is unclear.
2839 2837 *
2840 2838 * We decide to lock only if vpage is present:
2841 2839 *
2842 2840 * - "future" segments will have a vpage array (see
2843 2841 * as_map), and so will be locked as required
2844 2842 *
2845 2843 * - "past" segments may not have a vpage array,
2846 2844 * depending on whether events (such as
2847 2845 * mprotect) have occurred. Locking if vpage
2848 2846 * exists will preserve legacy behavior. Not
2849 2847 * locking if vpage is absent, will not break
2850 2848 * the interface or legacy behavior. Note that
2851 2849 * allocating vpage here if it's absent requires
2852 2850 * upgrading the segvn reader lock, the cost of
2853 2851 * which does not seem worthwhile.
2854 2852 *
2855 2853 * Usually testing and setting VPP_ISPPLOCK and
2856 2854 * VPP_SETPPLOCK requires holding the segvn lock as
2857 2855 * writer, but in this case all readers are
2858 2856 * serializing on the anon array lock.
2859 2857 */
2860 2858 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2861 2859 (svd->flags & MAP_NORESERVE) &&
2862 2860 !VPP_ISPPLOCK(vpage)) {
2863 2861 proc_t *p = seg->s_as->a_proc;
2864 2862 ASSERT(svd->type == MAP_PRIVATE);
2865 2863 mutex_enter(&p->p_lock);
2866 2864 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2867 2865 1) == 0) {
2868 2866 claim = VPP_PROT(vpage) & PROT_WRITE;
2869 2867 if (page_pp_lock(pp, claim, 0)) {
2870 2868 VPP_SETPPLOCK(vpage);
2871 2869 } else {
2872 2870 rctl_decr_locked_mem(p, NULL,
2873 2871 PAGESIZE, 1);
2874 2872 }
2875 2873 }
2876 2874 mutex_exit(&p->p_lock);
2877 2875 }
2878 2876
2879 2877 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2880 2878 hat_memload(hat, addr, pp, prot, hat_flag);
2881 2879
2882 2880 if (!(hat_flag & HAT_LOAD_LOCK))
2883 2881 page_unlock(pp);
2884 2882
2885 2883 anon_array_exit(&cookie);
2886 2884 return (0);
2887 2885 }
2888 2886 }
2889 2887
2890 2888 /*
2891 2889 * Obtain the page structure via anon_getpage() if it is
2892 2890 * a private copy of an object (the result of a previous
2893 2891 * copy-on-write).
2894 2892 */
2895 2893 if (amp != NULL) {
2896 2894 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2897 2895 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2898 2896 seg, addr, rw, svd->cred);
2899 2897 if (err)
2900 2898 goto out;
2901 2899
2902 2900 if (svd->type == MAP_SHARED) {
2903 2901 /*
2904 2902 * If this is a shared mapping to an
2905 2903 * anon_map, then ignore the write
2906 2904 * permissions returned by anon_getpage().
2907 2905 * They apply to the private mappings
2908 2906 * of this anon_map.
2909 2907 */
2910 2908 vpprot |= PROT_WRITE;
2911 2909 }
2912 2910 opp = anon_pl[0];
2913 2911 }
2914 2912 }
2915 2913
2916 2914 /*
2917 2915 * Search the pl[] list passed in if it is from the
2918 2916 * original object (i.e., not a private copy).
2919 2917 */
2920 2918 if (opp == NULL) {
2921 2919 /*
2922 2920 * Find original page. We must be bringing it in
2923 2921 * from the list in pl[].
2924 2922 */
2925 2923 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2926 2924 if (opp == PAGE_HANDLED)
2927 2925 continue;
2928 2926 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2929 2927 if (opp->p_offset == off)
2930 2928 break;
2931 2929 }
2932 2930 if (opp == NULL) {
2933 2931 panic("segvn_faultpage not found");
2934 2932 /*NOTREACHED*/
2935 2933 }
2936 2934 *ppp = PAGE_HANDLED;
2937 2935
2938 2936 }
2939 2937
2940 2938 ASSERT(PAGE_LOCKED(opp));
2941 2939
2942 2940 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2943 2941 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2944 2942
2945 2943 /*
2946 2944 * The fault is treated as a copy-on-write fault if a
2947 2945 * write occurs on a private segment and the object
2948 2946 * page (i.e., mapping) is write protected. We assume
2949 2947 * that fatal protection checks have already been made.
2950 2948 */
2951 2949
2952 2950 if (brkcow) {
2953 2951 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2954 2952 cow = !(vpprot & PROT_WRITE);
2955 2953 } else if (svd->tr_state == SEGVN_TR_ON) {
2956 2954 /*
2957 2955 * If we are doing text replication COW on first touch.
2958 2956 */
2959 2957 ASSERT(amp != NULL);
2960 2958 ASSERT(svd->vp != NULL);
2961 2959 ASSERT(rw != S_WRITE);
2962 2960 cow = (ap == NULL);
2963 2961 } else {
2964 2962 cow = 0;
2965 2963 }
2966 2964
2967 2965 /*
2968 2966 * If not a copy-on-write case load the translation
2969 2967 * and return.
2970 2968 */
2971 2969 if (cow == 0) {
2972 2970
2973 2971 /*
2974 2972 * Handle pages that have been marked for migration
2975 2973 */
2976 2974 if (lgrp_optimizations())
2977 2975 page_migrate(seg, addr, &opp, 1);
2978 2976
2979 2977 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2980 2978 if (rw == S_WRITE)
2981 2979 hat_setmod(opp);
2982 2980 else if (rw != S_OTHER && !hat_ismod(opp))
2983 2981 prot &= ~PROT_WRITE;
2984 2982 }
2985 2983
2986 2984 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2987 2985 (!svd->pageprot && svd->prot == (prot & vpprot)));
2988 2986 ASSERT(amp == NULL ||
2989 2987 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2990 2988 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2991 2989 svd->rcookie);
2992 2990
2993 2991 if (!(hat_flag & HAT_LOAD_LOCK))
2994 2992 page_unlock(opp);
2995 2993
2996 2994 if (anon_lock) {
2997 2995 anon_array_exit(&cookie);
2998 2996 }
2999 2997 return (0);
3000 2998 }
3001 2999
3002 3000 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3003 3001
3004 3002 hat_setref(opp);
3005 3003
3006 3004 ASSERT(amp != NULL && anon_lock);
3007 3005
3008 3006 /*
3009 3007 * Steal the page only if it isn't a private page
3010 3008 * since stealing a private page is not worth the effort.
3011 3009 */
3012 3010 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
3013 3011 steal = 1;
3014 3012
3015 3013 /*
3016 3014 * Steal the original page if the following conditions are true:
3017 3015 *
3018 3016 * We are low on memory, the page is not private, page is not large,
3019 3017 * not shared, not modified, not `locked' or if we have it `locked'
3020 3018 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3021 3019 * that the page is not shared) and if it doesn't have any
3022 3020 * translations. page_struct_lock isn't needed to look at p_cowcnt
3023 3021 * and p_lckcnt because we first get exclusive lock on page.
3024 3022 */
3025 3023 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
3026 3024
3027 3025 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
3028 3026 page_tryupgrade(opp) && !hat_ismod(opp) &&
3029 3027 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
3030 3028 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
3031 3029 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
3032 3030 /*
3033 3031 * Check if this page has other translations
3034 3032 * after unloading our translation.
3035 3033 */
3036 3034 if (hat_page_is_mapped(opp)) {
3037 3035 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3038 3036 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3039 3037 HAT_UNLOAD);
3040 3038 }
3041 3039
3042 3040 /*
3043 3041 * hat_unload() might sync back someone else's recent
3044 3042 * modification, so check again.
3045 3043 */
3046 3044 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
3047 3045 pageflags |= STEAL_PAGE;
3048 3046 }
3049 3047
3050 3048 /*
3051 3049 * If we have a vpage pointer, see if it indicates that we have
3052 3050 * ``locked'' the page we map -- if so, tell anon_private to
3053 3051 * transfer the locking resource to the new page.
3054 3052 *
3055 3053 * See Statement at the beginning of segvn_lockop regarding
3056 3054 * the way lockcnts/cowcnts are handled during COW.
3057 3055 *
3058 3056 */
3059 3057 if (vpage != NULL && VPP_ISPPLOCK(vpage))
3060 3058 pageflags |= LOCK_PAGE;
3061 3059
3062 3060 /*
3063 3061 * Allocate a private page and perform the copy.
3064 3062 * For MAP_NORESERVE reserve swap space now, unless this
3065 3063 * is a cow fault on an existing anon page in which case
3066 3064 * MAP_NORESERVE will have made advance reservations.
3067 3065 */
3068 3066 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3069 3067 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3070 3068 atomic_add_long(&svd->swresv, ptob(1));
3071 3069 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3072 3070 } else {
3073 3071 page_unlock(opp);
3074 3072 err = ENOMEM;
3075 3073 goto out;
3076 3074 }
3077 3075 }
3078 3076 oldap = ap;
3079 3077 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3080 3078 if (pp == NULL) {
3081 3079 err = ENOMEM; /* out of swap space */
3082 3080 goto out;
3083 3081 }
3084 3082
3085 3083 /*
3086 3084 * If we copied away from an anonymous page, then
3087 3085 * we are one step closer to freeing up an anon slot.
3088 3086 *
3089 3087 * NOTE: The original anon slot must be released while
3090 3088 * holding the "anon_map" lock. This is necessary to prevent
3091 3089 * other threads from obtaining a pointer to the anon slot
3092 3090 * which may be freed if its "refcnt" is 1.
3093 3091 */
3094 3092 if (oldap != NULL)
3095 3093 anon_decref(oldap);
3096 3094
3097 3095 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3098 3096
3099 3097 /*
3100 3098 * Handle pages that have been marked for migration
3101 3099 */
3102 3100 if (lgrp_optimizations())
3103 3101 page_migrate(seg, addr, &pp, 1);
3104 3102
3105 3103 ASSERT(pp->p_szc == 0);
3106 3104
3107 3105 ASSERT(!IS_VMODSORT(pp->p_vnode));
3108 3106 if (enable_mbit_wa) {
3109 3107 if (rw == S_WRITE)
3110 3108 hat_setmod(pp);
3111 3109 else if (!hat_ismod(pp))
3112 3110 prot &= ~PROT_WRITE;
3113 3111 }
3114 3112
3115 3113 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3116 3114 hat_memload(hat, addr, pp, prot, hat_flag);
3117 3115
3118 3116 if (!(hat_flag & HAT_LOAD_LOCK))
3119 3117 page_unlock(pp);
3120 3118
3121 3119 ASSERT(anon_lock);
3122 3120 anon_array_exit(&cookie);
3123 3121 return (0);
3124 3122 out:
3125 3123 if (anon_lock)
3126 3124 anon_array_exit(&cookie);
3127 3125
3128 3126 if (type == F_SOFTLOCK) {
3129 3127 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3130 3128 }
3131 3129 return (FC_MAKE_ERR(err));
3132 3130 }
3133 3131
3134 3132 /*
3135 3133 * relocate a bunch of smaller targ pages into one large repl page. all targ
3136 3134 * pages must be complete pages smaller than replacement pages.
3137 3135 * it's assumed that no page's szc can change since they are all PAGESIZE or
3138 3136 * complete large pages locked SHARED.
3139 3137 */
3140 3138 static void
3141 3139 segvn_relocate_pages(page_t **targ, page_t *replacement)
3142 3140 {
3143 3141 page_t *pp;
3144 3142 pgcnt_t repl_npgs, curnpgs;
3145 3143 pgcnt_t i;
3146 3144 uint_t repl_szc = replacement->p_szc;
3147 3145 page_t *first_repl = replacement;
3148 3146 page_t *repl;
3149 3147 spgcnt_t npgs;
3150 3148
3151 3149 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3152 3150
3153 3151 ASSERT(repl_szc != 0);
3154 3152 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3155 3153
3156 3154 i = 0;
3157 3155 while (repl_npgs) {
3158 3156 spgcnt_t nreloc;
3159 3157 int err;
3160 3158 ASSERT(replacement != NULL);
3161 3159 pp = targ[i];
3162 3160 ASSERT(pp->p_szc < repl_szc);
3163 3161 ASSERT(PAGE_EXCL(pp));
3164 3162 ASSERT(!PP_ISFREE(pp));
3165 3163 curnpgs = page_get_pagecnt(pp->p_szc);
3166 3164 if (curnpgs == 1) {
3167 3165 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3168 3166 repl = replacement;
3169 3167 page_sub(&replacement, repl);
3170 3168 ASSERT(PAGE_EXCL(repl));
3171 3169 ASSERT(!PP_ISFREE(repl));
3172 3170 ASSERT(repl->p_szc == repl_szc);
3173 3171 } else {
3174 3172 page_t *repl_savepp;
3175 3173 int j;
3176 3174 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3177 3175 repl_savepp = replacement;
3178 3176 for (j = 0; j < curnpgs; j++) {
3179 3177 repl = replacement;
3180 3178 page_sub(&replacement, repl);
3181 3179 ASSERT(PAGE_EXCL(repl));
3182 3180 ASSERT(!PP_ISFREE(repl));
3183 3181 ASSERT(repl->p_szc == repl_szc);
3184 3182 ASSERT(page_pptonum(targ[i + j]) ==
3185 3183 page_pptonum(targ[i]) + j);
3186 3184 }
3187 3185 repl = repl_savepp;
3188 3186 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3189 3187 }
3190 3188 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3191 3189 if (err || nreloc != curnpgs) {
3192 3190 panic("segvn_relocate_pages: "
3193 3191 "page_relocate failed err=%d curnpgs=%ld "
3194 3192 "nreloc=%ld", err, curnpgs, nreloc);
3195 3193 }
3196 3194 ASSERT(curnpgs <= repl_npgs);
3197 3195 repl_npgs -= curnpgs;
3198 3196 i += curnpgs;
3199 3197 }
3200 3198 ASSERT(replacement == NULL);
3201 3199
3202 3200 repl = first_repl;
3203 3201 repl_npgs = npgs;
3204 3202 for (i = 0; i < repl_npgs; i++) {
3205 3203 ASSERT(PAGE_EXCL(repl));
3206 3204 ASSERT(!PP_ISFREE(repl));
3207 3205 targ[i] = repl;
3208 3206 page_downgrade(targ[i]);
3209 3207 repl++;
3210 3208 }
3211 3209 }
3212 3210
3213 3211 /*
3214 3212 * Check if all pages in ppa array are complete smaller than szc pages and
3215 3213 * their roots will still be aligned relative to their current size if the
3216 3214 * entire ppa array is relocated into one szc page. If these conditions are
3217 3215 * not met return 0.
3218 3216 *
3219 3217 * If all pages are properly aligned attempt to upgrade their locks
3220 3218 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3221 3219 * upgrdfail was set to 0 by caller.
3222 3220 *
3223 3221 * Return 1 if all pages are aligned and locked exclusively.
3224 3222 *
3225 3223 * If all pages in ppa array happen to be physically contiguous to make one
3226 3224 * szc page and all exclusive locks are successfully obtained promote the page
3227 3225 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3228 3226 */
3229 3227 static int
3230 3228 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3231 3229 {
3232 3230 page_t *pp;
3233 3231 pfn_t pfn;
3234 3232 pgcnt_t totnpgs = page_get_pagecnt(szc);
3235 3233 pfn_t first_pfn;
3236 3234 int contig = 1;
3237 3235 pgcnt_t i;
3238 3236 pgcnt_t j;
3239 3237 uint_t curszc;
3240 3238 pgcnt_t curnpgs;
3241 3239 int root = 0;
3242 3240
3243 3241 ASSERT(szc > 0);
3244 3242
3245 3243 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3246 3244
3247 3245 for (i = 0; i < totnpgs; i++) {
3248 3246 pp = ppa[i];
3249 3247 ASSERT(PAGE_SHARED(pp));
3250 3248 ASSERT(!PP_ISFREE(pp));
3251 3249 pfn = page_pptonum(pp);
3252 3250 if (i == 0) {
3253 3251 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3254 3252 contig = 0;
3255 3253 } else {
3256 3254 first_pfn = pfn;
3257 3255 }
3258 3256 } else if (contig && pfn != first_pfn + i) {
3259 3257 contig = 0;
3260 3258 }
3261 3259 if (pp->p_szc == 0) {
3262 3260 if (root) {
3263 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3264 3262 return (0);
3265 3263 }
3266 3264 } else if (!root) {
3267 3265 if ((curszc = pp->p_szc) >= szc) {
3268 3266 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3269 3267 return (0);
3270 3268 }
3271 3269 if (curszc == 0) {
3272 3270 /*
3273 3271 * p_szc changed means we don't have all pages
3274 3272 * locked. return failure.
3275 3273 */
3276 3274 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3277 3275 return (0);
3278 3276 }
3279 3277 curnpgs = page_get_pagecnt(curszc);
3280 3278 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3281 3279 !IS_P2ALIGNED(i, curnpgs)) {
3282 3280 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3283 3281 return (0);
3284 3282 }
3285 3283 root = 1;
3286 3284 } else {
3287 3285 ASSERT(i > 0);
3288 3286 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3289 3287 if (pp->p_szc != curszc) {
3290 3288 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3291 3289 return (0);
3292 3290 }
3293 3291 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3294 3292 panic("segvn_full_szcpages: "
3295 3293 "large page not physically contiguous");
3296 3294 }
3297 3295 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3298 3296 root = 0;
3299 3297 }
3300 3298 }
3301 3299 }
3302 3300
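	/*
	 * Alignment checks passed. Try to upgrade every page from SHARED to
	 * EXCL; if any upgrade fails, downgrade the pages already upgraded,
	 * report the blocking page's szc via *pszc, set *upgrdfail and fail.
	 */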
3303 3301 for (i = 0; i < totnpgs; i++) {
3304 3302 ASSERT(ppa[i]->p_szc < szc);
3305 3303 if (!page_tryupgrade(ppa[i])) {
3306 3304 for (j = 0; j < i; j++) {
3307 3305 page_downgrade(ppa[j]);
3308 3306 }
3309 3307 *pszc = ppa[i]->p_szc;
3310 3308 *upgrdfail = 1;
3311 3309 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3312 3310 return (0);
3313 3311 }
3314 3312 }
3315 3313
3316 3314 /*
3317 3315	 * When a page is put on a free cachelist its szc is set to 0. If the
3318 3316	 * file system reclaimed pages from the cachelist, the target pages will
3319 3317	 * be physically contiguous with p_szc of 0. In this case just upgrade
3320 3318	 * the szc of the target pages without any relocations.
3321 3319	 * To avoid any hat issues with previous small mappings,
3322 3320	 * hat_pageunload() the target pages first.
3323 3321 */
3324 3322 if (contig) {
3325 3323 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3326 3324 for (i = 0; i < totnpgs; i++) {
3327 3325 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3328 3326 }
3329 3327 for (i = 0; i < totnpgs; i++) {
3330 3328 ppa[i]->p_szc = szc;
3331 3329 }
3332 3330 for (i = 0; i < totnpgs; i++) {
3333 3331 ASSERT(PAGE_EXCL(ppa[i]));
3334 3332 page_downgrade(ppa[i]);
3335 3333 }
3336 3334 if (pszc != NULL) {
3337 3335 *pszc = szc;
3338 3336 }
3339 3337 }
3340 3338 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3341 3339 return (1);
3342 3340 }
3343 3341
3344 3342 /*
3345 3343 * Create physically contiguous pages for [vp, off] - [vp, off +
3346 3344	 * page_size(szc)) range and for a private segment return them in the ppa array.
3347 3345	 * Pages are created either via I/O or relocations.
3348 3346 *
3349 3347 * Return 1 on success and 0 on failure.
3350 3348 *
3351 3349	 * If physically contiguous pages already exist for this range, return 1 without
3352 3350	 * filling the ppa array. The caller initializes ppa[0] to NULL to detect that
3353 3351	 * the ppa array wasn't filled; in this case the caller fills it via VOP_GETPAGE().
3354 3352 */
3355 3353
3356 3354 static int
3357 3355 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3358 3356 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3359 3357 int *downsize)
3360 3358
3361 3359 {
3362 3360 page_t *pplist = *ppplist;
3363 3361 size_t pgsz = page_get_pagesize(szc);
3364 3362 pgcnt_t pages = btop(pgsz);
3365 3363 ulong_t start_off = off;
3366 3364 u_offset_t eoff = off + pgsz;
3367 3365 spgcnt_t nreloc;
3368 3366 u_offset_t io_off = off;
3369 3367 size_t io_len;
3370 3368 page_t *io_pplist = NULL;
3371 3369 page_t *done_pplist = NULL;
3372 3370 pgcnt_t pgidx = 0;
3373 3371 page_t *pp;
3374 3372 page_t *newpp;
3375 3373 page_t *targpp;
3376 3374 int io_err = 0;
3377 3375 int i;
3378 3376 pfn_t pfn;
3379 3377 ulong_t ppages;
3380 3378 page_t *targ_pplist = NULL;
3381 3379 page_t *repl_pplist = NULL;
3382 3380 page_t *tmp_pplist;
3383 3381 int nios = 0;
3384 3382 uint_t pszc;
3385 3383 struct vattr va;
3386 3384
3387 3385 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3388 3386
3389 3387 ASSERT(szc != 0);
3390 3388 ASSERT(pplist->p_szc == szc);
3391 3389
3392 3390 /*
3393 3391	 * downsize will be set to 1 only if we fail to lock pages. This will
3394 3392	 * allow subsequent faults to try to relocate the page again. If we
3395 3393	 * fail due to misalignment, don't downsize; let the caller map the
3396 3394	 * whole region with small mappings to avoid more faults into the area
3397 3395	 * where we can't get large pages anyway.
3398 3396 */
3399 3397 *downsize = 0;
3400 3398
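	/*
	 * Walk the range one PAGESIZE at a time. Pages that don't exist yet are
	 * taken from the preallocated replacement list and queued on io_pplist
	 * so each contiguous run can be read in with a single VOP_PAGEIO().
	 * Pages that already exist are collected on targ_pplist (roots only)
	 * with matching replacement pages on repl_pplist for relocation later.
	 */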
3401 3399 while (off < eoff) {
3402 3400 newpp = pplist;
3403 3401 ASSERT(newpp != NULL);
3404 3402 ASSERT(PAGE_EXCL(newpp));
3405 3403 ASSERT(!PP_ISFREE(newpp));
3406 3404 /*
3407 3405 * we pass NULL for nrelocp to page_lookup_create()
3408 3406 * so that it doesn't relocate. We relocate here
3409 3407 * later only after we make sure we can lock all
3410 3408 * pages in the range we handle and they are all
3411 3409 * aligned.
3412 3410 */
3413 3411 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3414 3412 ASSERT(pp != NULL);
3415 3413 ASSERT(!PP_ISFREE(pp));
3416 3414 ASSERT(pp->p_vnode == vp);
3417 3415 ASSERT(pp->p_offset == off);
3418 3416 if (pp == newpp) {
3419 3417 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3420 3418 page_sub(&pplist, pp);
3421 3419 ASSERT(PAGE_EXCL(pp));
3422 3420 ASSERT(page_iolock_assert(pp));
3423 3421 page_list_concat(&io_pplist, &pp);
3424 3422 off += PAGESIZE;
3425 3423 continue;
3426 3424 }
3427 3425 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3428 3426 pfn = page_pptonum(pp);
3429 3427 pszc = pp->p_szc;
3430 3428 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3431 3429 IS_P2ALIGNED(pfn, pages)) {
3432 3430 ASSERT(repl_pplist == NULL);
3433 3431 ASSERT(done_pplist == NULL);
3434 3432 ASSERT(pplist == *ppplist);
3435 3433 page_unlock(pp);
3436 3434 page_free_replacement_page(pplist);
3437 3435 page_create_putback(pages);
3438 3436 *ppplist = NULL;
3439 3437 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3440 3438 return (1);
3441 3439 }
3442 3440 if (pszc >= szc) {
3443 3441 page_unlock(pp);
3444 3442 segvn_faultvnmpss_align_err1++;
3445 3443 goto out;
3446 3444 }
3447 3445 ppages = page_get_pagecnt(pszc);
3448 3446 if (!IS_P2ALIGNED(pfn, ppages)) {
3449 3447 ASSERT(pszc > 0);
3450 3448 /*
3451 3449 * sizing down to pszc won't help.
3452 3450 */
3453 3451 page_unlock(pp);
3454 3452 segvn_faultvnmpss_align_err2++;
3455 3453 goto out;
3456 3454 }
3457 3455 pfn = page_pptonum(newpp);
3458 3456 if (!IS_P2ALIGNED(pfn, ppages)) {
3459 3457 ASSERT(pszc > 0);
3460 3458 /*
3461 3459 * sizing down to pszc won't help.
3462 3460 */
3463 3461 page_unlock(pp);
3464 3462 segvn_faultvnmpss_align_err3++;
3465 3463 goto out;
3466 3464 }
3467 3465 if (!PAGE_EXCL(pp)) {
3468 3466 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3469 3467 page_unlock(pp);
3470 3468 *downsize = 1;
3471 3469 *ret_pszc = pp->p_szc;
3472 3470 goto out;
3473 3471 }
3474 3472 targpp = pp;
3475 3473 if (io_pplist != NULL) {
3476 3474 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3477 3475 io_len = off - io_off;
3478 3476 /*
3479 3477 * Some file systems like NFS don't check EOF
3480 3478 * conditions in VOP_PAGEIO(). Check it here
3481 3479 * now that pages are locked SE_EXCL. Any file
3482 3480			 * truncation will wait until the pages are
3483 3481			 * unlocked, so there's no need to worry that the
3484 3482			 * file will be truncated after we check its size here.
3485 3483 * XXX fix NFS to remove this check.
3486 3484 */
3487 3485 va.va_mask = AT_SIZE;
3488 3486 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3489 3487 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3490 3488 page_unlock(targpp);
3491 3489 goto out;
3492 3490 }
3493 3491 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3494 3492 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3495 3493 *downsize = 1;
3496 3494 *ret_pszc = 0;
3497 3495 page_unlock(targpp);
3498 3496 goto out;
3499 3497 }
3500 3498 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3501 3499 B_READ, svd->cred, NULL);
3502 3500 if (io_err) {
3503 3501 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3504 3502 page_unlock(targpp);
3505 3503 if (io_err == EDEADLK) {
3506 3504 segvn_vmpss_pageio_deadlk_err++;
3507 3505 }
3508 3506 goto out;
3509 3507 }
3510 3508 nios++;
3511 3509 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3512 3510 while (io_pplist != NULL) {
3513 3511 pp = io_pplist;
3514 3512 page_sub(&io_pplist, pp);
3515 3513 ASSERT(page_iolock_assert(pp));
3516 3514 page_io_unlock(pp);
3517 3515 pgidx = (pp->p_offset - start_off) >>
3518 3516 PAGESHIFT;
3519 3517 ASSERT(pgidx < pages);
3520 3518 ppa[pgidx] = pp;
3521 3519 page_list_concat(&done_pplist, &pp);
3522 3520 }
3523 3521 }
3524 3522 pp = targpp;
3525 3523 ASSERT(PAGE_EXCL(pp));
3526 3524 ASSERT(pp->p_szc <= pszc);
3527 3525 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3528 3526 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3529 3527 page_unlock(pp);
3530 3528 *downsize = 1;
3531 3529 *ret_pszc = pp->p_szc;
3532 3530 goto out;
3533 3531 }
3534 3532 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3535 3533 /*
3536 3534		 * Page szc could have changed before the entire group was
3537 3535		 * locked. Reread page szc.
3538 3536 */
3539 3537 pszc = pp->p_szc;
3540 3538 ppages = page_get_pagecnt(pszc);
3541 3539
3542 3540 /* link just the roots */
3543 3541 page_list_concat(&targ_pplist, &pp);
3544 3542 page_sub(&pplist, newpp);
3545 3543 page_list_concat(&repl_pplist, &newpp);
3546 3544 off += PAGESIZE;
3547 3545 while (--ppages != 0) {
3548 3546 newpp = pplist;
3549 3547 page_sub(&pplist, newpp);
3550 3548 off += PAGESIZE;
3551 3549 }
3552 3550 io_off = off;
3553 3551 }
3554 3552 if (io_pplist != NULL) {
3555 3553 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3556 3554 io_len = eoff - io_off;
3557 3555 va.va_mask = AT_SIZE;
3558 3556 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3559 3557 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3560 3558 goto out;
3561 3559 }
3562 3560 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3563 3561 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3564 3562 *downsize = 1;
3565 3563 *ret_pszc = 0;
3566 3564 goto out;
3567 3565 }
3568 3566 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3569 3567 B_READ, svd->cred, NULL);
3570 3568 if (io_err) {
3571 3569 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3572 3570 if (io_err == EDEADLK) {
3573 3571 segvn_vmpss_pageio_deadlk_err++;
3574 3572 }
3575 3573 goto out;
3576 3574 }
3577 3575 nios++;
3578 3576 while (io_pplist != NULL) {
3579 3577 pp = io_pplist;
3580 3578 page_sub(&io_pplist, pp);
3581 3579 ASSERT(page_iolock_assert(pp));
3582 3580 page_io_unlock(pp);
3583 3581 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3584 3582 ASSERT(pgidx < pages);
3585 3583 ppa[pgidx] = pp;
3586 3584 }
3587 3585 }
3588 3586 /*
3589 3587	 * We're now bound to succeed or panic.
3590 3588	 * Remove pages from done_pplist; it's not needed anymore.
3591 3589 */
3592 3590 while (done_pplist != NULL) {
3593 3591 pp = done_pplist;
3594 3592 page_sub(&done_pplist, pp);
3595 3593 }
3596 3594 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3597 3595 ASSERT(pplist == NULL);
3598 3596 *ppplist = NULL;
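	/*
	 * Relocate each pre-existing target page onto its preallocated
	 * replacement and record the resulting constituent pages in ppa[]
	 * by their file offsets.
	 */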
3599 3597 while (targ_pplist != NULL) {
3600 3598 int ret;
3601 3599 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3602 3600 ASSERT(repl_pplist);
3603 3601 pp = targ_pplist;
3604 3602 page_sub(&targ_pplist, pp);
3605 3603 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3606 3604 newpp = repl_pplist;
3607 3605 page_sub(&repl_pplist, newpp);
3608 3606 #ifdef DEBUG
3609 3607 pfn = page_pptonum(pp);
3610 3608 pszc = pp->p_szc;
3611 3609 ppages = page_get_pagecnt(pszc);
3612 3610 ASSERT(IS_P2ALIGNED(pfn, ppages));
3613 3611 pfn = page_pptonum(newpp);
3614 3612 ASSERT(IS_P2ALIGNED(pfn, ppages));
3615 3613 ASSERT(P2PHASE(pfn, pages) == pgidx);
3616 3614 #endif
3617 3615 nreloc = 0;
3618 3616 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3619 3617 if (ret != 0 || nreloc == 0) {
3620 3618 panic("segvn_fill_vp_pages: "
3621 3619 "page_relocate failed");
3622 3620 }
3623 3621 pp = newpp;
3624 3622 while (nreloc-- != 0) {
3625 3623 ASSERT(PAGE_EXCL(pp));
3626 3624 ASSERT(pp->p_vnode == vp);
3627 3625 ASSERT(pgidx ==
3628 3626 ((pp->p_offset - start_off) >> PAGESHIFT));
3629 3627 ppa[pgidx++] = pp;
3630 3628 pp++;
3631 3629 }
3632 3630 }
3633 3631
3634 3632 if (svd->type == MAP_PRIVATE) {
3635 3633 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3636 3634 for (i = 0; i < pages; i++) {
3637 3635 ASSERT(ppa[i] != NULL);
3638 3636 ASSERT(PAGE_EXCL(ppa[i]));
3639 3637 ASSERT(ppa[i]->p_vnode == vp);
3640 3638 ASSERT(ppa[i]->p_offset ==
3641 3639 start_off + (i << PAGESHIFT));
3642 3640 page_downgrade(ppa[i]);
3643 3641 }
3644 3642 ppa[pages] = NULL;
3645 3643 } else {
3646 3644 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3647 3645 /*
3648 3646		 * The caller will still call VOP_GETPAGE() for shared segments
3649 3647		 * to check FS write permissions. For private segments we map the
3650 3648		 * file read-only anyway, so no VOP_GETPAGE() is needed.
3651 3649 */
3652 3650 for (i = 0; i < pages; i++) {
3653 3651 ASSERT(ppa[i] != NULL);
3654 3652 ASSERT(PAGE_EXCL(ppa[i]));
3655 3653 ASSERT(ppa[i]->p_vnode == vp);
3656 3654 ASSERT(ppa[i]->p_offset ==
3657 3655 start_off + (i << PAGESHIFT));
3658 3656 page_unlock(ppa[i]);
3659 3657 }
3660 3658 ppa[0] = NULL;
3661 3659 }
3662 3660
3663 3661 return (1);
3664 3662 out:
3665 3663 /*
3666 3664 * Do the cleanup. Unlock target pages we didn't relocate. They are
3667 3665	 * linked on targ_pplist by their root pages. Reassemble unused replacement
3668 3666	 * and I/O pages back onto pplist.
3669 3667 */
3670 3668 if (io_pplist != NULL) {
3671 3669 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3672 3670 pp = io_pplist;
3673 3671 do {
3674 3672 ASSERT(pp->p_vnode == vp);
3675 3673 ASSERT(pp->p_offset == io_off);
3676 3674 ASSERT(page_iolock_assert(pp));
3677 3675 page_io_unlock(pp);
3678 3676 page_hashout(pp, NULL);
3679 3677 io_off += PAGESIZE;
3680 3678 } while ((pp = pp->p_next) != io_pplist);
3681 3679 page_list_concat(&io_pplist, &pplist);
3682 3680 pplist = io_pplist;
3683 3681 }
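	/*
	 * Unlock the target pages we never relocated and collect their unused
	 * replacement pages on tmp_pplist so they can be rejoined with pplist
	 * below.
	 */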
3684 3682 tmp_pplist = NULL;
3685 3683 while (targ_pplist != NULL) {
3686 3684 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3687 3685 pp = targ_pplist;
3688 3686 ASSERT(PAGE_EXCL(pp));
3689 3687 page_sub(&targ_pplist, pp);
3690 3688
3691 3689 pszc = pp->p_szc;
3692 3690 ppages = page_get_pagecnt(pszc);
3693 3691 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3694 3692
3695 3693 if (pszc != 0) {
3696 3694 group_page_unlock(pp);
3697 3695 }
3698 3696 page_unlock(pp);
3699 3697
3700 3698 pp = repl_pplist;
3701 3699 ASSERT(pp != NULL);
3702 3700 ASSERT(PAGE_EXCL(pp));
3703 3701 ASSERT(pp->p_szc == szc);
3704 3702 page_sub(&repl_pplist, pp);
3705 3703
3706 3704 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3707 3705
3708 3706 /* relink replacement page */
3709 3707 page_list_concat(&tmp_pplist, &pp);
3710 3708 while (--ppages != 0) {
3711 3709 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3712 3710 pp++;
3713 3711 ASSERT(PAGE_EXCL(pp));
3714 3712 ASSERT(pp->p_szc == szc);
3715 3713 page_list_concat(&tmp_pplist, &pp);
3716 3714 }
3717 3715 }
3718 3716 if (tmp_pplist != NULL) {
3719 3717 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3720 3718 page_list_concat(&tmp_pplist, &pplist);
3721 3719 pplist = tmp_pplist;
3722 3720 }
3723 3721 /*
3724 3722	 * At this point all pages are either on done_pplist or
3725 3723	 * pplist. They can't all be on done_pplist; otherwise
3726 3724	 * we'd already be done.
3727 3725 */
3728 3726 ASSERT(pplist != NULL);
3729 3727 if (nios != 0) {
3730 3728 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3731 3729 pp = pplist;
3732 3730 do {
3733 3731 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3734 3732 ASSERT(pp->p_szc == szc);
3735 3733 ASSERT(PAGE_EXCL(pp));
3736 3734 ASSERT(pp->p_vnode != vp);
3737 3735 pp->p_szc = 0;
3738 3736 } while ((pp = pp->p_next) != pplist);
3739 3737
3740 3738 pp = done_pplist;
3741 3739 do {
3742 3740 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3743 3741 ASSERT(pp->p_szc == szc);
3744 3742 ASSERT(PAGE_EXCL(pp));
3745 3743 ASSERT(pp->p_vnode == vp);
3746 3744 pp->p_szc = 0;
3747 3745 } while ((pp = pp->p_next) != done_pplist);
3748 3746
3749 3747 while (pplist != NULL) {
3750 3748 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3751 3749 pp = pplist;
3752 3750 page_sub(&pplist, pp);
3753 3751 page_free(pp, 0);
3754 3752 }
3755 3753
3756 3754 while (done_pplist != NULL) {
3757 3755 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3758 3756 pp = done_pplist;
3759 3757 page_sub(&done_pplist, pp);
3760 3758 page_unlock(pp);
3761 3759 }
3762 3760 *ppplist = NULL;
3763 3761 return (0);
3764 3762 }
3765 3763 ASSERT(pplist == *ppplist);
3766 3764 if (io_err) {
3767 3765 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3768 3766 /*
3769 3767		 * Don't downsize on I/O error.
3770 3768		 * See if VOP_GETPAGE() succeeds.
3771 3769		 * pplist may still be used in this case
3772 3770		 * for relocations.
3773 3771 */
3774 3772 return (0);
3775 3773 }
3776 3774 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3777 3775 page_free_replacement_page(pplist);
3778 3776 page_create_putback(pages);
3779 3777 *ppplist = NULL;
3780 3778 return (0);
3781 3779 }
3782 3780
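/*
 * Tunable: when nonzero, large-page faults may step down through intermediate
 * page sizes on retry instead of jumping straight to the next supported size
 * (see the size down logic in segvn_fault_vnodepages() and
 * segvn_fault_anonpages()).
 */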
3783 3781 int segvn_anypgsz = 0;
3784 3782
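/*
 * Undo the softlockcnt increment done for an F_SOFTLOCK fault when the
 * fault cannot be completed for this set of pages.
 */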
3785 3783 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3786 3784 if ((type) == F_SOFTLOCK) { \
3787 3785 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3788 3786 -(pages)); \
3789 3787 }
3790 3788
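/*
 * For VMODSORT vnodes keep hat mod bits consistent with the fault: on a
 * write fault mark every page modified; otherwise, if write permission
 * would be granted but some page is not yet modified, strip PROT_WRITE
 * so the first real write still faults and sets the mod bit.
 */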
3791 3789 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3792 3790 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3793 3791 if ((rw) == S_WRITE) { \
3794 3792 for (i = 0; i < (pages); i++) { \
3795 3793 ASSERT((ppa)[i]->p_vnode == \
3796 3794 (ppa)[0]->p_vnode); \
3797 3795 hat_setmod((ppa)[i]); \
3798 3796 } \
3799 3797 } else if ((rw) != S_OTHER && \
3800 3798 ((prot) & (vpprot) & PROT_WRITE)) { \
3801 3799 for (i = 0; i < (pages); i++) { \
3802 3800 ASSERT((ppa)[i]->p_vnode == \
3803 3801 (ppa)[0]->p_vnode); \
3804 3802 if (!hat_ismod((ppa)[i])) { \
3805 3803 prot &= ~PROT_WRITE; \
3806 3804 break; \
3807 3805 } \
3808 3806 } \
3809 3807 } \
3810 3808 }
3811 3809
3812 3810 #ifdef VM_STATS
3813 3811
3814 3812 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3815 3813 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3816 3814
3817 3815 #else /* VM_STATS */
3818 3816
3819 3817 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3820 3818
3821 3819 #endif
3822 3820
3823 3821 static faultcode_t
3824 3822 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3825 3823 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3826 3824 caddr_t eaddr, int brkcow)
3827 3825 {
3828 3826 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3829 3827 struct anon_map *amp = svd->amp;
3830 3828 uchar_t segtype = svd->type;
3831 3829 uint_t szc = seg->s_szc;
3832 3830 size_t pgsz = page_get_pagesize(szc);
3833 3831 size_t maxpgsz = pgsz;
3834 3832 pgcnt_t pages = btop(pgsz);
3835 3833 pgcnt_t maxpages = pages;
3836 3834 size_t ppasize = (pages + 1) * sizeof (page_t *);
3837 3835 caddr_t a = lpgaddr;
3838 3836 caddr_t maxlpgeaddr = lpgeaddr;
3839 3837 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3840 3838 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3841 3839 struct vpage *vpage = (svd->vpage != NULL) ?
3842 3840 &svd->vpage[seg_page(seg, a)] : NULL;
3843 3841 vnode_t *vp = svd->vp;
3844 3842 page_t **ppa;
3845 3843 uint_t pszc;
3846 3844 size_t ppgsz;
3847 3845 pgcnt_t ppages;
3848 3846 faultcode_t err = 0;
3849 3847 int ierr;
3850 3848 int vop_size_err = 0;
3851 3849 uint_t protchk, prot, vpprot;
3852 3850 ulong_t i;
3853 3851 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3854 3852 anon_sync_obj_t an_cookie;
3855 3853 enum seg_rw arw;
3856 3854 int alloc_failed = 0;
3857 3855 int adjszc_chk;
3858 3856 struct vattr va;
3859 3857 int xhat = 0;
3860 3858 page_t *pplist;
3861 3859 pfn_t pfn;
3862 3860 int physcontig;
3863 3861 int upgrdfail;
3864 3862 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3865 3863 int tron = (svd->tr_state == SEGVN_TR_ON);
3866 3864
3867 3865 ASSERT(szc != 0);
3868 3866 ASSERT(vp != NULL);
3869 3867 ASSERT(brkcow == 0 || amp != NULL);
3870 3868 ASSERT(tron == 0 || amp != NULL);
3871 3869 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3872 3870 ASSERT(!(svd->flags & MAP_NORESERVE));
3873 3871 ASSERT(type != F_SOFTUNLOCK);
3874 3872 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3875 3873 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3876 3874 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3877 3875 ASSERT(seg->s_szc < NBBY * sizeof (int));
3878 3876 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3879 3877 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3880 3878
3881 3879 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3882 3880 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3883 3881
3884 3882 if (svd->flags & MAP_TEXT) {
3885 3883 hat_flag |= HAT_LOAD_TEXT;
3886 3884 }
3887 3885
3888 3886 if (svd->pageprot) {
3889 3887 switch (rw) {
3890 3888 case S_READ:
3891 3889 protchk = PROT_READ;
3892 3890 break;
3893 3891 case S_WRITE:
3894 3892 protchk = PROT_WRITE;
3895 3893 break;
3896 3894 case S_EXEC:
3897 3895 protchk = PROT_EXEC;
3898 3896 break;
3899 3897 case S_OTHER:
3900 3898 default:
3901 3899 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3902 3900 break;
3903 3901 }
3904 3902 } else {
3905 3903 prot = svd->prot;
3906 3904 /* caller has already done segment level protection check. */
3907 3905 }
3908 3906
3909 3907 if (seg->s_as->a_hat != hat) {
3910 3908 xhat = 1;
3911 3909 }
3912 3910
3913 3911 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3914 3912 SEGVN_VMSTAT_FLTVNPAGES(2);
3915 3913 arw = S_READ;
3916 3914 } else {
3917 3915 arw = rw;
3918 3916 }
3919 3917
3920 3918 ppa = kmem_alloc(ppasize, KM_SLEEP);
3921 3919
3922 3920 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3923 3921
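	/*
	 * The outer loop retries the remaining range with a different page
	 * size whenever the inner loop breaks out with ierr == -1 (size down)
	 * or ierr == -2 (size up); see the size adjustment code after the
	 * inner loop.
	 */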
3924 3922 for (;;) {
3925 3923 adjszc_chk = 0;
3926 3924 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3927 3925 if (adjszc_chk) {
3928 3926 while (szc < seg->s_szc) {
3929 3927 uintptr_t e;
3930 3928 uint_t tszc;
3931 3929 tszc = segvn_anypgsz_vnode ? szc + 1 :
3932 3930 seg->s_szc;
3933 3931 ppgsz = page_get_pagesize(tszc);
3934 3932 if (!IS_P2ALIGNED(a, ppgsz) ||
3935 3933 ((alloc_failed >> tszc) & 0x1)) {
3936 3934 break;
3937 3935 }
3938 3936 SEGVN_VMSTAT_FLTVNPAGES(4);
3939 3937 szc = tszc;
3940 3938 pgsz = ppgsz;
3941 3939 pages = btop(pgsz);
3942 3940 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3943 3941 lpgeaddr = (caddr_t)e;
3944 3942 }
3945 3943 }
3946 3944
3947 3945 again:
3948 3946 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3949 3947 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3950 3948 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3951 3949 anon_array_enter(amp, aindx, &an_cookie);
3952 3950 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3953 3951 SEGVN_VMSTAT_FLTVNPAGES(5);
3954 3952 ASSERT(anon_pages(amp->ahp, aindx,
3955 3953 maxpages) == maxpages);
3956 3954 anon_array_exit(&an_cookie);
3957 3955 ANON_LOCK_EXIT(&->a_rwlock);
3958 3956 err = segvn_fault_anonpages(hat, seg,
3959 3957 a, a + maxpgsz, type, rw,
3960 3958 MAX(a, addr),
3961 3959 MIN(a + maxpgsz, eaddr), brkcow);
3962 3960 if (err != 0) {
3963 3961 SEGVN_VMSTAT_FLTVNPAGES(6);
3964 3962 goto out;
3965 3963 }
3966 3964 if (szc < seg->s_szc) {
3967 3965 szc = seg->s_szc;
3968 3966 pgsz = maxpgsz;
3969 3967 pages = maxpages;
3970 3968 lpgeaddr = maxlpgeaddr;
3971 3969 }
3972 3970 goto next;
3973 3971 } else {
3974 3972 ASSERT(anon_pages(amp->ahp, aindx,
3975 3973 maxpages) == 0);
3976 3974 SEGVN_VMSTAT_FLTVNPAGES(7);
3977 3975 anon_array_exit(&an_cookie);
3978 3976 ANON_LOCK_EXIT(&->a_rwlock);
3979 3977 }
3980 3978 }
3981 3979 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3982 3980 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3983 3981
3984 3982 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3985 3983 ASSERT(vpage != NULL);
3986 3984 prot = VPP_PROT(vpage);
3987 3985 ASSERT(sameprot(seg, a, maxpgsz));
3988 3986 if ((prot & protchk) == 0) {
3989 3987 SEGVN_VMSTAT_FLTVNPAGES(8);
3990 3988 err = FC_PROT;
3991 3989 goto out;
3992 3990 }
3993 3991 }
3994 3992 if (type == F_SOFTLOCK) {
3995 3993 atomic_add_long((ulong_t *)&svd->softlockcnt,
3996 3994 pages);
3997 3995 }
3998 3996
3999 3997 pplist = NULL;
4000 3998 physcontig = 0;
4001 3999 ppa[0] = NULL;
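			/*
			 * Fast path: if the file range is already backed by a
			 * physically contiguous large page there is nothing to
			 * build. Otherwise preallocate a replacement large page
			 * and, for SEGVN_PAGEIO vnodes, let segvn_fill_vp_pages()
			 * assemble one via I/O and/or relocation.
			 */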
4002 4000 if (!brkcow && !tron && szc &&
4003 4001 !page_exists_physcontig(vp, off, szc,
4004 4002 segtype == MAP_PRIVATE ? ppa : NULL)) {
4005 4003 SEGVN_VMSTAT_FLTVNPAGES(9);
4006 4004 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
4007 4005 szc, 0, 0) && type != F_SOFTLOCK) {
4008 4006 SEGVN_VMSTAT_FLTVNPAGES(10);
4009 4007 pszc = 0;
4010 4008 ierr = -1;
4011 4009 alloc_failed |= (1 << szc);
4012 4010 break;
4013 4011 }
4014 4012 if (pplist != NULL &&
4015 4013 vp->v_mpssdata == SEGVN_PAGEIO) {
4016 4014 int downsize;
4017 4015 SEGVN_VMSTAT_FLTVNPAGES(11);
4018 4016 physcontig = segvn_fill_vp_pages(svd,
4019 4017 vp, off, szc, ppa, &pplist,
4020 4018 &pszc, &downsize);
4021 4019 ASSERT(!physcontig || pplist == NULL);
4022 4020 if (!physcontig && downsize &&
4023 4021 type != F_SOFTLOCK) {
4024 4022 ASSERT(pplist == NULL);
4025 4023 SEGVN_VMSTAT_FLTVNPAGES(12);
4026 4024 ierr = -1;
4027 4025 break;
4028 4026 }
4029 4027 ASSERT(!physcontig ||
4030 4028 segtype == MAP_PRIVATE ||
4031 4029 ppa[0] == NULL);
4032 4030 if (physcontig && ppa[0] == NULL) {
4033 4031 physcontig = 0;
4034 4032 }
4035 4033 }
4036 4034 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
4037 4035 SEGVN_VMSTAT_FLTVNPAGES(13);
4038 4036 ASSERT(segtype == MAP_PRIVATE);
4039 4037 physcontig = 1;
4040 4038 }
4041 4039
4042 4040 if (!physcontig) {
4043 4041 SEGVN_VMSTAT_FLTVNPAGES(14);
4044 4042 ppa[0] = NULL;
4045 4043 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
4046 4044 &vpprot, ppa, pgsz, seg, a, arw,
4047 4045 svd->cred, NULL);
4048 4046 #ifdef DEBUG
4049 4047 if (ierr == 0) {
4050 4048 for (i = 0; i < pages; i++) {
4051 4049 ASSERT(PAGE_LOCKED(ppa[i]));
4052 4050 ASSERT(!PP_ISFREE(ppa[i]));
4053 4051 ASSERT(ppa[i]->p_vnode == vp);
4054 4052 ASSERT(ppa[i]->p_offset ==
4055 4053 off + (i << PAGESHIFT));
4056 4054 }
4057 4055 }
4058 4056 #endif /* DEBUG */
4059 4057 if (segtype == MAP_PRIVATE) {
4060 4058 SEGVN_VMSTAT_FLTVNPAGES(15);
4061 4059 vpprot &= ~PROT_WRITE;
4062 4060 }
4063 4061 } else {
4064 4062 ASSERT(segtype == MAP_PRIVATE);
4065 4063 SEGVN_VMSTAT_FLTVNPAGES(16);
4066 4064 vpprot = PROT_ALL & ~PROT_WRITE;
4067 4065 ierr = 0;
4068 4066 }
4069 4067
4070 4068 if (ierr != 0) {
4071 4069 SEGVN_VMSTAT_FLTVNPAGES(17);
4072 4070 if (pplist != NULL) {
4073 4071 SEGVN_VMSTAT_FLTVNPAGES(18);
4074 4072 page_free_replacement_page(pplist);
4075 4073 page_create_putback(pages);
4076 4074 }
4077 4075 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4078 4076 if (a + pgsz <= eaddr) {
4079 4077 SEGVN_VMSTAT_FLTVNPAGES(19);
4080 4078 err = FC_MAKE_ERR(ierr);
4081 4079 goto out;
4082 4080 }
4083 4081 va.va_mask = AT_SIZE;
4084 4082 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4085 4083 SEGVN_VMSTAT_FLTVNPAGES(20);
4086 4084 err = FC_MAKE_ERR(EIO);
4087 4085 goto out;
4088 4086 }
4089 4087 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4090 4088 SEGVN_VMSTAT_FLTVNPAGES(21);
4091 4089 err = FC_MAKE_ERR(ierr);
4092 4090 goto out;
4093 4091 }
4094 4092 if (btopr(va.va_size) <
4095 4093 btopr(off + (eaddr - a))) {
4096 4094 SEGVN_VMSTAT_FLTVNPAGES(22);
4097 4095 err = FC_MAKE_ERR(ierr);
4098 4096 goto out;
4099 4097 }
4100 4098 if (brkcow || tron || type == F_SOFTLOCK) {
4101 4099 /* can't reduce map area */
4102 4100 SEGVN_VMSTAT_FLTVNPAGES(23);
4103 4101 vop_size_err = 1;
4104 4102 goto out;
4105 4103 }
4106 4104 SEGVN_VMSTAT_FLTVNPAGES(24);
4107 4105 ASSERT(szc != 0);
4108 4106 pszc = 0;
4109 4107 ierr = -1;
4110 4108 break;
4111 4109 }
4112 4110
4113 4111 if (amp != NULL) {
4114 4112 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4115 4113 anon_array_enter(amp, aindx, &an_cookie);
4116 4114 }
4117 4115 if (amp != NULL &&
4118 4116 anon_get_ptr(amp->ahp, aindx) != NULL) {
4119 4117 ulong_t taindx = P2ALIGN(aindx, maxpages);
4120 4118
4121 4119 SEGVN_VMSTAT_FLTVNPAGES(25);
4122 4120 ASSERT(anon_pages(amp->ahp, taindx,
4123 4121 maxpages) == maxpages);
4124 4122 for (i = 0; i < pages; i++) {
4125 4123 page_unlock(ppa[i]);
4126 4124 }
4127 4125 anon_array_exit(&an_cookie);
4128 4126 ANON_LOCK_EXIT(&->a_rwlock);
4129 4127 if (pplist != NULL) {
4130 4128 page_free_replacement_page(pplist);
4131 4129 page_create_putback(pages);
4132 4130 }
4133 4131 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4134 4132 if (szc < seg->s_szc) {
4135 4133 SEGVN_VMSTAT_FLTVNPAGES(26);
4136 4134 /*
4137 4135 * For private segments SOFTLOCK
4138 4136 * either always breaks cow (any rw
4139 4137 * type except S_READ_NOCOW) or
4140 4138 * address space is locked as writer
4141 4139 * (S_READ_NOCOW case) and anon slots
4142 4140 * can't show up on second check.
4143 4141 * Therefore if we are here for
4144 4142 * SOFTLOCK case it must be a cow
4145 4143 * break but cow break never reduces
4146 4144 * szc. text replication (tron) in
4147 4145 * this case works as cow break.
4148 4146 * Thus the assert below.
4149 4147 */
4150 4148 ASSERT(!brkcow && !tron &&
4151 4149 type != F_SOFTLOCK);
4152 4150 pszc = seg->s_szc;
4153 4151 ierr = -2;
4154 4152 break;
4155 4153 }
4156 4154 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4157 4155 goto again;
4158 4156 }
4159 4157 #ifdef DEBUG
4160 4158 if (amp != NULL) {
4161 4159 ulong_t taindx = P2ALIGN(aindx, maxpages);
4162 4160 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4163 4161 }
4164 4162 #endif /* DEBUG */
4165 4163
4166 4164 if (brkcow || tron) {
4167 4165 ASSERT(amp != NULL);
4168 4166 ASSERT(pplist == NULL);
4169 4167 ASSERT(szc == seg->s_szc);
4170 4168 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4171 4169 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4172 4170 SEGVN_VMSTAT_FLTVNPAGES(27);
4173 4171 ierr = anon_map_privatepages(amp, aindx, szc,
4174 4172 seg, a, prot, ppa, vpage, segvn_anypgsz,
4175 4173 tron ? PG_LOCAL : 0, svd->cred);
4176 4174 if (ierr != 0) {
4177 4175 SEGVN_VMSTAT_FLTVNPAGES(28);
4178 4176 anon_array_exit(&an_cookie);
4179 4177 ANON_LOCK_EXIT(&->a_rwlock);
4180 4178 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4181 4179 err = FC_MAKE_ERR(ierr);
4182 4180 goto out;
4183 4181 }
4184 4182
4185 4183 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4186 4184 /*
4187 4185 * p_szc can't be changed for locked
4188 4186 * swapfs pages.
4189 4187 */
4190 4188 ASSERT(svd->rcookie ==
4191 4189 HAT_INVALID_REGION_COOKIE);
4192 4190 hat_memload_array(hat, a, pgsz, ppa, prot,
4193 4191 hat_flag);
4194 4192
4195 4193 if (!(hat_flag & HAT_LOAD_LOCK)) {
4196 4194 SEGVN_VMSTAT_FLTVNPAGES(29);
4197 4195 for (i = 0; i < pages; i++) {
4198 4196 page_unlock(ppa[i]);
4199 4197 }
4200 4198 }
4201 4199 anon_array_exit(&an_cookie);
4202 4200 ANON_LOCK_EXIT(&->a_rwlock);
4203 4201 goto next;
4204 4202 }
4205 4203
4206 4204 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4207 4205 (!svd->pageprot && svd->prot == (prot & vpprot)));
4208 4206
4209 4207 pfn = page_pptonum(ppa[0]);
4210 4208 /*
4211 4209			 * hat_page_demote() needs an SE_EXCL lock on one of the
4212 4210			 * constituent page_t's and it decreases the root's p_szc
4213 4211			 * last. This means that if the root's p_szc equals szc and
4214 4212			 * all its constituent pages are locked, any
4215 4213			 * hat_page_demote() that could have changed p_szc to
4216 4214			 * szc is already done and no new hat_page_demote()
4217 4215			 * can start for this large page.
4218 4216 */
4219 4217
4220 4218 /*
4221 4219			 * We need to make sure the same mapping size is used for
4222 4220			 * the same address range if there's a possibility the
4223 4221			 * address is already mapped, because the hat layer panics
4224 4222			 * when a translation is loaded for a range already
4225 4223			 * mapped with a different page size. We achieve this
4226 4224			 * by always using the largest page size possible subject
4227 4225			 * to the constraints of page size, segment page size
4228 4226			 * and page alignment. Since mappings are invalidated
4229 4227			 * when those constraints change and make it
4230 4228			 * impossible to use the previously used mapping size, no
4231 4229			 * mapping size conflicts should happen.
4232 4230 */
4233 4231
4234 4232 chkszc:
4235 4233 if ((pszc = ppa[0]->p_szc) == szc &&
4236 4234 IS_P2ALIGNED(pfn, pages)) {
4237 4235
4238 4236 SEGVN_VMSTAT_FLTVNPAGES(30);
4239 4237 #ifdef DEBUG
4240 4238 for (i = 0; i < pages; i++) {
4241 4239 ASSERT(PAGE_LOCKED(ppa[i]));
4242 4240 ASSERT(!PP_ISFREE(ppa[i]));
4243 4241 ASSERT(page_pptonum(ppa[i]) ==
4244 4242 pfn + i);
4245 4243 ASSERT(ppa[i]->p_szc == szc);
4246 4244 ASSERT(ppa[i]->p_vnode == vp);
4247 4245 ASSERT(ppa[i]->p_offset ==
4248 4246 off + (i << PAGESHIFT));
4249 4247 }
4250 4248 #endif /* DEBUG */
4251 4249 /*
4252 4250				 * All pages are of the szc we need and they are
4253 4251				 * all locked so they can't change szc. Load
4254 4252				 * translations.
4255 4253				 *
4256 4254				 * If the page got promoted since the last check
4257 4255				 * we don't need pplist.
4258 4256 */
4259 4257 if (pplist != NULL) {
4260 4258 page_free_replacement_page(pplist);
4261 4259 page_create_putback(pages);
4262 4260 }
4263 4261 if (PP_ISMIGRATE(ppa[0])) {
4264 4262 page_migrate(seg, a, ppa, pages);
4265 4263 }
4266 4264 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4267 4265 prot, vpprot);
4268 4266 if (!xhat) {
4269 4267 hat_memload_array_region(hat, a, pgsz,
4270 4268 ppa, prot & vpprot, hat_flag,
4271 4269 svd->rcookie);
4272 4270 } else {
4273 4271 /*
4274 4272 * avoid large xhat mappings to FS
4275 4273 * pages so that hat_page_demote()
4276 4274 * doesn't need to check for xhat
4277 4275 * large mappings.
4278 4276 * Don't use regions with xhats.
4279 4277 */
4280 4278 for (i = 0; i < pages; i++) {
4281 4279 hat_memload(hat,
4282 4280 a + (i << PAGESHIFT),
4283 4281 ppa[i], prot & vpprot,
4284 4282 hat_flag);
4285 4283 }
4286 4284 }
4287 4285
4288 4286 if (!(hat_flag & HAT_LOAD_LOCK)) {
4289 4287 for (i = 0; i < pages; i++) {
4290 4288 page_unlock(ppa[i]);
4291 4289 }
4292 4290 }
4293 4291 if (amp != NULL) {
4294 4292 anon_array_exit(&an_cookie);
4295 4293 ANON_LOCK_EXIT(&->a_rwlock);
4296 4294 }
4297 4295 goto next;
4298 4296 }
4299 4297
4300 4298 /*
4301 4299 * See if upsize is possible.
4302 4300 */
4303 4301 if (pszc > szc && szc < seg->s_szc &&
4304 4302 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4305 4303 pgcnt_t aphase;
4306 4304 uint_t pszc1 = MIN(pszc, seg->s_szc);
4307 4305 ppgsz = page_get_pagesize(pszc1);
4308 4306 ppages = btop(ppgsz);
4309 4307 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4310 4308
4311 4309 ASSERT(type != F_SOFTLOCK);
4312 4310
4313 4311 SEGVN_VMSTAT_FLTVNPAGES(31);
4314 4312 if (aphase != P2PHASE(pfn, ppages)) {
4315 4313 segvn_faultvnmpss_align_err4++;
4316 4314 } else {
4317 4315 SEGVN_VMSTAT_FLTVNPAGES(32);
4318 4316 if (pplist != NULL) {
4319 4317 page_t *pl = pplist;
4320 4318 page_free_replacement_page(pl);
4321 4319 page_create_putback(pages);
4322 4320 }
4323 4321 for (i = 0; i < pages; i++) {
4324 4322 page_unlock(ppa[i]);
4325 4323 }
4326 4324 if (amp != NULL) {
4327 4325 anon_array_exit(&an_cookie);
4328 4326 ANON_LOCK_EXIT(&->a_rwlock);
4329 4327 }
4330 4328 pszc = pszc1;
4331 4329 ierr = -2;
4332 4330 break;
4333 4331 }
4334 4332 }
4335 4333
4336 4334 /*
4337 4335 * check if we should use smallest mapping size.
4338 4336 */
4339 4337 upgrdfail = 0;
4340 4338 if (szc == 0 || xhat ||
4341 4339 (pszc >= szc &&
4342 4340 !IS_P2ALIGNED(pfn, pages)) ||
4343 4341 (pszc < szc &&
4344 4342 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4345 4343 &pszc))) {
4346 4344
4347 4345 if (upgrdfail && type != F_SOFTLOCK) {
4348 4346 /*
4349 4347 * segvn_full_szcpages failed to lock
4350 4348 * all pages EXCL. Size down.
4351 4349 */
4352 4350 ASSERT(pszc < szc);
4353 4351
4354 4352 SEGVN_VMSTAT_FLTVNPAGES(33);
4355 4353
4356 4354 if (pplist != NULL) {
4357 4355 page_t *pl = pplist;
4358 4356 page_free_replacement_page(pl);
4359 4357 page_create_putback(pages);
4360 4358 }
4361 4359
4362 4360 for (i = 0; i < pages; i++) {
4363 4361 page_unlock(ppa[i]);
4364 4362 }
4365 4363 if (amp != NULL) {
4366 4364 anon_array_exit(&an_cookie);
4367 4365 ANON_LOCK_EXIT(&->a_rwlock);
4368 4366 }
4369 4367 ierr = -1;
4370 4368 break;
4371 4369 }
4372 4370 if (szc != 0 && !xhat && !upgrdfail) {
4373 4371 segvn_faultvnmpss_align_err5++;
4374 4372 }
4375 4373 SEGVN_VMSTAT_FLTVNPAGES(34);
4376 4374 if (pplist != NULL) {
4377 4375 page_free_replacement_page(pplist);
4378 4376 page_create_putback(pages);
4379 4377 }
4380 4378 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4381 4379 prot, vpprot);
4382 4380 if (upgrdfail && segvn_anypgsz_vnode) {
4383 4381 /* SOFTLOCK case */
4384 4382 hat_memload_array_region(hat, a, pgsz,
4385 4383 ppa, prot & vpprot, hat_flag,
4386 4384 svd->rcookie);
4387 4385 } else {
4388 4386 for (i = 0; i < pages; i++) {
4389 4387 hat_memload_region(hat,
4390 4388 a + (i << PAGESHIFT),
4391 4389 ppa[i], prot & vpprot,
4392 4390 hat_flag, svd->rcookie);
4393 4391 }
4394 4392 }
4395 4393 if (!(hat_flag & HAT_LOAD_LOCK)) {
4396 4394 for (i = 0; i < pages; i++) {
4397 4395 page_unlock(ppa[i]);
4398 4396 }
4399 4397 }
4400 4398 if (amp != NULL) {
4401 4399 anon_array_exit(&an_cookie);
4402 4400 ANON_LOCK_EXIT(&->a_rwlock);
4403 4401 }
4404 4402 goto next;
4405 4403 }
4406 4404
4407 4405 if (pszc == szc) {
4408 4406 /*
4409 4407 * segvn_full_szcpages() upgraded pages szc.
4410 4408 */
4411 4409 ASSERT(pszc == ppa[0]->p_szc);
4412 4410 ASSERT(IS_P2ALIGNED(pfn, pages));
4413 4411 goto chkszc;
4414 4412 }
4415 4413
4416 4414 if (pszc > szc) {
4417 4415 kmutex_t *szcmtx;
4418 4416 SEGVN_VMSTAT_FLTVNPAGES(35);
4419 4417 /*
4420 4418 * p_szc of ppa[0] can change since we haven't
4421 4419 * locked all constituent pages. Call
4422 4420				 * page_szc_lock() to prevent szc changes.
4423 4421 * This should be a rare case that happens when
4424 4422 * multiple segments use a different page size
4425 4423 * to map the same file offsets.
4426 4424 */
4427 4425 szcmtx = page_szc_lock(ppa[0]);
4428 4426 pszc = ppa[0]->p_szc;
4429 4427 ASSERT(szcmtx != NULL || pszc == 0);
4430 4428 ASSERT(ppa[0]->p_szc <= pszc);
4431 4429 if (pszc <= szc) {
4432 4430 SEGVN_VMSTAT_FLTVNPAGES(36);
4433 4431 if (szcmtx != NULL) {
4434 4432 mutex_exit(szcmtx);
4435 4433 }
4436 4434 goto chkszc;
4437 4435 }
4438 4436 if (pplist != NULL) {
4439 4437 /*
4440 4438					 * Page got promoted since the last check.
4441 4439					 * We don't need the preallocated large
4442 4440					 * page.
4443 4441 */
4444 4442 SEGVN_VMSTAT_FLTVNPAGES(37);
4445 4443 page_free_replacement_page(pplist);
4446 4444 page_create_putback(pages);
4447 4445 }
4448 4446 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4449 4447 prot, vpprot);
4450 4448 hat_memload_array_region(hat, a, pgsz, ppa,
4451 4449 prot & vpprot, hat_flag, svd->rcookie);
4452 4450 mutex_exit(szcmtx);
4453 4451 if (!(hat_flag & HAT_LOAD_LOCK)) {
4454 4452 for (i = 0; i < pages; i++) {
4455 4453 page_unlock(ppa[i]);
4456 4454 }
4457 4455 }
4458 4456 if (amp != NULL) {
4459 4457 anon_array_exit(&an_cookie);
4460 4458 ANON_LOCK_EXIT(&->a_rwlock);
4461 4459 }
4462 4460 goto next;
4463 4461 }
4464 4462
4465 4463 /*
4466 4464			 * If the page got demoted since the last check
4467 4465			 * we may not have allocated a large page yet.
4468 4466			 * Allocate one now.
4469 4467 */
4470 4468 if (pplist == NULL &&
4471 4469 page_alloc_pages(vp, seg, a, &pplist, NULL,
4472 4470 szc, 0, 0) && type != F_SOFTLOCK) {
4473 4471 SEGVN_VMSTAT_FLTVNPAGES(38);
4474 4472 for (i = 0; i < pages; i++) {
4475 4473 page_unlock(ppa[i]);
4476 4474 }
4477 4475 if (amp != NULL) {
4478 4476 anon_array_exit(&an_cookie);
4479 4477 ANON_LOCK_EXIT(&->a_rwlock);
4480 4478 }
4481 4479 ierr = -1;
4482 4480 alloc_failed |= (1 << szc);
4483 4481 break;
4484 4482 }
4485 4483
4486 4484 SEGVN_VMSTAT_FLTVNPAGES(39);
4487 4485
4488 4486 if (pplist != NULL) {
4489 4487 segvn_relocate_pages(ppa, pplist);
4490 4488 #ifdef DEBUG
4491 4489 } else {
4492 4490 ASSERT(type == F_SOFTLOCK);
4493 4491 SEGVN_VMSTAT_FLTVNPAGES(40);
4494 4492 #endif /* DEBUG */
4495 4493 }
4496 4494
4497 4495 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4498 4496
4499 4497 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4500 4498 ASSERT(type == F_SOFTLOCK);
4501 4499 for (i = 0; i < pages; i++) {
4502 4500 ASSERT(ppa[i]->p_szc < szc);
4503 4501 hat_memload_region(hat,
4504 4502 a + (i << PAGESHIFT),
4505 4503 ppa[i], prot & vpprot, hat_flag,
4506 4504 svd->rcookie);
4507 4505 }
4508 4506 } else {
4509 4507 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4510 4508 hat_memload_array_region(hat, a, pgsz, ppa,
4511 4509 prot & vpprot, hat_flag, svd->rcookie);
4512 4510 }
4513 4511 if (!(hat_flag & HAT_LOAD_LOCK)) {
4514 4512 for (i = 0; i < pages; i++) {
4515 4513 ASSERT(PAGE_SHARED(ppa[i]));
4516 4514 page_unlock(ppa[i]);
4517 4515 }
4518 4516 }
4519 4517 if (amp != NULL) {
4520 4518 anon_array_exit(&an_cookie);
4521 4519 ANON_LOCK_EXIT(&->a_rwlock);
4522 4520 }
4523 4521
4524 4522 next:
4525 4523 if (vpage != NULL) {
4526 4524 vpage += pages;
4527 4525 }
4528 4526 adjszc_chk = 1;
4529 4527 }
4530 4528 if (a == lpgeaddr)
4531 4529 break;
4532 4530 ASSERT(a < lpgeaddr);
4533 4531
4534 4532 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4535 4533
4536 4534 /*
4537 4535		 * ierr == -1 means we failed to map with a large page
4538 4536		 * (either due to allocation/relocation failures or
4539 4537		 * misalignment with other mappings to this file).
4540 4538		 *
4541 4539		 * ierr == -2 means some other thread allocated a large page
4542 4540		 * after we gave up trying to map with a large page. Retry with
4543 4541		 * a larger mapping.
4544 4542 */
4545 4543 ASSERT(ierr == -1 || ierr == -2);
4546 4544 ASSERT(ierr == -2 || szc != 0);
4547 4545 ASSERT(ierr == -1 || szc < seg->s_szc);
4548 4546 if (ierr == -2) {
4549 4547 SEGVN_VMSTAT_FLTVNPAGES(41);
4550 4548 ASSERT(pszc > szc && pszc <= seg->s_szc);
4551 4549 szc = pszc;
4552 4550 } else if (segvn_anypgsz_vnode) {
4553 4551 SEGVN_VMSTAT_FLTVNPAGES(42);
4554 4552 szc--;
4555 4553 } else {
4556 4554 SEGVN_VMSTAT_FLTVNPAGES(43);
4557 4555 ASSERT(pszc < szc);
4558 4556 /*
4559 4557			 * Another process created a pszc large page,
4560 4558			 * but we still have to drop to szc 0.
4561 4559 */
4562 4560 szc = 0;
4563 4561 }
4564 4562
4565 4563 pgsz = page_get_pagesize(szc);
4566 4564 pages = btop(pgsz);
4567 4565 if (ierr == -2) {
4568 4566 /*
4569 4567 * Size up case. Note lpgaddr may only be needed for
4570 4568 * softlock case so we don't adjust it here.
4571 4569 */
4572 4570 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4573 4571 ASSERT(a >= lpgaddr);
4574 4572 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4575 4573 off = svd->offset + (uintptr_t)(a - seg->s_base);
4576 4574 aindx = svd->anon_index + seg_page(seg, a);
4577 4575 vpage = (svd->vpage != NULL) ?
4578 4576 &svd->vpage[seg_page(seg, a)] : NULL;
4579 4577 } else {
4580 4578 /*
4581 4579 * Size down case. Note lpgaddr may only be needed for
4582 4580 * softlock case so we don't adjust it here.
4583 4581 */
4584 4582 ASSERT(IS_P2ALIGNED(a, pgsz));
4585 4583 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4586 4584 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4587 4585 ASSERT(a < lpgeaddr);
4588 4586 if (a < addr) {
4589 4587 SEGVN_VMSTAT_FLTVNPAGES(44);
4590 4588 /*
4591 4589 * The beginning of the large page region can
4592 4590 * be pulled to the right to make a smaller
4593 4591 * region. We haven't yet faulted a single
4594 4592 * page.
4595 4593 */
4596 4594 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4597 4595 ASSERT(a >= lpgaddr);
4598 4596 off = svd->offset +
4599 4597 (uintptr_t)(a - seg->s_base);
4600 4598 aindx = svd->anon_index + seg_page(seg, a);
4601 4599 vpage = (svd->vpage != NULL) ?
4602 4600 &svd->vpage[seg_page(seg, a)] : NULL;
4603 4601 }
4604 4602 }
4605 4603 }
4606 4604 out:
4607 4605 kmem_free(ppa, ppasize);
4608 4606 if (!err && !vop_size_err) {
4609 4607 SEGVN_VMSTAT_FLTVNPAGES(45);
4610 4608 return (0);
4611 4609 }
4612 4610 if (type == F_SOFTLOCK && a > lpgaddr) {
4613 4611 SEGVN_VMSTAT_FLTVNPAGES(46);
4614 4612 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4615 4613 }
4616 4614 if (!vop_size_err) {
4617 4615 SEGVN_VMSTAT_FLTVNPAGES(47);
4618 4616 return (err);
4619 4617 }
4620 4618 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4621 4619 /*
4622 4620	 * The large page end is mapped beyond the end of the file and it's a cow
4623 4621	 * fault (possibly a text replication induced cow) or a softlock, so we can't
4624 4622 * reduce the map area. For now just demote the segment. This should
4625 4623 * really only happen if the end of the file changed after the mapping
4626 4624 * was established since when large page segments are created we make
4627 4625 * sure they don't extend beyond the end of the file.
4628 4626 */
4629 4627 SEGVN_VMSTAT_FLTVNPAGES(48);
4630 4628
4631 4629 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4632 4630 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4633 4631 err = 0;
4634 4632 if (seg->s_szc != 0) {
4635 4633 segvn_fltvnpages_clrszc_cnt++;
4636 4634 ASSERT(svd->softlockcnt == 0);
4637 4635 err = segvn_clrszc(seg);
4638 4636 if (err != 0) {
4639 4637 segvn_fltvnpages_clrszc_err++;
4640 4638 }
4641 4639 }
4642 4640 ASSERT(err || seg->s_szc == 0);
4643 4641 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4644 4642 /* segvn_fault will do its job as if szc had been zero to begin with */
4645 4643 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4646 4644 }
4647 4645
4648 4646 /*
4649 4647	 * This routine will attempt to fault in one large page.
4650 4648	 * It will use smaller pages if that fails.
4651 4649 * It should only be called for pure anonymous segments.
4652 4650 */
4653 4651 static faultcode_t
4654 4652 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4655 4653 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4656 4654 caddr_t eaddr, int brkcow)
4657 4655 {
4658 4656 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4659 4657 struct anon_map *amp = svd->amp;
4660 4658 uchar_t segtype = svd->type;
4661 4659 uint_t szc = seg->s_szc;
4662 4660 size_t pgsz = page_get_pagesize(szc);
4663 4661 size_t maxpgsz = pgsz;
4664 4662 pgcnt_t pages = btop(pgsz);
4665 4663 uint_t ppaszc = szc;
4666 4664 caddr_t a = lpgaddr;
4667 4665 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4668 4666 struct vpage *vpage = (svd->vpage != NULL) ?
4669 4667 &svd->vpage[seg_page(seg, a)] : NULL;
4670 4668 page_t **ppa;
4671 4669 uint_t ppa_szc;
4672 4670 faultcode_t err;
4673 4671 int ierr;
4674 4672 uint_t protchk, prot, vpprot;
4675 4673 ulong_t i;
4676 4674 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4677 4675 anon_sync_obj_t cookie;
4678 4676 int adjszc_chk;
4679 4677 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4680 4678
4681 4679 ASSERT(szc != 0);
4682 4680 ASSERT(amp != NULL);
4683 4681 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4684 4682 ASSERT(!(svd->flags & MAP_NORESERVE));
4685 4683 ASSERT(type != F_SOFTUNLOCK);
4686 4684 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4687 4685 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4688 4686 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4689 4687
4690 4688 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4691 4689
4692 4690 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4693 4691 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4694 4692
4695 4693 if (svd->flags & MAP_TEXT) {
4696 4694 hat_flag |= HAT_LOAD_TEXT;
4697 4695 }
4698 4696
4699 4697 if (svd->pageprot) {
4700 4698 switch (rw) {
4701 4699 case S_READ:
4702 4700 protchk = PROT_READ;
4703 4701 break;
4704 4702 case S_WRITE:
4705 4703 protchk = PROT_WRITE;
4706 4704 break;
4707 4705 case S_EXEC:
4708 4706 protchk = PROT_EXEC;
4709 4707 break;
4710 4708 case S_OTHER:
4711 4709 default:
4712 4710 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4713 4711 break;
4714 4712 }
4715 4713 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4716 4714 } else {
4717 4715 prot = svd->prot;
4718 4716 /* caller has already done segment level protection check. */
4719 4717 }
4720 4718
4721 4719 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4722 4720 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
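	/*
	 * As in the vnode case, the outer loop retries with a new page size
	 * when anon_map_getpages() asks for a size down (ierr == -1) or a
	 * size up (ierr == -2); see the size adjustment code after the
	 * inner loop.
	 */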
4723 4721 for (;;) {
4724 4722 adjszc_chk = 0;
4725 4723 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4726 4724 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4727 4725 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4728 4726 ASSERT(vpage != NULL);
4729 4727 prot = VPP_PROT(vpage);
4730 4728 ASSERT(sameprot(seg, a, maxpgsz));
4731 4729 if ((prot & protchk) == 0) {
4732 4730 err = FC_PROT;
4733 4731 goto error;
4734 4732 }
4735 4733 }
4736 4734 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4737 4735 pgsz < maxpgsz) {
4738 4736 ASSERT(a > lpgaddr);
4739 4737 szc = seg->s_szc;
4740 4738 pgsz = maxpgsz;
4741 4739 pages = btop(pgsz);
4742 4740 ASSERT(IS_P2ALIGNED(aindx, pages));
4743 4741 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4744 4742 pgsz);
4745 4743 }
4746 4744 if (type == F_SOFTLOCK) {
4747 4745 atomic_add_long((ulong_t *)&svd->softlockcnt,
4748 4746 pages);
4749 4747 }
4750 4748 anon_array_enter(amp, aindx, &cookie);
4751 4749 ppa_szc = (uint_t)-1;
4752 4750 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4753 4751 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4754 4752 segvn_anypgsz, pgflags, svd->cred);
4755 4753 if (ierr != 0) {
4756 4754 anon_array_exit(&cookie);
4757 4755 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4758 4756 if (type == F_SOFTLOCK) {
4759 4757 atomic_add_long(
4760 4758 (ulong_t *)&svd->softlockcnt,
4761 4759 -pages);
4762 4760 }
4763 4761 if (ierr > 0) {
4764 4762 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4765 4763 err = FC_MAKE_ERR(ierr);
4766 4764 goto error;
4767 4765 }
4768 4766 break;
4769 4767 }
4770 4768
4771 4769 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4772 4770
4773 4771 ASSERT(segtype == MAP_SHARED ||
4774 4772 ppa[0]->p_szc <= szc);
4775 4773 ASSERT(segtype == MAP_PRIVATE ||
4776 4774 ppa[0]->p_szc >= szc);
4777 4775
4778 4776 /*
4779 4777 * Handle pages that have been marked for migration
4780 4778 */
4781 4779 if (lgrp_optimizations())
4782 4780 page_migrate(seg, a, ppa, pages);
4783 4781
4784 4782 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4785 4783
4786 4784 if (segtype == MAP_SHARED) {
4787 4785 vpprot |= PROT_WRITE;
4788 4786 }
4789 4787
4790 4788 hat_memload_array(hat, a, pgsz, ppa,
4791 4789 prot & vpprot, hat_flag);
4792 4790
4793 4791 if (hat_flag & HAT_LOAD_LOCK) {
4794 4792 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4795 4793 } else {
4796 4794 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4797 4795 for (i = 0; i < pages; i++)
4798 4796 page_unlock(ppa[i]);
4799 4797 }
4800 4798 if (vpage != NULL)
4801 4799 vpage += pages;
4802 4800
4803 4801 anon_array_exit(&cookie);
4804 4802 adjszc_chk = 1;
4805 4803 }
4806 4804 if (a == lpgeaddr)
4807 4805 break;
4808 4806 ASSERT(a < lpgeaddr);
4809 4807 /*
4810 4808		 * ierr == -1 means we failed to allocate a large page,
4811 4809		 * so do a size down operation.
4812 4810 *
4813 4811 * ierr == -2 means some other process that privately shares
4814 4812 * pages with this process has allocated a larger page and we
4815 4813 * need to retry with larger pages. So do a size up
4816 4814 * operation. This relies on the fact that large pages are
4817 4815 * never partially shared i.e. if we share any constituent
4818 4816 * page of a large page with another process we must share the
4819 4817 * entire large page. Note this cannot happen for SOFTLOCK
4820 4818 * case, unless current address (a) is at the beginning of the
4821 4819 * next page size boundary because the other process couldn't
4822 4820 * have relocated locked pages.
4823 4821 */
4824 4822 ASSERT(ierr == -1 || ierr == -2);
4825 4823
4826 4824 if (segvn_anypgsz) {
4827 4825 ASSERT(ierr == -2 || szc != 0);
4828 4826 ASSERT(ierr == -1 || szc < seg->s_szc);
4829 4827 szc = (ierr == -1) ? szc - 1 : szc + 1;
4830 4828 } else {
4831 4829 /*
4832 4830			 * For non-COW faults with segvn_anypgsz == 0
4833 4831			 * we need to be careful not to loop forever
4834 4832			 * if an existing page is found with an szc other
4835 4833			 * than 0 or seg->s_szc. This could be due
4836 4834			 * to page relocations on behalf of DR or,
4837 4835			 * more likely, large page creation. In this
4838 4836			 * case simply re-size to the existing page's szc
4839 4837			 * as returned by anon_map_getpages().
4840 4838 */
4841 4839 if (ppa_szc == (uint_t)-1) {
4842 4840 szc = (ierr == -1) ? 0 : seg->s_szc;
4843 4841 } else {
4844 4842 ASSERT(ppa_szc <= seg->s_szc);
4845 4843 ASSERT(ierr == -2 || ppa_szc < szc);
4846 4844 ASSERT(ierr == -1 || ppa_szc > szc);
4847 4845 szc = ppa_szc;
4848 4846 }
4849 4847 }
4850 4848
4851 4849 pgsz = page_get_pagesize(szc);
4852 4850 pages = btop(pgsz);
4853 4851 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4854 4852 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4855 4853 if (type == F_SOFTLOCK) {
4856 4854 /*
4857 4855 * For softlocks we cannot reduce the fault area
4858 4856 * (calculated based on the largest page size for this
4859 4857			 * segment) for size downs, and a is already aligned to the
4860 4858			 * next page size as asserted above for size
4861 4859			 * ups. Therefore just continue in the softlock case.
4862 4860 */
4863 4861 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4864 4862 continue; /* keep lint happy */
4865 4863 } else if (ierr == -2) {
4866 4864
4867 4865 /*
4868 4866 * Size up case. Note lpgaddr may only be needed for
4869 4867 * softlock case so we don't adjust it here.
4870 4868 */
4871 4869 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4872 4870 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4873 4871 ASSERT(a >= lpgaddr);
4874 4872 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4875 4873 aindx = svd->anon_index + seg_page(seg, a);
4876 4874 vpage = (svd->vpage != NULL) ?
4877 4875 &svd->vpage[seg_page(seg, a)] : NULL;
4878 4876 } else {
4879 4877 /*
4880 4878 * Size down case. Note lpgaddr may only be needed for
4881 4879 * softlock case so we don't adjust it here.
4882 4880 */
4883 4881 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4884 4882 ASSERT(IS_P2ALIGNED(a, pgsz));
4885 4883 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4886 4884 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4887 4885 ASSERT(a < lpgeaddr);
4888 4886 if (a < addr) {
4889 4887 /*
4890 4888 * The beginning of the large page region can
4891 4889 * be pulled to the right to make a smaller
4892 4890 * region. We haven't yet faulted a single
4893 4891 * page.
4894 4892 */
4895 4893 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4896 4894 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4897 4895 ASSERT(a >= lpgaddr);
4898 4896 aindx = svd->anon_index + seg_page(seg, a);
4899 4897 vpage = (svd->vpage != NULL) ?
4900 4898 &svd->vpage[seg_page(seg, a)] : NULL;
4901 4899 }
4902 4900 }
4903 4901 }
4904 4902 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4905 4903 ANON_LOCK_EXIT(&->a_rwlock);
4906 4904 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4907 4905 return (0);
4908 4906 error:
4909 4907 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4910 4908 ANON_LOCK_EXIT(&->a_rwlock);
4911 4909 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4912 4910 if (type == F_SOFTLOCK && a > lpgaddr) {
4913 4911 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4914 4912 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4915 4913 }
4916 4914 return (err);
4917 4915 }
4918 4916
4919 4917 int fltadvice = 1; /* set to free behind pages for sequential access */
4920 4918
4921 4919 /*
4922 4920 * This routine is called via a machine specific fault handling routine.
4923 4921 * It is also called by software routines wishing to lock or unlock
4924 4922 * a range of addresses.
4925 4923 *
4926 4924 * Here is the basic algorithm:
4927 4925 * If unlocking
4928 4926 * Call segvn_softunlock
4929 4927 * Return
4930 4928 * endif
4931 4929 * Checking and set up work
4932 4930 * If we will need some non-anonymous pages
4933 4931 * Call VOP_GETPAGE over the range of non-anonymous pages
4934 4932 * endif
4935 4933 * Loop over all addresses requested
4936 4934 * Call segvn_faultpage passing in page list
4937 4935 * to load up translations and handle anonymous pages
4938 4936 * endloop
4939 4937 * Load up translation to any additional pages in page list not
4940 4938 * already handled that fit into this segment
4941 4939 */
4942 4940 static faultcode_t
4943 4941 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4944 4942 enum fault_type type, enum seg_rw rw)
4945 4943 {
4946 4944 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4947 4945 page_t **plp, **ppp, *pp;
4948 4946 u_offset_t off;
4949 4947 caddr_t a;
4950 4948 struct vpage *vpage;
4951 4949 uint_t vpprot, prot;
4952 4950 int err;
4953 4951 page_t *pl[PVN_GETPAGE_NUM + 1];
4954 4952 size_t plsz, pl_alloc_sz;
4955 4953 size_t page;
4956 4954 ulong_t anon_index;
4957 4955 struct anon_map *amp;
4958 4956 int dogetpage = 0;
4959 4957 caddr_t lpgaddr, lpgeaddr;
4960 4958 size_t pgsz;
4961 4959 anon_sync_obj_t cookie;
4962 4960 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4963 4961
4964 4962 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4965 4963 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4966 4964
4967 4965 /*
4968 4966 * First handle the easy stuff
4969 4967 */
4970 4968 if (type == F_SOFTUNLOCK) {
4971 4969 if (rw == S_READ_NOCOW) {
4972 4970 rw = S_READ;
4973 4971 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4974 4972 }
4975 4973 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4976 4974 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4977 4975 page_get_pagesize(seg->s_szc);
4978 4976 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4979 4977 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4980 4978 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4981 4979 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4982 4980 return (0);
4983 4981 }
4984 4982
4985 4983 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4986 4984 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4987 4985 if (brkcow == 0) {
4988 4986 if (svd->tr_state == SEGVN_TR_INIT) {
4989 4987 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4990 4988 if (svd->tr_state == SEGVN_TR_INIT) {
4991 4989 ASSERT(svd->vp != NULL && svd->amp == NULL);
4992 4990 ASSERT(svd->flags & MAP_TEXT);
4993 4991 ASSERT(svd->type == MAP_PRIVATE);
4994 4992 segvn_textrepl(seg);
4995 4993 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4996 4994 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4997 4995 svd->amp != NULL);
4998 4996 }
4999 4997 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5000 4998 }
5001 4999 } else if (svd->tr_state != SEGVN_TR_OFF) {
5002 5000 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5003 5001
5004 5002 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
5005 5003 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
5006 5004 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5007 5005 return (FC_PROT);
5008 5006 }
5009 5007
5010 5008 if (svd->tr_state == SEGVN_TR_ON) {
5011 5009 ASSERT(svd->vp != NULL && svd->amp != NULL);
5012 5010 segvn_textunrepl(seg, 0);
5013 5011 ASSERT(svd->amp == NULL &&
5014 5012 svd->tr_state == SEGVN_TR_OFF);
5015 5013 } else if (svd->tr_state != SEGVN_TR_OFF) {
5016 5014 svd->tr_state = SEGVN_TR_OFF;
5017 5015 }
5018 5016 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5019 5017 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5020 5018 }
5021 5019
5022 5020 top:
5023 5021 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5024 5022
5025 5023 /*
5026 5024 * If we have the same protections for the entire segment,
5027 5025 * ensure that the access being attempted is legitimate.
5028 5026 */
5029 5027
5030 5028 if (svd->pageprot == 0) {
5031 5029 uint_t protchk;
5032 5030
5033 5031 switch (rw) {
5034 5032 case S_READ:
5035 5033 case S_READ_NOCOW:
5036 5034 protchk = PROT_READ;
5037 5035 break;
5038 5036 case S_WRITE:
5039 5037 protchk = PROT_WRITE;
5040 5038 break;
5041 5039 case S_EXEC:
5042 5040 protchk = PROT_EXEC;
5043 5041 break;
5044 5042 case S_OTHER:
5045 5043 default:
5046 5044 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
5047 5045 break;
5048 5046 }
5049 5047
5050 5048 if ((svd->prot & protchk) == 0) {
5051 5049 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5052 5050 return (FC_PROT); /* illegal access type */
5053 5051 }
5054 5052 }
5055 5053
5056 5054 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5057 5055 /* this must be SOFTLOCK S_READ fault */
5058 5056 ASSERT(svd->amp == NULL);
5059 5057 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5060 5058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5061 5059 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5062 5060 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5063 5061 /*
5064 5062 * this must be the first ever non S_READ_NOCOW
5065 5063 * softlock for this segment.
5066 5064 */
5067 5065 ASSERT(svd->softlockcnt == 0);
5068 5066 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5069 5067 HAT_REGION_TEXT);
5070 5068 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5071 5069 }
5072 5070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5073 5071 goto top;
5074 5072 }
5075 5073
5076 5074 /*
5077 5075 * We can't allow the long term use of softlocks for vmpss segments,
5078 5076 * because in some file truncation cases we should be able to demote
5079 5077 * the segment, which requires that there are no softlocks. The
5080 5078 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5081 5079 * segment is S_READ_NOCOW, where the caller holds the address space
5082 5080 * locked as writer and calls softunlock before dropping the as lock.
5083 5081 * S_READ_NOCOW is used by /proc to read memory from another user.
5084 5082 *
5085 5083 * Another deadlock between SOFTLOCK and file truncation can happen
5086 5084 * because segvn_fault_vnodepages() calls the FS one pagesize at
5087 5085 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5088 5086 * can cause a deadlock because the first set of page_t's remain
5089 5087 * locked SE_SHARED. To avoid this, we demote segments on a first
5090 5088 * SOFTLOCK if they have a length greater than the segment's
5091 5089 * page size.
5092 5090 *
5093 5091 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5094 5092 * the access type is S_READ_NOCOW and the fault length is less than
5095 5093 * or equal to the segment's page size. While this is quite restrictive,
5096 5094 * it should be the most common case of SOFTLOCK against a vmpss
5097 5095 * segment.
5098 5096 *
5099 5097 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5100 5098 * caller makes sure no COW will be caused by another thread for a
5101 5099 * softlocked page.
5102 5100 */
5103 5101 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5104 5102 int demote = 0;
5105 5103
5106 5104 if (rw != S_READ_NOCOW) {
5107 5105 demote = 1;
5108 5106 }
5109 5107 if (!demote && len > PAGESIZE) {
5110 5108 pgsz = page_get_pagesize(seg->s_szc);
5111 5109 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5112 5110 lpgeaddr);
5113 5111 if (lpgeaddr - lpgaddr > pgsz) {
5114 5112 demote = 1;
5115 5113 }
5116 5114 }
5117 5115
5118 5116 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5119 5117
5120 5118 if (demote) {
5121 5119 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5122 5120 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5123 5121 if (seg->s_szc != 0) {
5124 5122 segvn_vmpss_clrszc_cnt++;
5125 5123 ASSERT(svd->softlockcnt == 0);
5126 5124 err = segvn_clrszc(seg);
5127 5125 if (err) {
5128 5126 segvn_vmpss_clrszc_err++;
5129 5127 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5130 5128 return (FC_MAKE_ERR(err));
5131 5129 }
5132 5130 }
5133 5131 ASSERT(seg->s_szc == 0);
5134 5132 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5135 5133 goto top;
5136 5134 }
5137 5135 }
5138 5136
5139 5137 /*
5140 5138 * Check to see if we need to allocate an anon_map structure.
5141 5139 */
5142 5140 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5143 5141 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5144 5142 /*
5145 5143 * Drop the "read" lock on the segment and acquire
5146 5144 * the "write" version since we have to allocate the
5147 5145 * anon_map.
5148 5146 */
5149 5147 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5150 5148 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5151 5149
5152 5150 if (svd->amp == NULL) {
5153 5151 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5154 5152 svd->amp->a_szc = seg->s_szc;
5155 5153 }
5156 5154 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5157 5155
5158 5156 /*
5159 5157 * Start all over again since segment protections
5160 5158 * may have changed after we dropped the "read" lock.
5161 5159 */
5162 5160 goto top;
5163 5161 }
5164 5162
5165 5163 /*
5166 5164 * S_READ_NOCOW vs S_READ distinction was
5167 5165 * only needed for the code above. After
5168 5166 * that we treat it as S_READ.
5169 5167 */
5170 5168 if (rw == S_READ_NOCOW) {
5171 5169 ASSERT(type == F_SOFTLOCK);
5172 5170 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5173 5171 rw = S_READ;
5174 5172 }
5175 5173
5176 5174 amp = svd->amp;
5177 5175
5178 5176 /*
5179 5177 * MADV_SEQUENTIAL work is ignored for large page segments.
5180 5178 */
5181 5179 if (seg->s_szc != 0) {
5182 5180 pgsz = page_get_pagesize(seg->s_szc);
5183 5181 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5184 5182 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5185 5183 if (svd->vp == NULL) {
5186 5184 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5187 5185 lpgeaddr, type, rw, addr, addr + len, brkcow);
5188 5186 } else {
5189 5187 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5190 5188 lpgeaddr, type, rw, addr, addr + len, brkcow);
5191 5189 if (err == IE_RETRY) {
5192 5190 ASSERT(seg->s_szc == 0);
5193 5191 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5194 5192 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5195 5193 goto top;
5196 5194 }
5197 5195 }
5198 5196 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5199 5197 return (err);
5200 5198 }
5201 5199
5202 5200 page = seg_page(seg, addr);
5203 5201 if (amp != NULL) {
5204 5202 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5205 5203 anon_index = svd->anon_index + page;
5206 5204
5207 5205 if (type == F_PROT && rw == S_READ &&
5208 5206 svd->tr_state == SEGVN_TR_OFF &&
5209 5207 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5210 5208 size_t index = anon_index;
5211 5209 struct anon *ap;
5212 5210
5213 5211 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5214 5212 /*
5215 5213 * The fast path could apply to S_WRITE also, except
5216 5214 * that the protection fault could be caused by lazy
5217 5215 * tlb flush when ro->rw. In this case, the pte is
5218 5216 * RW already. But RO in the other cpu's tlb causes
5219 5217 * the fault. Since hat_chgprot won't do anything if
5220 5218 * pte doesn't change, we may end up faulting
5221 5219 * indefinitely until the RO tlb entry gets replaced.
5222 5220 */
5223 5221 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5224 5222 anon_array_enter(amp, index, &cookie);
5225 5223 ap = anon_get_ptr(amp->ahp, index);
5226 5224 anon_array_exit(&cookie);
5227 5225 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5228 5226 ANON_LOCK_EXIT(&amp->a_rwlock);
5229 5227 goto slow;
5230 5228 }
5231 5229 }
5232 5230 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5233 5231 ANON_LOCK_EXIT(&amp->a_rwlock);
5234 5232 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5235 5233 return (0);
5236 5234 }
5237 5235 }
5238 5236 slow:
5239 5237
5240 5238 if (svd->vpage == NULL)
5241 5239 vpage = NULL;
5242 5240 else
5243 5241 vpage = &svd->vpage[page];
5244 5242
5245 5243 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5246 5244
5247 5245 /*
5248 5246 * If MADV_SEQUENTIAL has been set for the particular page we
5249 5247 * are faulting on, free behind all pages in the segment and put
5250 5248 * them on the free list.
5251 5249 */
5252 5250
5253 5251 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5254 5252 struct vpage *vpp;
5255 5253 ulong_t fanon_index;
5256 5254 size_t fpage;
5257 5255 u_offset_t pgoff, fpgoff;
5258 5256 struct vnode *fvp;
5259 5257 struct anon *fap = NULL;
5260 5258
5261 5259 if (svd->advice == MADV_SEQUENTIAL ||
5262 5260 (svd->pageadvice &&
5263 5261 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5264 5262 pgoff = off - PAGESIZE;
5265 5263 fpage = page - 1;
5266 5264 if (vpage != NULL)
5267 5265 vpp = &svd->vpage[fpage];
5268 5266 if (amp != NULL)
5269 5267 fanon_index = svd->anon_index + fpage;
5270 5268
5271 5269 while (pgoff > svd->offset) {
5272 5270 if (svd->advice != MADV_SEQUENTIAL &&
5273 5271 (!svd->pageadvice || (vpage &&
5274 5272 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5275 5273 break;
5276 5274
5277 5275 /*
5278 5276 * If this is an anon page, we must find the
5279 5277 * correct <vp, offset> for it
5280 5278 */
5281 5279 fap = NULL;
5282 5280 if (amp != NULL) {
5283 5281 ANON_LOCK_ENTER(&amp->a_rwlock,
5284 5282 RW_READER);
5285 5283 anon_array_enter(amp, fanon_index,
5286 5284 &cookie);
5287 5285 fap = anon_get_ptr(amp->ahp,
5288 5286 fanon_index);
5289 5287 if (fap != NULL) {
5290 5288 swap_xlate(fap, &fvp, &fpgoff);
5291 5289 } else {
5292 5290 fpgoff = pgoff;
5293 5291 fvp = svd->vp;
5294 5292 }
5295 5293 anon_array_exit(&cookie);
5296 5294 ANON_LOCK_EXIT(&amp->a_rwlock);
5297 5295 } else {
5298 5296 fpgoff = pgoff;
5299 5297 fvp = svd->vp;
5300 5298 }
5301 5299 if (fvp == NULL)
5302 5300 break; /* XXX */
5303 5301 /*
5304 5302 * Skip pages that are free or have an
5305 5303 * "exclusive" lock.
5306 5304 */
5307 5305 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5308 5306 if (pp == NULL)
5309 5307 break;
5310 5308 /*
5311 5309 * We don't need the page_struct_lock to test
5312 5310 * as this is only advisory; even if we
5313 5311 * acquire it someone might race in and lock
5314 5312 * the page after we unlock and before the
5315 5313 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5316 5314 */
5317 5315 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5318 5316 /*
5319 5317 * Hold the vnode before releasing
5320 5318 * the page lock to prevent it from
5321 5319 * being freed and re-used by some
5322 5320 * other thread.
5323 5321 */
5324 5322 VN_HOLD(fvp);
5325 5323 page_unlock(pp);
5326 5324 /*
5327 5325 * We should build a page list
5328 5326 * to kluster putpages XXX
5329 5327 */
5330 5328 (void) VOP_PUTPAGE(fvp,
5331 5329 (offset_t)fpgoff, PAGESIZE,
5332 5330 (B_DONTNEED|B_FREE|B_ASYNC),
5333 5331 svd->cred, NULL);
5334 5332 VN_RELE(fvp);
5335 5333 } else {
5336 5334 /*
5337 5335 * XXX - Should the loop terminate if
5338 5336 * the page is `locked'?
5339 5337 */
5340 5338 page_unlock(pp);
5341 5339 }
5342 5340 --vpp;
5343 5341 --fanon_index;
5344 5342 pgoff -= PAGESIZE;
5345 5343 }
5346 5344 }
5347 5345 }
5348 5346
5349 5347 plp = pl;
5350 5348 *plp = NULL;
5351 5349 pl_alloc_sz = 0;
5352 5350
5353 5351 /*
5354 5352 * See if we need to call VOP_GETPAGE for
5355 5353 * *any* of the range being faulted on.
5356 5354 * We can skip all of this work if there
5357 5355 * was no original vnode.
5358 5356 */
5359 5357 if (svd->vp != NULL) {
5360 5358 u_offset_t vp_off;
5361 5359 size_t vp_len;
5362 5360 struct anon *ap;
5363 5361 vnode_t *vp;
5364 5362
5365 5363 vp_off = off;
5366 5364 vp_len = len;
5367 5365
5368 5366 if (amp == NULL)
5369 5367 dogetpage = 1;
5370 5368 else {
5371 5369 /*
5372 5370 * Only acquire reader lock to prevent amp->ahp
5373 5371 * from being changed. It's ok to miss pages,
5374 5372 * hence we don't do anon_array_enter
5375 5373 */
5376 5374 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5377 5375 ap = anon_get_ptr(amp->ahp, anon_index);
5378 5376
5379 5377 if (len <= PAGESIZE)
5380 5378 /* inline non_anon() */
5381 5379 dogetpage = (ap == NULL);
5382 5380 else
5383 5381 dogetpage = non_anon(amp->ahp, anon_index,
5384 5382 &vp_off, &vp_len);
5385 5383 ANON_LOCK_EXIT(&amp->a_rwlock);
5386 5384 }
5387 5385
5388 5386 if (dogetpage) {
5389 5387 enum seg_rw arw;
5390 5388 struct as *as = seg->s_as;
5391 5389
5392 5390 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5393 5391 /*
5394 5392 * Page list won't fit in local array,
5395 5393 * allocate one of the needed size.
5396 5394 */
5397 5395 pl_alloc_sz =
5398 5396 (btop(len) + 1) * sizeof (page_t *);
5399 5397 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5400 5398 plp[0] = NULL;
5401 5399 plsz = len;
5402 5400 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5403 5401 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5404 5402 (((size_t)(addr + PAGESIZE) <
5405 5403 (size_t)(seg->s_base + seg->s_size)) &&
5406 5404 hat_probe(as->a_hat, addr + PAGESIZE))) {
5407 5405 /*
5408 5406 * Ask VOP_GETPAGE to return the exact number
5409 5407 * of pages if
5410 5408 * (a) this is a COW fault, or
5411 5409 * (b) this is a software fault, or
5412 5410 * (c) next page is already mapped.
5413 5411 */
5414 5412 plsz = len;
5415 5413 } else {
5416 5414 /*
5417 5415 * Ask VOP_GETPAGE to return adjacent pages
5418 5416 * within the segment.
5419 5417 */
5420 5418 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5421 5419 ((seg->s_base + seg->s_size) - addr));
5422 5420 ASSERT((addr + plsz) <=
5423 5421 (seg->s_base + seg->s_size));
5424 5422 }
5425 5423
5426 5424 /*
5427 5425 * Need to get some non-anonymous pages.
5428 5426 * We need to make only one call to GETPAGE to do
5429 5427 * this to prevent certain deadlocking conditions
5430 5428 * when we are doing locking. In this case
5431 5429 * non_anon() should have picked up the smallest
5432 5430 * range which includes all the non-anonymous
5433 5431 * pages in the requested range. We have to
5434 5432 * be careful regarding which rw flag to pass in
5435 5433 * because on a private mapping, the underlying
5436 5434 * object is never allowed to be written.
5437 5435 */
5438 5436 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5439 5437 arw = S_READ;
5440 5438 } else {
5441 5439 arw = rw;
5442 5440 }
5443 5441 vp = svd->vp;
5444 5442 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5445 5443 "segvn_getpage:seg %p addr %p vp %p",
5446 5444 seg, addr, vp);
5447 5445 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5448 5446 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5449 5447 svd->cred, NULL);
5450 5448 if (err) {
5451 5449 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5452 5450 segvn_pagelist_rele(plp);
5453 5451 if (pl_alloc_sz)
5454 5452 kmem_free(plp, pl_alloc_sz);
5455 5453 return (FC_MAKE_ERR(err));
5456 5454 }
5457 5455 if (svd->type == MAP_PRIVATE)
5458 5456 vpprot &= ~PROT_WRITE;
5459 5457 }
5460 5458 }
5461 5459
5462 5460 /*
5463 5461 * N.B. at this time the plp array has all the needed non-anon
5464 5462 * pages in addition to (possibly) having some adjacent pages.
5465 5463 */
5466 5464
5467 5465 /*
5468 5466 * Always acquire the anon_array_lock to prevent
5469 5467 * 2 threads from allocating separate anon slots for
5470 5468 * the same "addr".
5471 5469 *
5472 5470 * If this is a copy-on-write fault and we don't already
5473 5471 * have the anon_array_lock, acquire it to prevent the
5474 5472 * fault routine from handling multiple copy-on-write faults
5475 5473 * on the same "addr" in the same address space.
5476 5474 *
5477 5475 * Only one thread should deal with the fault since after
5478 5476 * it is handled, the other threads can acquire a translation
5479 5477 * to the newly created private page. This prevents two or
5480 5478 * more threads from creating different private pages for the
5481 5479 * same fault.
5482 5480 *
5483 5481 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5484 5482 * to prevent deadlock between this thread and another thread
5485 5483 * which has soft-locked this page and wants to acquire serial_lock.
5486 5484 * ( bug 4026339 )
5487 5485 *
5488 5486 * The fix for bug 4026339 becomes unnecessary when using the
5489 5487 * locking scheme with per amp rwlock and a global set of hash
5490 5488 * locks, anon_array_lock. If we steal a vnode page when low
5491 5489 * on memory and upgrade the page lock through page_rename,
5492 5490 * then the page is PAGE_HANDLED, nothing needs to be done
5493 5491 * for this page after returning from segvn_faultpage.
5494 5492 *
5495 5493 * But really, the page lock should be downgraded after
5496 5494 * the stolen page is page_rename'd.
5497 5495 */
5498 5496
5499 5497 if (amp != NULL)
5500 5498 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5501 5499
5502 5500 /*
5503 5501 * Ok, now loop over the address range and handle faults
5504 5502 */
5505 5503 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5506 5504 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5507 5505 type, rw, brkcow);
5508 5506 if (err) {
5509 5507 if (amp != NULL)
5510 5508 ANON_LOCK_EXIT(&amp->a_rwlock);
5511 5509 if (type == F_SOFTLOCK && a > addr) {
5512 5510 segvn_softunlock(seg, addr, (a - addr),
5513 5511 S_OTHER);
5514 5512 }
5515 5513 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5516 5514 segvn_pagelist_rele(plp);
5517 5515 if (pl_alloc_sz)
5518 5516 kmem_free(plp, pl_alloc_sz);
5519 5517 return (err);
5520 5518 }
5521 5519 if (vpage) {
5522 5520 vpage++;
5523 5521 } else if (svd->vpage) {
5524 5522 page = seg_page(seg, addr);
5525 5523 vpage = &svd->vpage[++page];
5526 5524 }
5527 5525 }
5528 5526
5529 5527 /* Didn't get pages from the underlying fs so we're done */
5530 5528 if (!dogetpage)
5531 5529 goto done;
5532 5530
5533 5531 /*
5534 5532 * Now handle any other pages in the list returned.
5535 5533 * If the page can be used, load up the translations now.
5536 5534 * Note that the for loop will only be entered if "plp"
5537 5535 * is pointing to a non-NULL page pointer which means that
5538 5536 * VOP_GETPAGE() was called and vpprot has been initialized.
5539 5537 */
5540 5538 if (svd->pageprot == 0)
5541 5539 prot = svd->prot & vpprot;
5542 5540
5543 5541
5544 5542 /*
5545 5543 * Large Files: diff should be unsigned value because we started
5546 5544 * supporting > 2GB segment sizes from 2.5.1 and when a
5547 5545 * large file of size > 2GB gets mapped to address space
5548 5546 * the diff value can be > 2GB.
5549 5547 */
5550 5548
5551 5549 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5552 5550 size_t diff;
5553 5551 struct anon *ap;
5554 5552 int anon_index;
5555 5553 anon_sync_obj_t cookie;
5556 5554 int hat_flag = HAT_LOAD_ADV;
5557 5555
5558 5556 if (svd->flags & MAP_TEXT) {
5559 5557 hat_flag |= HAT_LOAD_TEXT;
5560 5558 }
5561 5559
5562 5560 if (pp == PAGE_HANDLED)
5563 5561 continue;
5564 5562
5565 5563 if (svd->tr_state != SEGVN_TR_ON &&
5566 5564 pp->p_offset >= svd->offset &&
5567 5565 pp->p_offset < svd->offset + seg->s_size) {
5568 5566
5569 5567 diff = pp->p_offset - svd->offset;
5570 5568
5571 5569 /*
5572 5570 * Large Files: Following is the assertion
5573 5571 * validating the above cast.
5574 5572 */
5575 5573 ASSERT(svd->vp == pp->p_vnode);
5576 5574
5577 5575 page = btop(diff);
5578 5576 if (svd->pageprot)
5579 5577 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5580 5578
5581 5579 /*
5582 5580 * Prevent other threads in the address space from
5583 5581 * creating private pages (i.e., allocating anon slots)
5584 5582 * while we are in the process of loading translations
5585 5583 * to additional pages returned by the underlying
5586 5584 * object.
5587 5585 */
5588 5586 if (amp != NULL) {
5589 5587 anon_index = svd->anon_index + page;
5590 5588 anon_array_enter(amp, anon_index, &cookie);
5591 5589 ap = anon_get_ptr(amp->ahp, anon_index);
5592 5590 }
5593 5591 if ((amp == NULL) || (ap == NULL)) {
5594 5592 if (IS_VMODSORT(pp->p_vnode) ||
5595 5593 enable_mbit_wa) {
5596 5594 if (rw == S_WRITE)
5597 5595 hat_setmod(pp);
5598 5596 else if (rw != S_OTHER &&
5599 5597 !hat_ismod(pp))
5600 5598 prot &= ~PROT_WRITE;
5601 5599 }
5602 5600 /*
5603 5601 * Skip mapping read ahead pages marked
5604 5602 * for migration, so they will get migrated
5605 5603 * properly on fault
5606 5604 */
5607 5605 ASSERT(amp == NULL ||
5608 5606 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5609 5607 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5610 5608 hat_memload_region(hat,
5611 5609 seg->s_base + diff,
5612 5610 pp, prot, hat_flag,
5613 5611 svd->rcookie);
5614 5612 }
5615 5613 }
5616 5614 if (amp != NULL)
5617 5615 anon_array_exit(&cookie);
5618 5616 }
5619 5617 page_unlock(pp);
5620 5618 }
5621 5619 done:
5622 5620 if (amp != NULL)
5623 5621 ANON_LOCK_EXIT(&amp->a_rwlock);
5624 5622 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5625 5623 if (pl_alloc_sz)
5626 5624 kmem_free(plp, pl_alloc_sz);
5627 5625 return (0);
5628 5626 }
5629 5627
5630 5628 /*
5631 5629 * This routine is used to start I/O on pages asynchronously. XXX it will
5632 5630 * only create PAGESIZE pages. At fault time they will be relocated into
5633 5631 * larger pages.
5634 5632 */
5635 5633 static faultcode_t
5636 5634 segvn_faulta(struct seg *seg, caddr_t addr)
5637 5635 {
5638 5636 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5639 5637 int err;
5640 5638 struct anon_map *amp;
5641 5639 vnode_t *vp;
5642 5640
5643 5641 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5644 5642
5645 5643 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5646 5644 if ((amp = svd->amp) != NULL) {
5647 5645 struct anon *ap;
5648 5646
5649 5647 /*
5650 5648 * Reader lock to prevent amp->ahp from being changed.
5651 5649 * This is advisory; it's ok to miss a page, so
5652 5650 * we don't take the anon_array_enter() lock.
5653 5651 */
5654 5652 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5655 5653 if ((ap = anon_get_ptr(amp->ahp,
5656 5654 svd->anon_index + seg_page(seg, addr))) != NULL) {
5657 5655
5658 5656 err = anon_getpage(&ap, NULL, NULL,
5659 5657 0, seg, addr, S_READ, svd->cred);
5660 5658
5661 5659 ANON_LOCK_EXIT(&amp->a_rwlock);
5662 5660 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5663 5661 if (err)
5664 5662 return (FC_MAKE_ERR(err));
5665 5663 return (0);
5666 5664 }
5667 5665 ANON_LOCK_EXIT(&amp->a_rwlock);
5668 5666 }
5669 5667
5670 5668 if (svd->vp == NULL) {
5671 5669 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5672 5670 return (0); /* zfod page - do nothing now */
5673 5671 }
5674 5672
5675 5673 vp = svd->vp;
5676 5674 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5677 5675 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5678 5676 err = VOP_GETPAGE(vp,
5679 5677 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5680 5678 PAGESIZE, NULL, NULL, 0, seg, addr,
5681 5679 S_OTHER, svd->cred, NULL);
5682 5680
5683 5681 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5684 5682 if (err)
5685 5683 return (FC_MAKE_ERR(err));
5686 5684 return (0);
5687 5685 }
5688 5686
5689 5687 static int
5690 5688 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5691 5689 {
5692 5690 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5693 5691 struct vpage *cvp, *svp, *evp;
5694 5692 struct vnode *vp;
5695 5693 size_t pgsz;
5696 5694 pgcnt_t pgcnt;
5697 5695 anon_sync_obj_t cookie;
5698 5696 int unload_done = 0;
5699 5697
5700 5698 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5701 5699
5702 5700 if ((svd->maxprot & prot) != prot)
5703 5701 return (EACCES); /* violated maxprot */
5704 5702
5705 5703 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5706 5704
5707 5705 /* return if prot is the same */
5708 5706 if (!svd->pageprot && svd->prot == prot) {
5709 5707 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5710 5708 return (0);
5711 5709 }
5712 5710
5713 5711 /*
5714 5712 * Since we change protections we first have to flush the cache.
5715 5713 * This makes sure all the pagelock calls have to recheck
5716 5714 * protections.
5717 5715 */
5718 5716 if (svd->softlockcnt > 0) {
5719 5717 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5720 5718
5721 5719 /*
5722 5720 * If this is a shared segment, a non-zero softlockcnt
5723 5721 * means locked pages are still in use.
5724 5722 */
5725 5723 if (svd->type == MAP_SHARED) {
5726 5724 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5727 5725 return (EAGAIN);
5728 5726 }
5729 5727
5730 5728 /*
5731 5729 * Since we do have the segvn writers lock nobody can fill
5732 5730 * the cache with entries belonging to this seg during
5733 5731 * the purge. The flush either succeeds or we still have
5734 5732 * pending I/Os.
5735 5733 */
5736 5734 segvn_purge(seg);
5737 5735 if (svd->softlockcnt > 0) {
5738 5736 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5739 5737 return (EAGAIN);
5740 5738 }
5741 5739 }
5742 5740
5743 5741 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5744 5742 ASSERT(svd->amp == NULL);
5745 5743 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5746 5744 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5747 5745 HAT_REGION_TEXT);
5748 5746 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5749 5747 unload_done = 1;
5750 5748 } else if (svd->tr_state == SEGVN_TR_INIT) {
5751 5749 svd->tr_state = SEGVN_TR_OFF;
5752 5750 } else if (svd->tr_state == SEGVN_TR_ON) {
5753 5751 ASSERT(svd->amp != NULL);
5754 5752 segvn_textunrepl(seg, 0);
5755 5753 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5756 5754 unload_done = 1;
5757 5755 }
5758 5756
5759 5757 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5760 5758 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5761 5759 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5762 5760 segvn_inval_trcache(svd->vp);
5763 5761 }
5764 5762 if (seg->s_szc != 0) {
5765 5763 int err;
5766 5764 pgsz = page_get_pagesize(seg->s_szc);
5767 5765 pgcnt = pgsz >> PAGESHIFT;
5768 5766 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5769 5767 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5770 5768 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5771 5769 ASSERT(seg->s_base != addr || seg->s_size != len);
5772 5770 /*
5773 5771 * If we are holding the as lock as a reader then
5774 5772 * we need to return IE_RETRY and let the as
5775 5773 * layer drop and re-acquire the lock as a writer.
5776 5774 */
5777 5775 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5778 5776 return (IE_RETRY);
5779 5777 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5780 5778 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5781 5779 err = segvn_demote_range(seg, addr, len,
5782 5780 SDR_END, 0);
5783 5781 } else {
5784 5782 uint_t szcvec = map_pgszcvec(seg->s_base,
5785 5783 pgsz, (uintptr_t)seg->s_base,
5786 5784 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5787 5785 err = segvn_demote_range(seg, addr, len,
5788 5786 SDR_END, szcvec);
5789 5787 }
5790 5788 if (err == 0)
5791 5789 return (IE_RETRY);
5792 5790 if (err == ENOMEM)
5793 5791 return (IE_NOMEM);
5794 5792 return (err);
5795 5793 }
5796 5794 }
5797 5795
5798 5796
5799 5797 /*
5800 5798 * If it's a private mapping and we're making it writable then we
5801 5799 * may have to reserve the additional swap space now. If we are
5802 5800 * making writable only a part of the segment then we use its vpage
5803 5801 * array to keep a record of the pages for which we have reserved
5804 5802 * swap. In this case we set the pageswap field in the segment's
5805 5803 * segvn structure to record this.
5806 5804 *
5807 5805 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5808 5806 * removing write permission on the entire segment and we haven't
5809 5807 * modified any pages, we can release the swap space.
5810 5808 */
5811 5809 if (svd->type == MAP_PRIVATE) {
5812 5810 if (prot & PROT_WRITE) {
5813 5811 if (!(svd->flags & MAP_NORESERVE) &&
5814 5812 !(svd->swresv && svd->pageswap == 0)) {
5815 5813 size_t sz = 0;
5816 5814
5817 5815 /*
5818 5816 * Start by determining how much swap
5819 5817 * space is required.
5820 5818 */
5821 5819 if (addr == seg->s_base &&
5822 5820 len == seg->s_size &&
5823 5821 svd->pageswap == 0) {
5824 5822 /* The whole segment */
5825 5823 sz = seg->s_size;
5826 5824 } else {
5827 5825 /*
5828 5826 * Make sure that the vpage array
5829 5827 * exists, and make a note of the
5830 5828 * range of elements corresponding
5831 5829 * to len.
5832 5830 */
5833 5831 segvn_vpage(seg);
5834 5832 if (svd->vpage == NULL) {
5835 5833 SEGVN_LOCK_EXIT(seg->s_as,
5836 5834 &svd->lock);
5837 5835 return (ENOMEM);
5838 5836 }
5839 5837 svp = &svd->vpage[seg_page(seg, addr)];
5840 5838 evp = &svd->vpage[seg_page(seg,
5841 5839 addr + len)];
5842 5840
5843 5841 if (svd->pageswap == 0) {
5844 5842 /*
5845 5843 * This is the first time we've
5846 5844 * asked for a part of this
5847 5845 * segment, so we need to
5848 5846 * reserve everything we've
5849 5847 * been asked for.
5850 5848 */
5851 5849 sz = len;
5852 5850 } else {
5853 5851 /*
5854 5852 * We have to count the number
5855 5853 * of pages required.
5856 5854 */
5857 5855 for (cvp = svp; cvp < evp;
5858 5856 cvp++) {
5859 5857 if (!VPP_ISSWAPRES(cvp))
5860 5858 sz++;
5861 5859 }
5862 5860 sz <<= PAGESHIFT;
5863 5861 }
5864 5862 }
5865 5863
5866 5864 /* Try to reserve the necessary swap. */
5867 5865 if (anon_resv_zone(sz,
5868 5866 seg->s_as->a_proc->p_zone) == 0) {
5869 5867 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5870 5868 return (IE_NOMEM);
5871 5869 }
5872 5870
5873 5871 /*
5874 5872 * Make a note of how much swap space
5875 5873 * we've reserved.
5876 5874 */
5877 5875 if (svd->pageswap == 0 && sz == seg->s_size) {
5878 5876 svd->swresv = sz;
5879 5877 } else {
5880 5878 ASSERT(svd->vpage != NULL);
5881 5879 svd->swresv += sz;
5882 5880 svd->pageswap = 1;
5883 5881 for (cvp = svp; cvp < evp; cvp++) {
5884 5882 if (!VPP_ISSWAPRES(cvp))
5885 5883 VPP_SETSWAPRES(cvp);
5886 5884 }
5887 5885 }
5888 5886 }
5889 5887 } else {
5890 5888 /*
5891 5889 * Swap space is released only if this segment
5892 5890 * does not map anonymous memory, since read faults
5893 5891 * on such segments still need an anon slot to read
5894 5892 * in the data.
5895 5893 */
5896 5894 if (svd->swresv != 0 && svd->vp != NULL &&
5897 5895 svd->amp == NULL && addr == seg->s_base &&
5898 5896 len == seg->s_size && svd->pageprot == 0) {
5899 5897 ASSERT(svd->pageswap == 0);
5900 5898 anon_unresv_zone(svd->swresv,
5901 5899 seg->s_as->a_proc->p_zone);
5902 5900 svd->swresv = 0;
5903 5901 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5904 5902 "anon proc:%p %lu %u", seg, 0, 0);
5905 5903 }
5906 5904 }
5907 5905 }
5908 5906
5909 5907 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5910 5908 if (svd->prot == prot) {
5911 5909 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5912 5910 return (0); /* all done */
5913 5911 }
5914 5912 svd->prot = (uchar_t)prot;
5915 5913 } else if (svd->type == MAP_PRIVATE) {
5916 5914 struct anon *ap = NULL;
5917 5915 page_t *pp;
5918 5916 u_offset_t offset, off;
5919 5917 struct anon_map *amp;
5920 5918 ulong_t anon_idx = 0;
5921 5919
5922 5920 /*
5923 5921 * A vpage structure exists or else the change does not
5924 5922 * involve the entire segment. Establish a vpage structure
5925 5923 * if none is there. Then, for each page in the range,
5926 5924 * adjust its individual permissions. Note that write-
5927 5925 * enabling a MAP_PRIVATE page can affect the claims for
5928 5926 * locked down memory. Overcommitting memory terminates
5929 5927 * the operation.
5930 5928 */
5931 5929 segvn_vpage(seg);
5932 5930 if (svd->vpage == NULL) {
5933 5931 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5934 5932 return (ENOMEM);
5935 5933 }
5936 5934 svd->pageprot = 1;
5937 5935 if ((amp = svd->amp) != NULL) {
5938 5936 anon_idx = svd->anon_index + seg_page(seg, addr);
5939 5937 ASSERT(seg->s_szc == 0 ||
5940 5938 IS_P2ALIGNED(anon_idx, pgcnt));
5941 5939 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5942 5940 }
5943 5941
5944 5942 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5945 5943 evp = &svd->vpage[seg_page(seg, addr + len)];
5946 5944
5947 5945 /*
5948 5946 * See Statement at the beginning of segvn_lockop regarding
5949 5947 * the way cowcnts and lckcnts are handled.
5950 5948 */
5951 5949 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5952 5950
5953 5951 if (seg->s_szc != 0) {
5954 5952 if (amp != NULL) {
5955 5953 anon_array_enter(amp, anon_idx,
5956 5954 &cookie);
5957 5955 }
5958 5956 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5959 5957 !segvn_claim_pages(seg, svp, offset,
5960 5958 anon_idx, prot)) {
5961 5959 if (amp != NULL) {
5962 5960 anon_array_exit(&cookie);
5963 5961 }
5964 5962 break;
5965 5963 }
5966 5964 if (amp != NULL) {
5967 5965 anon_array_exit(&cookie);
5968 5966 }
5969 5967 anon_idx++;
5970 5968 } else {
5971 5969 if (amp != NULL) {
5972 5970 anon_array_enter(amp, anon_idx,
5973 5971 &cookie);
5974 5972 ap = anon_get_ptr(amp->ahp, anon_idx++);
5975 5973 }
5976 5974
5977 5975 if (VPP_ISPPLOCK(svp) &&
5978 5976 VPP_PROT(svp) != prot) {
5979 5977
5980 5978 if (amp == NULL || ap == NULL) {
5981 5979 vp = svd->vp;
5982 5980 off = offset;
5983 5981 } else
5984 5982 swap_xlate(ap, &vp, &off);
5985 5983 if (amp != NULL)
5986 5984 anon_array_exit(&cookie);
5987 5985
5988 5986 if ((pp = page_lookup(vp, off,
5989 5987 SE_SHARED)) == NULL) {
5990 5988 panic("segvn_setprot: no page");
5991 5989 /*NOTREACHED*/
5992 5990 }
5993 5991 ASSERT(seg->s_szc == 0);
5994 5992 if ((VPP_PROT(svp) ^ prot) &
5995 5993 PROT_WRITE) {
5996 5994 if (prot & PROT_WRITE) {
5997 5995 if (!page_addclaim(
5998 5996 pp)) {
5999 5997 page_unlock(pp);
6000 5998 break;
6001 5999 }
6002 6000 } else {
6003 6001 if (!page_subclaim(
6004 6002 pp)) {
6005 6003 page_unlock(pp);
6006 6004 break;
6007 6005 }
6008 6006 }
6009 6007 }
6010 6008 page_unlock(pp);
6011 6009 } else if (amp != NULL)
6012 6010 anon_array_exit(&cookie);
6013 6011 }
6014 6012 VPP_SETPROT(svp, prot);
6015 6013 offset += PAGESIZE;
6016 6014 }
6017 6015 if (amp != NULL)
6018 6016 ANON_LOCK_EXIT(&amp->a_rwlock);
6019 6017
6020 6018 /*
6021 6019 * Did we terminate prematurely? If so, simply unload
6022 6020 * the translations to the things we've updated so far.
6023 6021 */
6024 6022 if (svp != evp) {
6025 6023 if (unload_done) {
6026 6024 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6027 6025 return (IE_NOMEM);
6028 6026 }
6029 6027 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6030 6028 PAGESIZE;
6031 6029 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
6032 6030 if (len != 0)
6033 6031 hat_unload(seg->s_as->a_hat, addr,
6034 6032 len, HAT_UNLOAD);
6035 6033 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6036 6034 return (IE_NOMEM);
6037 6035 }
6038 6036 } else {
6039 6037 segvn_vpage(seg);
6040 6038 if (svd->vpage == NULL) {
6041 6039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6042 6040 return (ENOMEM);
6043 6041 }
6044 6042 svd->pageprot = 1;
6045 6043 evp = &svd->vpage[seg_page(seg, addr + len)];
6046 6044 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6047 6045 VPP_SETPROT(svp, prot);
6048 6046 }
6049 6047 }
6050 6048
6051 6049 if (unload_done) {
6052 6050 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6053 6051 return (0);
6054 6052 }
6055 6053
6056 6054 if (((prot & PROT_WRITE) != 0 &&
6057 6055 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6058 6056 (prot & ~PROT_USER) == PROT_NONE) {
6059 6057 /*
6060 6058 * Either private or shared data with write access (in
6061 6059 * which case we need to throw out all former translations
6062 6060 * so that we get the right translations set up on fault
6063 6061 * and we don't allow write access to any copy-on-write pages
6064 6062 * that might be around or to prevent write access to pages
6065 6063 * representing holes in a file), or we don't have permission
6066 6064 * to access the memory at all (in which case we have to
6067 6065 * unload any current translations that might exist).
6068 6066 */
6069 6067 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6070 6068 } else {
6071 6069 /*
6072 6070 * A shared mapping or a private mapping in which write
6073 6071 * protection is going to be denied - just change all the
6074 6072 * protections over the range of addresses in question.
6075 6073 * segvn does not support any other attributes other
6076 6074 * than prot so we can use hat_chgattr.
6077 6075 */
6078 6076 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6079 6077 }
6080 6078
6081 6079 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6082 6080
6083 6081 return (0);
6084 6082 }
6085 6083
6086 6084 /*
6087 6085 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize,
6088 6086 * to determine if the seg is capable of mapping the requested szc.
6089 6087 */
6090 6088 static int
6091 6089 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6092 6090 {
6093 6091 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6094 6092 struct segvn_data *nsvd;
6095 6093 struct anon_map *amp = svd->amp;
6096 6094 struct seg *nseg;
6097 6095 caddr_t eaddr = addr + len, a;
6098 6096 size_t pgsz = page_get_pagesize(szc);
6099 6097 pgcnt_t pgcnt = page_get_pagecnt(szc);
6100 6098 int err;
6101 6099 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6102 6100
6103 6101 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6104 6102 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6105 6103
6106 6104 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6107 6105 return (0);
6108 6106 }
6109 6107
6110 6108 /*
6111 6109 * addr should always be pgsz aligned but eaddr may be misaligned if
6112 6110 * it's at the end of the segment.
6113 6111 *
6114 6112 * XXX we should assert this condition since as_setpagesize() logic
6115 6113 * guarantees it.
6116 6114 */
6117 6115 if (!IS_P2ALIGNED(addr, pgsz) ||
6118 6116 (!IS_P2ALIGNED(eaddr, pgsz) &&
6119 6117 eaddr != seg->s_base + seg->s_size)) {
6120 6118
6121 6119 segvn_setpgsz_align_err++;
6122 6120 return (EINVAL);
6123 6121 }
6124 6122
6125 6123 if (amp != NULL && svd->type == MAP_SHARED) {
6126 6124 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6127 6125 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6128 6126
6129 6127 segvn_setpgsz_anon_align_err++;
6130 6128 return (EINVAL);
6131 6129 }
6132 6130 }
6133 6131
6134 6132 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6135 6133 szc > segvn_maxpgszc) {
6136 6134 return (EINVAL);
6137 6135 }
6138 6136
6139 6137 /* paranoid check */
6140 6138 if (svd->vp != NULL &&
6141 6139 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6142 6140 return (EINVAL);
6143 6141 }
6144 6142
6145 6143 if (seg->s_szc == 0 && svd->vp != NULL &&
6146 6144 map_addr_vacalign_check(addr, off)) {
6147 6145 return (EINVAL);
6148 6146 }
6149 6147
6150 6148 /*
6151 6149 * Check that protections are the same within new page
6152 6150 * size boundaries.
6153 6151 */
6154 6152 if (svd->pageprot) {
6155 6153 for (a = addr; a < eaddr; a += pgsz) {
6156 6154 if ((a + pgsz) > eaddr) {
6157 6155 if (!sameprot(seg, a, eaddr - a)) {
6158 6156 return (EINVAL);
6159 6157 }
6160 6158 } else {
6161 6159 if (!sameprot(seg, a, pgsz)) {
6162 6160 return (EINVAL);
6163 6161 }
6164 6162 }
6165 6163 }
6166 6164 }
6167 6165
6168 6166 /*
6169 6167 * Since we are changing page size we first have to flush
6170 6168 * the cache. This makes sure all the pagelock calls have
6171 6169 * to recheck protections.
6172 6170 */
6173 6171 if (svd->softlockcnt > 0) {
6174 6172 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6175 6173
6176 6174 /*
6177 6175 * If this is a shared segment, a non-zero softlockcnt
6178 6176 * means locked pages are still in use.
6179 6177 */
6180 6178 if (svd->type == MAP_SHARED) {
6181 6179 return (EAGAIN);
6182 6180 }
6183 6181
6184 6182 /*
6185 6183 * Since we do have the segvn writers lock nobody can fill
6186 6184 * the cache with entries belonging to this seg during
6187 6185 * the purge. The flush either succeeds or we still have
6188 6186 * pending I/Os.
6189 6187 */
6190 6188 segvn_purge(seg);
6191 6189 if (svd->softlockcnt > 0) {
6192 6190 return (EAGAIN);
6193 6191 }
6194 6192 }
6195 6193
6196 6194 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6197 6195 ASSERT(svd->amp == NULL);
6198 6196 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6199 6197 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6200 6198 HAT_REGION_TEXT);
6201 6199 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6202 6200 } else if (svd->tr_state == SEGVN_TR_INIT) {
6203 6201 svd->tr_state = SEGVN_TR_OFF;
6204 6202 } else if (svd->tr_state == SEGVN_TR_ON) {
6205 6203 ASSERT(svd->amp != NULL);
6206 6204 segvn_textunrepl(seg, 1);
6207 6205 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6208 6206 amp = NULL;
6209 6207 }
6210 6208
6211 6209 /*
6212 6210 * Operation for sub range of existing segment.
6213 6211 */
6214 6212 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6215 6213 if (szc < seg->s_szc) {
6216 6214 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6217 6215 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6218 6216 if (err == 0) {
6219 6217 return (IE_RETRY);
6220 6218 }
6221 6219 if (err == ENOMEM) {
6222 6220 return (IE_NOMEM);
6223 6221 }
6224 6222 return (err);
6225 6223 }
6226 6224 if (addr != seg->s_base) {
6227 6225 nseg = segvn_split_seg(seg, addr);
6228 6226 if (eaddr != (nseg->s_base + nseg->s_size)) {
6229 6227 /* eaddr is szc aligned */
6230 6228 (void) segvn_split_seg(nseg, eaddr);
6231 6229 }
6232 6230 return (IE_RETRY);
6233 6231 }
6234 6232 if (eaddr != (seg->s_base + seg->s_size)) {
6235 6233 /* eaddr is szc aligned */
6236 6234 (void) segvn_split_seg(seg, eaddr);
6237 6235 }
6238 6236 return (IE_RETRY);
6239 6237 }
6240 6238
6241 6239 /*
6242 6240 * Break any low level sharing and reset seg->s_szc to 0.
6243 6241 */
6244 6242 if ((err = segvn_clrszc(seg)) != 0) {
6245 6243 if (err == ENOMEM) {
6246 6244 err = IE_NOMEM;
6247 6245 }
6248 6246 return (err);
6249 6247 }
6250 6248 ASSERT(seg->s_szc == 0);
6251 6249
6252 6250 /*
6253 6251 * If the end of the current segment is not pgsz aligned
6254 6252 * then attempt to concatenate with the next segment.
6255 6253 */
6256 6254 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6257 6255 nseg = AS_SEGNEXT(seg->s_as, seg);
6258 6256 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6259 6257 return (ENOMEM);
6260 6258 }
6261 6259 if (nseg->s_ops != &segvn_ops) {
6262 6260 return (EINVAL);
6263 6261 }
6264 6262 nsvd = (struct segvn_data *)nseg->s_data;
6265 6263 if (nsvd->softlockcnt > 0) {
6266 6264 /*
6267 6265 * If this is a shared segment, a non-zero softlockcnt
6268 6266 * means locked pages are still in use.
6269 6267 */
6270 6268 if (nsvd->type == MAP_SHARED) {
6271 6269 return (EAGAIN);
6272 6270 }
6273 6271 segvn_purge(nseg);
6274 6272 if (nsvd->softlockcnt > 0) {
6275 6273 return (EAGAIN);
6276 6274 }
6277 6275 }
6278 6276 err = segvn_clrszc(nseg);
6279 6277 if (err == ENOMEM) {
6280 6278 err = IE_NOMEM;
6281 6279 }
6282 6280 if (err != 0) {
6283 6281 return (err);
6284 6282 }
6285 6283 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6286 6284 err = segvn_concat(seg, nseg, 1);
6287 6285 if (err == -1) {
6288 6286 return (EINVAL);
6289 6287 }
6290 6288 if (err == -2) {
6291 6289 return (IE_NOMEM);
6292 6290 }
6293 6291 return (IE_RETRY);
6294 6292 }
6295 6293
6296 6294 /*
6297 6295 * May need to re-align anon array to
6298 6296 * new szc.
6299 6297 */
6300 6298 if (amp != NULL) {
6301 6299 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6302 6300 struct anon_hdr *nahp;
6303 6301
6304 6302 ASSERT(svd->type == MAP_PRIVATE);
6305 6303
6306 6304 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6307 6305 ASSERT(amp->refcnt == 1);
6308 6306 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6309 6307 if (nahp == NULL) {
6310 6308 ANON_LOCK_EXIT(&amp->a_rwlock);
6311 6309 return (IE_NOMEM);
6312 6310 }
6313 6311 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6314 6312 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6315 6313 anon_release(nahp, btop(amp->size));
6316 6314 ANON_LOCK_EXIT(&amp->a_rwlock);
6317 6315 return (IE_NOMEM);
6318 6316 }
6319 6317 anon_release(amp->ahp, btop(amp->size));
6320 6318 amp->ahp = nahp;
6321 6319 svd->anon_index = 0;
6322 6320 ANON_LOCK_EXIT(&amp->a_rwlock);
6323 6321 }
6324 6322 }
6325 6323 if (svd->vp != NULL && szc != 0) {
6326 6324 struct vattr va;
6327 6325 u_offset_t eoffpage = svd->offset;
6328 6326 va.va_mask = AT_SIZE;
6329 6327 eoffpage += seg->s_size;
6330 6328 eoffpage = btopr(eoffpage);
6331 6329 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6332 6330 segvn_setpgsz_getattr_err++;
6333 6331 return (EINVAL);
6334 6332 }
6335 6333 if (btopr(va.va_size) < eoffpage) {
6336 6334 segvn_setpgsz_eof_err++;
6337 6335 return (EINVAL);
6338 6336 }
6339 6337 if (amp != NULL) {
6340 6338 /*
6341 6339 * anon_fill_cow_holes() may call VOP_GETPAGE().
6342 6340 * don't take anon map lock here to avoid holding it
6343 6341 * across VOP_GETPAGE() calls that may call back into
6344 6342 * segvn for klustering checks. We don't really need
6345 6343 * anon map lock here since it's a private segment and
6346 6344 * we hold as level lock as writers.
6347 6345 */
6348 6346 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6349 6347 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6350 6348 seg->s_size, szc, svd->prot, svd->vpage,
6351 6349 svd->cred)) != 0) {
6352 6350 return (EINVAL);
6353 6351 }
6354 6352 }
6355 6353 segvn_setvnode_mpss(svd->vp);
6356 6354 }
6357 6355
6358 6356 if (amp != NULL) {
6359 6357 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6360 6358 if (svd->type == MAP_PRIVATE) {
6361 6359 amp->a_szc = szc;
6362 6360 } else if (szc > amp->a_szc) {
6363 6361 amp->a_szc = szc;
6364 6362 }
6365 6363 ANON_LOCK_EXIT(&amp->a_rwlock);
6366 6364 }
6367 6365
6368 6366 seg->s_szc = szc;
6369 6367
6370 6368 return (0);
6371 6369 }
6372 6370
6373 6371 static int
6374 6372 segvn_clrszc(struct seg *seg)
6375 6373 {
6376 6374 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6377 6375 struct anon_map *amp = svd->amp;
6378 6376 size_t pgsz;
6379 6377 pgcnt_t pages;
6380 6378 int err = 0;
6381 6379 caddr_t a = seg->s_base;
6382 6380 caddr_t ea = a + seg->s_size;
6383 6381 ulong_t an_idx = svd->anon_index;
6384 6382 vnode_t *vp = svd->vp;
6385 6383 struct vpage *vpage = svd->vpage;
6386 6384 page_t *anon_pl[1 + 1], *pp;
6387 6385 struct anon *ap, *oldap;
6388 6386 uint_t prot = svd->prot, vpprot;
6389 6387 int pageflag = 0;
6390 6388
6391 6389 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6392 6390 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6393 6391 ASSERT(svd->softlockcnt == 0);
6394 6392
6395 6393 if (vp == NULL && amp == NULL) {
6396 6394 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6397 6395 seg->s_szc = 0;
6398 6396 return (0);
6399 6397 }
6400 6398
6401 6399 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6402 6400 ASSERT(svd->amp == NULL);
6403 6401 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6404 6402 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6405 6403 HAT_REGION_TEXT);
6406 6404 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6407 6405 } else if (svd->tr_state == SEGVN_TR_ON) {
6408 6406 ASSERT(svd->amp != NULL);
6409 6407 segvn_textunrepl(seg, 1);
6410 6408 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6411 6409 amp = NULL;
6412 6410 } else {
6413 6411 if (svd->tr_state != SEGVN_TR_OFF) {
6414 6412 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6415 6413 svd->tr_state = SEGVN_TR_OFF;
6416 6414 }
6417 6415
6418 6416 /*
6419 6417 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6420 6418 * unload argument is 0 when we are freeing the segment
6421 6419 * and unload was already done.
6422 6420 */
6423 6421 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6424 6422 HAT_UNLOAD_UNMAP);
6425 6423 }
6426 6424
6427 6425 if (amp == NULL || svd->type == MAP_SHARED) {
6428 6426 seg->s_szc = 0;
6429 6427 return (0);
6430 6428 }
6431 6429
6432 6430 pgsz = page_get_pagesize(seg->s_szc);
6433 6431 pages = btop(pgsz);
6434 6432
6435 6433 /*
6436 6434 * XXX anon rwlock is not really needed because this is a
6437 6435 * private segment and we are writers.
6438 6436 */
6439 6437 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6440 6438
6441 6439 for (; a < ea; a += pgsz, an_idx += pages) {
6442 6440 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6443 6441 ASSERT(vpage != NULL || svd->pageprot == 0);
6444 6442 if (vpage != NULL) {
6445 6443 ASSERT(sameprot(seg, a, pgsz));
6446 6444 prot = VPP_PROT(vpage);
6447 6445 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6448 6446 }
6449 6447 if (seg->s_szc != 0) {
6450 6448 ASSERT(vp == NULL || anon_pages(amp->ahp,
6451 6449 an_idx, pages) == pages);
6452 6450 if ((err = anon_map_demotepages(amp, an_idx,
6453 6451 seg, a, prot, vpage, svd->cred)) != 0) {
6454 6452 goto out;
6455 6453 }
6456 6454 } else {
6457 6455 if (oldap->an_refcnt == 1) {
6458 6456 continue;
6459 6457 }
6460 6458 if ((err = anon_getpage(&oldap, &vpprot,
6461 6459 anon_pl, PAGESIZE, seg, a, S_READ,
6462 6460 svd->cred))) {
6463 6461 goto out;
6464 6462 }
6465 6463 if ((pp = anon_private(&ap, seg, a, prot,
6466 6464 anon_pl[0], pageflag, svd->cred)) == NULL) {
6467 6465 err = ENOMEM;
6468 6466 goto out;
6469 6467 }
6470 6468 anon_decref(oldap);
6471 6469 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6472 6470 ANON_SLEEP);
6473 6471 page_unlock(pp);
6474 6472 }
6475 6473 }
6476 6474 vpage = (vpage == NULL) ? NULL : vpage + pages;
6477 6475 }
6478 6476
6479 6477 amp->a_szc = 0;
6480 6478 seg->s_szc = 0;
6481 6479 out:
6482 6480 ANON_LOCK_EXIT(&amp->a_rwlock);
6483 6481 return (err);
6484 6482 }
6485 6483
6486 6484 static int
6487 6485 segvn_claim_pages(
6488 6486 struct seg *seg,
6489 6487 struct vpage *svp,
6490 6488 u_offset_t off,
6491 6489 ulong_t anon_idx,
6492 6490 uint_t prot)
6493 6491 {
6494 6492 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6495 6493 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6496 6494 page_t **ppa;
6497 6495 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6498 6496 struct anon_map *amp = svd->amp;
6499 6497 struct vpage *evp = svp + pgcnt;
6500 6498 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6501 6499 + seg->s_base;
6502 6500 struct anon *ap;
6503 6501 struct vnode *vp = svd->vp;
6504 6502 page_t *pp;
6505 6503 pgcnt_t pg_idx, i;
6506 6504 int err = 0;
6507 6505 anoff_t aoff;
6508 6506 int anon = (amp != NULL) ? 1 : 0;
6509 6507
6510 6508 ASSERT(svd->type == MAP_PRIVATE);
6511 6509 ASSERT(svd->vpage != NULL);
6512 6510 ASSERT(seg->s_szc != 0);
6513 6511 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6514 6512 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6515 6513 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6516 6514
6517 6515 if (VPP_PROT(svp) == prot)
6518 6516 return (1);
6519 6517 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6520 6518 return (1);
6521 6519
6522 6520 ppa = kmem_alloc(ppasize, KM_SLEEP);
6523 6521 if (anon && vp != NULL) {
6524 6522 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6525 6523 anon = 0;
6526 6524 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6527 6525 }
6528 6526 ASSERT(!anon ||
6529 6527 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6530 6528 }
6531 6529
6532 6530 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6533 6531 if (!VPP_ISPPLOCK(svp))
6534 6532 continue;
6535 6533 if (anon) {
6536 6534 ap = anon_get_ptr(amp->ahp, anon_idx);
6537 6535 if (ap == NULL) {
6538 6536 panic("segvn_claim_pages: no anon slot");
6539 6537 }
6540 6538 swap_xlate(ap, &vp, &aoff);
6541 6539 off = (u_offset_t)aoff;
6542 6540 }
6543 6541 ASSERT(vp != NULL);
6544 6542 if ((pp = page_lookup(vp,
6545 6543 (u_offset_t)off, SE_SHARED)) == NULL) {
6546 6544 panic("segvn_claim_pages: no page");
6547 6545 }
6548 6546 ppa[pg_idx++] = pp;
6549 6547 off += PAGESIZE;
6550 6548 }
6551 6549
6552 6550 if (ppa[0] == NULL) {
6553 6551 kmem_free(ppa, ppasize);
6554 6552 return (1);
6555 6553 }
6556 6554
6557 6555 ASSERT(pg_idx <= pgcnt);
6558 6556 ppa[pg_idx] = NULL;
6559 6557
6560 6558
6561 6559 /* Find each large page within ppa, and adjust its claim */
6562 6560
6563 6561 /* Does ppa cover a single large page? */
6564 6562 if (ppa[0]->p_szc == seg->s_szc) {
6565 6563 if (prot & PROT_WRITE)
6566 6564 err = page_addclaim_pages(ppa);
6567 6565 else
6568 6566 err = page_subclaim_pages(ppa);
6569 6567 } else {
6570 6568 for (i = 0; ppa[i]; i += pgcnt) {
6571 6569 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6572 6570 if (prot & PROT_WRITE)
6573 6571 err = page_addclaim_pages(&ppa[i]);
6574 6572 else
6575 6573 err = page_subclaim_pages(&ppa[i]);
6576 6574 if (err == 0)
6577 6575 break;
6578 6576 }
6579 6577 }
6580 6578
6581 6579 for (i = 0; i < pg_idx; i++) {
6582 6580 ASSERT(ppa[i] != NULL);
6583 6581 page_unlock(ppa[i]);
6584 6582 }
6585 6583
6586 6584 kmem_free(ppa, ppasize);
6587 6585 return (err);
6588 6586 }
6589 6587
6590 6588 /*
6591 6589 * Returns right (upper address) segment if split occurred.
6592 6590 * If the address is equal to the beginning or end of its segment it returns
6593 6591 * the current segment.
6594 6592 */
6595 6593 static struct seg *
6596 6594 segvn_split_seg(struct seg *seg, caddr_t addr)
6597 6595 {
6598 6596 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6599 6597 struct seg *nseg;
6600 6598 size_t nsize;
6601 6599 struct segvn_data *nsvd;
6602 6600
6603 6601 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6604 6602 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6605 6603
6606 6604 ASSERT(addr >= seg->s_base);
6607 6605 ASSERT(addr <= seg->s_base + seg->s_size);
6608 6606 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6609 6607
6610 6608 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6611 6609 return (seg);
6612 6610
6613 6611 nsize = seg->s_base + seg->s_size - addr;
6614 6612 seg->s_size = addr - seg->s_base;
6615 6613 nseg = seg_alloc(seg->s_as, addr, nsize);
6616 6614 ASSERT(nseg != NULL);
6617 6615 nseg->s_ops = seg->s_ops;
6618 6616 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6619 6617 nseg->s_data = (void *)nsvd;
6620 6618 nseg->s_szc = seg->s_szc;
6621 6619 *nsvd = *svd;
6622 6620 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6623 6621 nsvd->seg = nseg;
6624 6622 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6625 6623
6626 6624 if (nsvd->vp != NULL) {
6627 6625 VN_HOLD(nsvd->vp);
6628 6626 nsvd->offset = svd->offset +
6629 6627 (uintptr_t)(nseg->s_base - seg->s_base);
6630 6628 if (nsvd->type == MAP_SHARED)
6631 6629 lgrp_shm_policy_init(NULL, nsvd->vp);
6632 6630 } else {
6633 6631 /*
6634 6632 		 * The offset for an anonymous segment has no significance in
6635 6633 * terms of an offset into a file. If we were to use the above
6636 6634 * calculation instead, the structures read out of
6637 6635 * /proc/<pid>/xmap would be more difficult to decipher since
6638 6636 * it would be unclear whether two seemingly contiguous
6639 6637 * prxmap_t structures represented different segments or a
6640 6638 * single segment that had been split up into multiple prxmap_t
6641 6639 * structures (e.g. if some part of the segment had not yet
6642 6640 * been faulted in).
6643 6641 */
6644 6642 nsvd->offset = 0;
6645 6643 }
6646 6644
6647 6645 ASSERT(svd->softlockcnt == 0);
6648 6646 ASSERT(svd->softlockcnt_sbase == 0);
6649 6647 ASSERT(svd->softlockcnt_send == 0);
6650 6648 crhold(svd->cred);
6651 6649
6652 6650 if (svd->vpage != NULL) {
6653 6651 size_t bytes = vpgtob(seg_pages(seg));
6654 6652 size_t nbytes = vpgtob(seg_pages(nseg));
6655 6653 struct vpage *ovpage = svd->vpage;
6656 6654
6657 6655 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6658 6656 bcopy(ovpage, svd->vpage, bytes);
6659 6657 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6660 6658 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6661 6659 kmem_free(ovpage, bytes + nbytes);
6662 6660 }
6663 6661 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6664 6662 struct anon_map *oamp = svd->amp, *namp;
6665 6663 struct anon_hdr *nahp;
6666 6664
6667 6665 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6668 6666 ASSERT(oamp->refcnt == 1);
6669 6667 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6670 6668 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6671 6669 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6672 6670
6673 6671 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6674 6672 namp->a_szc = nseg->s_szc;
6675 6673 (void) anon_copy_ptr(oamp->ahp,
6676 6674 svd->anon_index + btop(seg->s_size),
6677 6675 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6678 6676 anon_release(oamp->ahp, btop(oamp->size));
6679 6677 oamp->ahp = nahp;
6680 6678 oamp->size = seg->s_size;
6681 6679 svd->anon_index = 0;
6682 6680 nsvd->amp = namp;
6683 6681 nsvd->anon_index = 0;
6684 6682 ANON_LOCK_EXIT(&oamp->a_rwlock);
6685 6683 } else if (svd->amp != NULL) {
6686 6684 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6687 6685 ASSERT(svd->amp == nsvd->amp);
6688 6686 ASSERT(seg->s_szc <= svd->amp->a_szc);
6689 6687 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6690 6688 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6691 6689 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6692 6690 svd->amp->refcnt++;
6693 6691 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6694 6692 }
6695 6693
6696 6694 /*
6697 6695 * Split the amount of swap reserved.
6698 6696 */
6699 6697 if (svd->swresv) {
6700 6698 /*
6701 6699 * For MAP_NORESERVE, only allocate swap reserve for pages
6702 6700 		 * being used. Other segments get enough to cover the whole
6703 6701 		 * segment.
6704 6702 */
6705 6703 if (svd->flags & MAP_NORESERVE) {
6706 6704 size_t oswresv;
6707 6705
6708 6706 ASSERT(svd->amp);
6709 6707 oswresv = svd->swresv;
6710 6708 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6711 6709 svd->anon_index, btop(seg->s_size)));
6712 6710 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6713 6711 nsvd->anon_index, btop(nseg->s_size)));
6714 6712 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6715 6713 } else {
6716 6714 if (svd->pageswap) {
6717 6715 svd->swresv = segvn_count_swap_by_vpages(seg);
6718 6716 ASSERT(nsvd->swresv >= svd->swresv);
6719 6717 nsvd->swresv -= svd->swresv;
6720 6718 } else {
6721 6719 ASSERT(svd->swresv == seg->s_size +
6722 6720 nseg->s_size);
6723 6721 svd->swresv = seg->s_size;
6724 6722 nsvd->swresv = nseg->s_size;
6725 6723 }
6726 6724 }
6727 6725 }
6728 6726
6729 6727 return (nseg);
6730 6728 }
6731 6729
6732 6730 /*
6733 6731 * called on memory operations (unmap, setprot, setpagesize) for a subset
6734 6732 * of a large page segment to either demote the memory range (SDR_RANGE)
6735 6733  * or its ends (SDR_END), as specified by addr/len.
6736 6734 *
6737 6735 * returns 0 on success. returns errno, including ENOMEM, on failure.
6738 6736 */
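A rough sketch of the large-page region computed by CALC_LPG_REGION for the SDR_END case, assuming it amounts to rounding the start of the range down and its end up to the large page size (clipping against the segment boundaries is ignored here); the addresses and the 2M page size are illustrative.

#include <assert.h>
#include <stdint.h>

#define	TOY_P2ALIGN(x, a)	((x) & ~((a) - 1))
#define	TOY_P2ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int
main(void)
{
	uintptr_t pgsz = 0x200000;		/* example 2M large page */
	uintptr_t addr = 0x10300000;		/* unaligned start of range */
	uintptr_t eaddr = 0x10750000;		/* unaligned end of range */

	/* the pieces demoted at the ends (SDR_END) are whole large pages */
	uintptr_t lpgaddr = TOY_P2ALIGN(addr, pgsz);
	uintptr_t lpgeaddr = TOY_P2ROUNDUP(eaddr, pgsz);

	assert(lpgaddr == 0x10200000 && lpgeaddr == 0x10800000);
	return (0);
}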
6739 6737 static int
6740 6738 segvn_demote_range(
6741 6739 struct seg *seg,
6742 6740 caddr_t addr,
6743 6741 size_t len,
6744 6742 int flag,
6745 6743 uint_t szcvec)
6746 6744 {
6747 6745 caddr_t eaddr = addr + len;
6748 6746 caddr_t lpgaddr, lpgeaddr;
6749 6747 struct seg *nseg;
6750 6748 struct seg *badseg1 = NULL;
6751 6749 struct seg *badseg2 = NULL;
6752 6750 size_t pgsz;
6753 6751 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6754 6752 int err;
6755 6753 uint_t szc = seg->s_szc;
6756 6754 uint_t tszcvec;
6757 6755
6758 6756 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6759 6757 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6760 6758 ASSERT(szc != 0);
6761 6759 pgsz = page_get_pagesize(szc);
6762 6760 ASSERT(seg->s_base != addr || seg->s_size != len);
6763 6761 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6764 6762 ASSERT(svd->softlockcnt == 0);
6765 6763 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6766 6764 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6767 6765
6768 6766 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6769 6767 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6770 6768 if (flag == SDR_RANGE) {
6771 6769 /* demote entire range */
6772 6770 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6773 6771 (void) segvn_split_seg(nseg, lpgeaddr);
6774 6772 ASSERT(badseg1->s_base == lpgaddr);
6775 6773 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6776 6774 } else if (addr != lpgaddr) {
6777 6775 ASSERT(flag == SDR_END);
6778 6776 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6779 6777 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6780 6778 eaddr < lpgaddr + 2 * pgsz) {
6781 6779 (void) segvn_split_seg(nseg, lpgeaddr);
6782 6780 ASSERT(badseg1->s_base == lpgaddr);
6783 6781 ASSERT(badseg1->s_size == 2 * pgsz);
6784 6782 } else {
6785 6783 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6786 6784 ASSERT(badseg1->s_base == lpgaddr);
6787 6785 ASSERT(badseg1->s_size == pgsz);
6788 6786 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6789 6787 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6790 6788 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6791 6789 badseg2 = nseg;
6792 6790 (void) segvn_split_seg(nseg, lpgeaddr);
6793 6791 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6794 6792 ASSERT(badseg2->s_size == pgsz);
6795 6793 }
6796 6794 }
6797 6795 } else {
6798 6796 ASSERT(flag == SDR_END);
6799 6797 ASSERT(eaddr < lpgeaddr);
6800 6798 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6801 6799 (void) segvn_split_seg(nseg, lpgeaddr);
6802 6800 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6803 6801 ASSERT(badseg1->s_size == pgsz);
6804 6802 }
6805 6803
6806 6804 ASSERT(badseg1 != NULL);
6807 6805 ASSERT(badseg1->s_szc == szc);
6808 6806 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6809 6807 badseg1->s_size == 2 * pgsz);
6810 6808 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6811 6809 ASSERT(badseg1->s_size == pgsz ||
6812 6810 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6813 6811 if (err = segvn_clrszc(badseg1)) {
6814 6812 return (err);
6815 6813 }
6816 6814 ASSERT(badseg1->s_szc == 0);
6817 6815
6818 6816 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6819 6817 uint_t tszc = highbit(tszcvec) - 1;
6820 6818 caddr_t ta = MAX(addr, badseg1->s_base);
6821 6819 caddr_t te;
6822 6820 size_t tpgsz = page_get_pagesize(tszc);
6823 6821
6824 6822 ASSERT(svd->type == MAP_SHARED);
6825 6823 ASSERT(flag == SDR_END);
6826 6824 ASSERT(tszc < szc && tszc > 0);
6827 6825
6828 6826 if (eaddr > badseg1->s_base + badseg1->s_size) {
6829 6827 te = badseg1->s_base + badseg1->s_size;
6830 6828 } else {
6831 6829 te = eaddr;
6832 6830 }
6833 6831
6834 6832 ASSERT(ta <= te);
6835 6833 badseg1->s_szc = tszc;
6836 6834 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6837 6835 if (badseg2 != NULL) {
6838 6836 err = segvn_demote_range(badseg1, ta, te - ta,
6839 6837 SDR_END, tszcvec);
6840 6838 if (err != 0) {
6841 6839 return (err);
6842 6840 }
6843 6841 } else {
6844 6842 return (segvn_demote_range(badseg1, ta,
6845 6843 te - ta, SDR_END, tszcvec));
6846 6844 }
6847 6845 }
6848 6846 }
6849 6847
6850 6848 if (badseg2 == NULL)
6851 6849 return (0);
6852 6850 ASSERT(badseg2->s_szc == szc);
6853 6851 ASSERT(badseg2->s_size == pgsz);
6854 6852 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6855 6853 if (err = segvn_clrszc(badseg2)) {
6856 6854 return (err);
6857 6855 }
6858 6856 ASSERT(badseg2->s_szc == 0);
6859 6857
6860 6858 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6861 6859 uint_t tszc = highbit(tszcvec) - 1;
6862 6860 size_t tpgsz = page_get_pagesize(tszc);
6863 6861
6864 6862 ASSERT(svd->type == MAP_SHARED);
6865 6863 ASSERT(flag == SDR_END);
6866 6864 ASSERT(tszc < szc && tszc > 0);
6867 6865 ASSERT(badseg2->s_base > addr);
6868 6866 ASSERT(eaddr > badseg2->s_base);
6869 6867 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6870 6868
6871 6869 badseg2->s_szc = tszc;
6872 6870 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6873 6871 return (segvn_demote_range(badseg2, badseg2->s_base,
6874 6872 eaddr - badseg2->s_base, SDR_END, tszcvec));
6875 6873 }
6876 6874 }
6877 6875
6878 6876 return (0);
6879 6877 }
6880 6878
6881 6879 static int
6882 6880 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6883 6881 {
6884 6882 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6885 6883 struct vpage *vp, *evp;
6886 6884
6887 6885 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6888 6886
6889 6887 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6890 6888 /*
6891 6889 * If segment protection can be used, simply check against them.
6892 6890 	 * If segment protections can be used, simply check against them.
6893 6891 if (svd->pageprot == 0) {
6894 6892 int err;
6895 6893
6896 6894 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6897 6895 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6898 6896 return (err);
6899 6897 }
6900 6898
6901 6899 /*
6902 6900 * Have to check down to the vpage level.
6903 6901 */
6904 6902 evp = &svd->vpage[seg_page(seg, addr + len)];
6905 6903 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6906 6904 if ((VPP_PROT(vp) & prot) != prot) {
6907 6905 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6908 6906 return (EACCES);
6909 6907 }
6910 6908 }
6911 6909 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6912 6910 return (0);
6913 6911 }
6914 6912
6915 6913 static int
6916 6914 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6917 6915 {
6918 6916 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6919 6917 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6920 6918
6921 6919 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6922 6920
6923 6921 if (pgno != 0) {
6924 6922 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6925 6923 if (svd->pageprot == 0) {
6926 6924 do {
6927 6925 protv[--pgno] = svd->prot;
6928 6926 } while (pgno != 0);
6929 6927 } else {
6930 6928 size_t pgoff = seg_page(seg, addr);
6931 6929
6932 6930 do {
6933 6931 pgno--;
6934 6932 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6935 6933 } while (pgno != 0);
6936 6934 }
6937 6935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6938 6936 }
6939 6937 return (0);
6940 6938 }
6941 6939
6942 6940 static u_offset_t
6943 6941 segvn_getoffset(struct seg *seg, caddr_t addr)
6944 6942 {
6945 6943 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6946 6944
6947 6945 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6948 6946
6949 6947 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6950 6948 }
6951 6949
6952 6950 /*ARGSUSED*/
6953 6951 static int
6954 6952 segvn_gettype(struct seg *seg, caddr_t addr)
6955 6953 {
6956 6954 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6957 6955
6958 6956 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6959 6957
6960 6958 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6961 6959 MAP_INITDATA)));
6962 6960 }
6963 6961
6964 6962 /*ARGSUSED*/
6965 6963 static int
6966 6964 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6967 6965 {
6968 6966 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6969 6967
6970 6968 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6971 6969
6972 6970 *vpp = svd->vp;
6973 6971 return (0);
6974 6972 }
6975 6973
6976 6974 /*
6977 6975 * Check to see if it makes sense to do kluster/read ahead to
6978 6976 * addr + delta relative to the mapping at addr. We assume here
6979 6977 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6980 6978 *
6981 6979 * For segvn, we currently "approve" of the action if we are
6982 6980 * still in the segment and it maps from the same vp/off,
6983 6981 * or if the advice stored in segvn_data or vpages allows it.
6984 6982  * Currently, klustering is disallowed only if MADV_RANDOM is set.
6985 6983 */
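A stand-alone sketch of the advice rule described above, using plain integers instead of the segment and vpage structures (the kernel check below additionally consults per-page advice when svd->pageadvice is set).

#include <assert.h>
#include <sys/types.h>

#define	TOY_MADV_NORMAL		0
#define	TOY_MADV_RANDOM		1
#define	TOY_MADV_SEQUENTIAL	2

/* returns -1 when the advice forbids klustering, 0 when it is allowed */
static int
toy_kluster_ok(int advice, ssize_t delta)
{
	if (advice == TOY_MADV_RANDOM ||
	    (advice == TOY_MADV_SEQUENTIAL && delta < 0))
		return (-1);
	return (0);
}

int
main(void)
{
	assert(toy_kluster_ok(TOY_MADV_NORMAL, 4096) == 0);
	assert(toy_kluster_ok(TOY_MADV_RANDOM, 4096) == -1);
	assert(toy_kluster_ok(TOY_MADV_SEQUENTIAL, -4096) == -1);
	assert(toy_kluster_ok(TOY_MADV_SEQUENTIAL, 4096) == 0);
	return (0);
}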
6986 6984 static int
6987 6985 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6988 6986 {
6989 6987 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6990 6988 struct anon *oap, *ap;
6991 6989 ssize_t pd;
6992 6990 size_t page;
6993 6991 struct vnode *vp1, *vp2;
6994 6992 u_offset_t off1, off2;
6995 6993 struct anon_map *amp;
6996 6994
6997 6995 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6998 6996 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6999 6997 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
7000 6998
7001 6999 if (addr + delta < seg->s_base ||
7002 7000 addr + delta >= (seg->s_base + seg->s_size))
7003 7001 return (-1); /* exceeded segment bounds */
7004 7002
7005 7003 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
7006 7004 page = seg_page(seg, addr);
7007 7005
7008 7006 /*
7009 7007 * Check to see if either of the pages addr or addr + delta
7010 7008 * have advice set that prevents klustering (if MADV_RANDOM advice
7011 7009 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
7012 7010 * is negative).
7013 7011 */
7014 7012 if (svd->advice == MADV_RANDOM ||
7015 7013 svd->advice == MADV_SEQUENTIAL && delta < 0)
7016 7014 return (-1);
7017 7015 else if (svd->pageadvice && svd->vpage) {
7018 7016 struct vpage *bvpp, *evpp;
7019 7017
7020 7018 bvpp = &svd->vpage[page];
7021 7019 evpp = &svd->vpage[page + pd];
7022 7020 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
7023 7021 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
7024 7022 return (-1);
7025 7023 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
7026 7024 VPP_ADVICE(evpp) == MADV_RANDOM)
7027 7025 return (-1);
7028 7026 }
7029 7027
7030 7028 if (svd->type == MAP_SHARED)
7031 7029 return (0); /* shared mapping - all ok */
7032 7030
7033 7031 if ((amp = svd->amp) == NULL)
7034 7032 return (0); /* off original vnode */
7035 7033
7036 7034 page += svd->anon_index;
7037 7035
7038 7036 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7039 7037
7040 7038 oap = anon_get_ptr(amp->ahp, page);
7041 7039 ap = anon_get_ptr(amp->ahp, page + pd);
7042 7040
7043 7041 	ANON_LOCK_EXIT(&amp->a_rwlock);
7044 7042
7045 7043 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
7046 7044 return (-1); /* one with and one without an anon */
7047 7045 }
7048 7046
7049 7047 if (oap == NULL) { /* implies that ap == NULL */
7050 7048 return (0); /* off original vnode */
7051 7049 }
7052 7050
7053 7051 /*
7054 7052 * Now we know we have two anon pointers - check to
7055 7053 * see if they happen to be properly allocated.
7056 7054 */
7057 7055
7058 7056 /*
7059 7057 * XXX We cheat here and don't lock the anon slots. We can't because
7060 7058 * we may have been called from the anon layer which might already
7061 7059 * have locked them. We are holding a refcnt on the slots so they
7062 7060 * can't disappear. The worst that will happen is we'll get the wrong
7063 7061 * names (vp, off) for the slots and make a poor klustering decision.
7064 7062 */
7065 7063 swap_xlate(ap, &vp1, &off1);
7066 7064 swap_xlate(oap, &vp2, &off2);
7067 7065
7068 7066
7069 7067 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
7070 7068 return (-1);
7071 7069 return (0);
7072 7070 }
7073 7071
7074 7072 /*
7075 7073 * Swap the pages of seg out to secondary storage, returning the
7076 7074 * number of bytes of storage freed.
7077 7075 *
7078 7076 * The basic idea is first to unload all translations and then to call
7079 7077 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
7080 7078 * swap device. Pages to which other segments have mappings will remain
7081 7079 * mapped and won't be swapped. Our caller (as_swapout) has already
7082 7080 * performed the unloading step.
7083 7081 *
7084 7082 * The value returned is intended to correlate well with the process's
7085 7083 * memory requirements. However, there are some caveats:
7086 7084 * 1) When given a shared segment as argument, this routine will
7087 7085 * only succeed in swapping out pages for the last sharer of the
7088 7086 * segment. (Previous callers will only have decremented mapping
7089 7087 * reference counts.)
7090 7088 * 2) We assume that the hat layer maintains a large enough translation
7091 7089 * cache to capture process reference patterns.
7092 7090 */
7093 7091 static size_t
7094 7092 segvn_swapout(struct seg *seg)
7095 7093 {
7096 7094 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7097 7095 struct anon_map *amp;
7098 7096 pgcnt_t pgcnt = 0;
7099 7097 pgcnt_t npages;
7100 7098 pgcnt_t page;
7101 7099 ulong_t anon_index;
7102 7100
7103 7101 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7104 7102
7105 7103 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7106 7104 /*
7107 7105 * Find pages unmapped by our caller and force them
7108 7106 * out to the virtual swap device.
7109 7107 */
7110 7108 if ((amp = svd->amp) != NULL)
7111 7109 anon_index = svd->anon_index;
7112 7110 npages = seg->s_size >> PAGESHIFT;
7113 7111 for (page = 0; page < npages; page++) {
7114 7112 page_t *pp;
7115 7113 struct anon *ap;
7116 7114 struct vnode *vp;
7117 7115 u_offset_t off;
7118 7116 anon_sync_obj_t cookie;
7119 7117
7120 7118 /*
7121 7119 * Obtain <vp, off> pair for the page, then look it up.
7122 7120 *
7123 7121 * Note that this code is willing to consider regular
7124 7122 * pages as well as anon pages. Is this appropriate here?
7125 7123 */
7126 7124 ap = NULL;
7127 7125 if (amp != NULL) {
7128 7126 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7129 7127 if (anon_array_try_enter(amp, anon_index + page,
7130 7128 &cookie)) {
7131 7129 				ANON_LOCK_EXIT(&amp->a_rwlock);
7132 7130 continue;
7133 7131 }
7134 7132 ap = anon_get_ptr(amp->ahp, anon_index + page);
7135 7133 if (ap != NULL) {
7136 7134 swap_xlate(ap, &vp, &off);
7137 7135 } else {
7138 7136 vp = svd->vp;
7139 7137 off = svd->offset + ptob(page);
7140 7138 }
7141 7139 anon_array_exit(&cookie);
7142 7140 			ANON_LOCK_EXIT(&amp->a_rwlock);
7143 7141 } else {
7144 7142 vp = svd->vp;
7145 7143 off = svd->offset + ptob(page);
7146 7144 }
7147 7145 if (vp == NULL) { /* untouched zfod page */
7148 7146 ASSERT(ap == NULL);
7149 7147 continue;
7150 7148 }
7151 7149
7152 7150 pp = page_lookup_nowait(vp, off, SE_SHARED);
7153 7151 if (pp == NULL)
7154 7152 continue;
7155 7153
7156 7154
7157 7155 /*
7158 7156 * Examine the page to see whether it can be tossed out,
7159 7157 * keeping track of how many we've found.
7160 7158 */
7161 7159 if (!page_tryupgrade(pp)) {
7162 7160 /*
7163 7161 * If the page has an i/o lock and no mappings,
7164 7162 * it's very likely that the page is being
7165 7163 * written out as a result of klustering.
7166 7164 * Assume this is so and take credit for it here.
7167 7165 */
7168 7166 if (!page_io_trylock(pp)) {
7169 7167 if (!hat_page_is_mapped(pp))
7170 7168 pgcnt++;
7171 7169 } else {
7172 7170 page_io_unlock(pp);
7173 7171 }
7174 7172 page_unlock(pp);
7175 7173 continue;
7176 7174 }
7177 7175 ASSERT(!page_iolock_assert(pp));
7178 7176
7179 7177
7180 7178 /*
7181 7179 * Skip if page is locked or has mappings.
7182 7180 * We don't need the page_struct_lock to look at lckcnt
7183 7181 * and cowcnt because the page is exclusive locked.
7184 7182 */
7185 7183 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7186 7184 hat_page_is_mapped(pp)) {
7187 7185 page_unlock(pp);
7188 7186 continue;
7189 7187 }
7190 7188
7191 7189 /*
7192 7190 * dispose skips large pages so try to demote first.
7193 7191 */
7194 7192 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7195 7193 page_unlock(pp);
7196 7194 /*
7197 7195 * XXX should skip the remaining page_t's of this
7198 7196 * large page.
7199 7197 */
7200 7198 continue;
7201 7199 }
7202 7200
7203 7201 ASSERT(pp->p_szc == 0);
7204 7202
7205 7203 /*
7206 7204 * No longer mapped -- we can toss it out. How
7207 7205 * we do so depends on whether or not it's dirty.
7208 7206 */
7209 7207 if (hat_ismod(pp) && pp->p_vnode) {
7210 7208 /*
7211 7209 * We must clean the page before it can be
7212 7210 * freed. Setting B_FREE will cause pvn_done
7213 7211 * to free the page when the i/o completes.
7214 7212 * XXX: This also causes it to be accounted
7215 7213 * as a pageout instead of a swap: need
7216 7214 * B_SWAPOUT bit to use instead of B_FREE.
7217 7215 *
7218 7216 * Hold the vnode before releasing the page lock
7219 7217 * to prevent it from being freed and re-used by
7220 7218 * some other thread.
7221 7219 */
7222 7220 VN_HOLD(vp);
7223 7221 page_unlock(pp);
7224 7222
7225 7223 /*
7226 7224 * Queue all i/o requests for the pageout thread
7227 7225 * to avoid saturating the pageout devices.
7228 7226 */
7229 7227 if (!queue_io_request(vp, off))
7230 7228 VN_RELE(vp);
7231 7229 } else {
7232 7230 /*
7233 7231 * The page was clean, free it.
7234 7232 *
7235 7233 * XXX: Can we ever encounter modified pages
7236 7234 * with no associated vnode here?
7237 7235 */
7238 7236 ASSERT(pp->p_vnode != NULL);
7239 7237 /*LINTED: constant in conditional context*/
7240 7238 VN_DISPOSE(pp, B_FREE, 0, kcred);
7241 7239 }
7242 7240
7243 7241 /*
7244 7242 * Credit now even if i/o is in progress.
7245 7243 */
7246 7244 pgcnt++;
7247 7245 }
7248 7246 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7249 7247
7250 7248 /*
7251 7249 * Wakeup pageout to initiate i/o on all queued requests.
7252 7250 */
7253 7251 cv_signal_pageout();
7254 7252 return (ptob(pgcnt));
7255 7253 }
7256 7254
7257 7255 /*
7258 7256 * Synchronize primary storage cache with real object in virtual memory.
7259 7257 *
7260 7258 * XXX - Anonymous pages should not be sync'ed out at all.
7261 7259 */
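For reference, a minimal user-level program that ends up in this routine (msync(3C) goes through memcntl/as_ctl to SEGOP_SYNC, as I understand the call path); the file name and length are illustrative. MS_ASYNC and MS_INVALIDATE correspond to the B_ASYNC and B_INVAL bflags computed below.

#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 8192;
	int fd = open("/tmp/segvn_sync_demo", O_RDWR | O_CREAT, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, (off_t)len) != 0)
		return (1);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);

	(void) memset(p, 'x', len);		/* dirty the shared pages */

	/* push the dirty pages back to the file; MS_SYNC waits for the I/O */
	if (msync(p, len, MS_SYNC) != 0)
		perror("msync");

	(void) munmap(p, len);
	(void) close(fd);
	return (0);
}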
7262 7260 static int
7263 7261 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7264 7262 {
7265 7263 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7266 7264 struct vpage *vpp;
7267 7265 page_t *pp;
7268 7266 u_offset_t offset;
7269 7267 struct vnode *vp;
7270 7268 u_offset_t off;
7271 7269 caddr_t eaddr;
7272 7270 int bflags;
7273 7271 int err = 0;
7274 7272 int segtype;
7275 7273 int pageprot;
7276 7274 int prot;
7277 7275 ulong_t anon_index;
7278 7276 struct anon_map *amp;
7279 7277 struct anon *ap;
7280 7278 anon_sync_obj_t cookie;
7281 7279
7282 7280 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7283 7281
7284 7282 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7285 7283
7286 7284 if (svd->softlockcnt > 0) {
7287 7285 /*
7288 7286 		 * If this is a shared segment, a non-zero softlockcnt
7289 7287 		 * means locked pages are still in use.
7290 7288 */
7291 7289 if (svd->type == MAP_SHARED) {
7292 7290 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7293 7291 return (EAGAIN);
7294 7292 }
7295 7293
7296 7294 /*
7297 7295 * flush all pages from seg cache
7298 7296 * otherwise we may deadlock in swap_putpage
7299 7297 * for B_INVAL page (4175402).
7300 7298 *
7301 7299 * Even if we grab segvn WRITER's lock
7302 7300 * here, there might be another thread which could've
7303 7301 * successfully performed lookup/insert just before
7304 7302 * we acquired the lock here. So, grabbing either
7305 7303 		 * lock here is not of much use. Until we devise
7306 7304 * a strategy at upper layers to solve the
7307 7305 * synchronization issues completely, we expect
7308 7306 * applications to handle this appropriately.
7309 7307 */
7310 7308 segvn_purge(seg);
7311 7309 if (svd->softlockcnt > 0) {
7312 7310 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7313 7311 return (EAGAIN);
7314 7312 }
7315 7313 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7316 7314 svd->amp->a_softlockcnt > 0) {
7317 7315 /*
7318 7316 * Try to purge this amp's entries from pcache. It will
7319 7317 * succeed only if other segments that share the amp have no
7320 7318 * outstanding softlock's.
7321 7319 */
7322 7320 segvn_purge(seg);
7323 7321 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7324 7322 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7325 7323 return (EAGAIN);
7326 7324 }
7327 7325 }
7328 7326
7329 7327 vpp = svd->vpage;
7330 7328 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7331 7329 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7332 7330 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7333 7331
7334 7332 if (attr) {
7335 7333 pageprot = attr & ~(SHARED|PRIVATE);
7336 7334 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7337 7335
7338 7336 /*
7339 7337 * We are done if the segment types don't match
7340 7338 * or if we have segment level protections and
7341 7339 * they don't match.
7342 7340 */
7343 7341 if (svd->type != segtype) {
7344 7342 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7345 7343 return (0);
7346 7344 }
7347 7345 if (vpp == NULL) {
7348 7346 if (svd->prot != pageprot) {
7349 7347 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7350 7348 return (0);
7351 7349 }
7352 7350 prot = svd->prot;
7353 7351 } else
7354 7352 vpp = &svd->vpage[seg_page(seg, addr)];
7355 7353
7356 7354 } else if (svd->vp && svd->amp == NULL &&
7357 7355 (flags & MS_INVALIDATE) == 0) {
7358 7356
7359 7357 /*
7360 7358 		 * No attributes, no anonymous pages, and the MS_INVALIDATE flag
7361 7359 		 * is not set, so just use one big request.
7362 7360 */
7363 7361 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7364 7362 bflags, svd->cred, NULL);
7365 7363 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7366 7364 return (err);
7367 7365 }
7368 7366
7369 7367 if ((amp = svd->amp) != NULL)
7370 7368 anon_index = svd->anon_index + seg_page(seg, addr);
7371 7369
7372 7370 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7373 7371 ap = NULL;
7374 7372 if (amp != NULL) {
7375 7373 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7376 7374 anon_array_enter(amp, anon_index, &cookie);
7377 7375 ap = anon_get_ptr(amp->ahp, anon_index++);
7378 7376 if (ap != NULL) {
7379 7377 swap_xlate(ap, &vp, &off);
7380 7378 } else {
7381 7379 vp = svd->vp;
7382 7380 off = offset;
7383 7381 }
7384 7382 anon_array_exit(&cookie);
7385 7383 			ANON_LOCK_EXIT(&amp->a_rwlock);
7386 7384 } else {
7387 7385 vp = svd->vp;
7388 7386 off = offset;
7389 7387 }
7390 7388 offset += PAGESIZE;
7391 7389
7392 7390 if (vp == NULL) /* untouched zfod page */
7393 7391 continue;
7394 7392
7395 7393 if (attr) {
7396 7394 if (vpp) {
7397 7395 prot = VPP_PROT(vpp);
7398 7396 vpp++;
7399 7397 }
7400 7398 if (prot != pageprot) {
7401 7399 continue;
7402 7400 }
7403 7401 }
7404 7402
7405 7403 /*
7406 7404 * See if any of these pages are locked -- if so, then we
7407 7405 * will have to truncate an invalidate request at the first
7408 7406 * locked one. We don't need the page_struct_lock to test
7409 7407 * as this is only advisory; even if we acquire it someone
7410 7408 * might race in and lock the page after we unlock and before
7411 7409 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7412 7410 */
7413 7411 if (flags & MS_INVALIDATE) {
7414 7412 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7415 7413 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7416 7414 page_unlock(pp);
7417 7415 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7418 7416 return (EBUSY);
7419 7417 }
7420 7418 if (ap != NULL && pp->p_szc != 0 &&
7421 7419 page_tryupgrade(pp)) {
7422 7420 if (pp->p_lckcnt == 0 &&
7423 7421 pp->p_cowcnt == 0) {
7424 7422 /*
7425 7423 * swapfs VN_DISPOSE() won't
7426 7424 * invalidate large pages.
7427 7425 * Attempt to demote.
7428 7426 * XXX can't help it if it
7429 7427 * fails. But for swapfs
7430 7428 * pages it is no big deal.
7431 7429 */
7432 7430 (void) page_try_demote_pages(
7433 7431 pp);
7434 7432 }
7435 7433 }
7436 7434 page_unlock(pp);
7437 7435 }
7438 7436 } else if (svd->type == MAP_SHARED && amp != NULL) {
7439 7437 /*
7440 7438 * Avoid writing out to disk ISM's large pages
7441 7439 * because segspt_free_pages() relies on NULL an_pvp
7442 7440 * of anon slots of such pages.
7443 7441 */
7444 7442
7445 7443 ASSERT(svd->vp == NULL);
7446 7444 /*
7447 7445 * swapfs uses page_lookup_nowait if not freeing or
7448 7446 * invalidating and skips a page if
7449 7447 * page_lookup_nowait returns NULL.
7450 7448 */
7451 7449 pp = page_lookup_nowait(vp, off, SE_SHARED);
7452 7450 if (pp == NULL) {
7453 7451 continue;
7454 7452 }
7455 7453 if (pp->p_szc != 0) {
7456 7454 page_unlock(pp);
7457 7455 continue;
7458 7456 }
7459 7457
7460 7458 /*
7461 7459 * Note ISM pages are created large so (vp, off)'s
7462 7460 * page cannot suddenly become large after we unlock
7463 7461 * pp.
7464 7462 */
7465 7463 page_unlock(pp);
7466 7464 }
7467 7465 /*
7468 7466 * XXX - Should ultimately try to kluster
7469 7467 * calls to VOP_PUTPAGE() for performance.
7470 7468 */
7471 7469 VN_HOLD(vp);
7472 7470 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7473 7471 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7474 7472 svd->cred, NULL);
7475 7473
7476 7474 VN_RELE(vp);
7477 7475 if (err)
7478 7476 break;
7479 7477 }
7480 7478 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7481 7479 return (err);
7482 7480 }
7483 7481
7484 7482 /*
7485 7483 * Determine if we have data corresponding to pages in the
7486 7484 * primary storage virtual memory cache (i.e., "in core").
7487 7485 */
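The user-visible interface here is mincore(2); a small sketch with illustrative sizes (one anonymous page is touched, so only it should report as resident).

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	size_t npages = 4;
	size_t len = npages * (size_t)pagesize;
	size_t i;
	char *p, *vec;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return (1);

	p[0] = 1;				/* touch only the first page */

	if ((vec = malloc(npages)) == NULL)
		return (1);
	if (mincore((caddr_t)p, len, vec) != 0) {
		perror("mincore");
		return (1);
	}

	for (i = 0; i < npages; i++)
		(void) printf("page %lu: %s\n", (ulong_t)i,
		    (vec[i] & 1) ? "in core" : "not in core");

	free(vec);
	(void) munmap(p, len);
	return (0);
}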
7488 7486 static size_t
7489 7487 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7490 7488 {
7491 7489 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7492 7490 struct vnode *vp, *avp;
7493 7491 u_offset_t offset, aoffset;
7494 7492 size_t p, ep;
7495 7493 int ret;
7496 7494 struct vpage *vpp;
7497 7495 page_t *pp;
7498 7496 uint_t start;
7499 7497 struct anon_map *amp; /* XXX - for locknest */
7500 7498 struct anon *ap;
7501 7499 uint_t attr;
7502 7500 anon_sync_obj_t cookie;
7503 7501
7504 7502 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7505 7503
7506 7504 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7507 7505 if (svd->amp == NULL && svd->vp == NULL) {
7508 7506 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7509 7507 bzero(vec, btopr(len));
7510 7508 return (len); /* no anonymous pages created yet */
7511 7509 }
7512 7510
7513 7511 p = seg_page(seg, addr);
7514 7512 ep = seg_page(seg, addr + len);
7515 7513 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7516 7514
7517 7515 amp = svd->amp;
7518 7516 for (; p < ep; p++, addr += PAGESIZE) {
7519 7517 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7520 7518 ret = start;
7521 7519 ap = NULL;
7522 7520 avp = NULL;
7523 7521 /* Grab the vnode/offset for the anon slot */
7524 7522 if (amp != NULL) {
7525 7523 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7526 7524 anon_array_enter(amp, svd->anon_index + p, &cookie);
7527 7525 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7528 7526 if (ap != NULL) {
7529 7527 swap_xlate(ap, &avp, &aoffset);
7530 7528 }
7531 7529 anon_array_exit(&cookie);
7532 7530 			ANON_LOCK_EXIT(&amp->a_rwlock);
7533 7531 }
7534 7532 if ((avp != NULL) && page_exists(avp, aoffset)) {
7535 7533 /* A page exists for the anon slot */
7536 7534 ret |= SEG_PAGE_INCORE;
7537 7535
7538 7536 /*
7539 7537 * If page is mapped and writable
7540 7538 */
7541 7539 attr = (uint_t)0;
7542 7540 if ((hat_getattr(seg->s_as->a_hat, addr,
7543 7541 &attr) != -1) && (attr & PROT_WRITE)) {
7544 7542 ret |= SEG_PAGE_ANON;
7545 7543 }
7546 7544 /*
7547 7545 * Don't get page_struct lock for lckcnt and cowcnt,
7548 7546 * since this is purely advisory.
7549 7547 */
7550 7548 if ((pp = page_lookup_nowait(avp, aoffset,
7551 7549 SE_SHARED)) != NULL) {
7552 7550 if (pp->p_lckcnt)
7553 7551 ret |= SEG_PAGE_SOFTLOCK;
7554 7552 if (pp->p_cowcnt)
7555 7553 ret |= SEG_PAGE_HASCOW;
7556 7554 page_unlock(pp);
7557 7555 }
7558 7556 }
7559 7557
7560 7558 /* Gather vnode statistics */
7561 7559 vp = svd->vp;
7562 7560 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7563 7561
7564 7562 if (vp != NULL) {
7565 7563 /*
7566 7564 * Try to obtain a "shared" lock on the page
7567 7565 * without blocking. If this fails, determine
7568 7566 * if the page is in memory.
7569 7567 */
7570 7568 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7571 7569 if ((pp == NULL) && (page_exists(vp, offset))) {
7572 7570 /* Page is incore, and is named */
7573 7571 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7574 7572 }
7575 7573 /*
7576 7574 * Don't get page_struct lock for lckcnt and cowcnt,
7577 7575 * since this is purely advisory.
7578 7576 */
7579 7577 if (pp != NULL) {
7580 7578 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7581 7579 if (pp->p_lckcnt)
7582 7580 ret |= SEG_PAGE_SOFTLOCK;
7583 7581 if (pp->p_cowcnt)
7584 7582 ret |= SEG_PAGE_HASCOW;
7585 7583 page_unlock(pp);
7586 7584 }
7587 7585 }
7588 7586
7589 7587 /* Gather virtual page information */
7590 7588 if (vpp) {
7591 7589 if (VPP_ISPPLOCK(vpp))
7592 7590 ret |= SEG_PAGE_LOCKED;
7593 7591 vpp++;
7594 7592 }
7595 7593
7596 7594 *vec++ = (char)ret;
7597 7595 }
7598 7596 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7599 7597 return (len);
7600 7598 }
7601 7599
7602 7600 /*
7603 7601 * Statement for p_cowcnts/p_lckcnts.
7604 7602 *
7605 7603 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7606 7604 * irrespective of the following factors or anything else:
7607 7605 *
7608 7606 * (1) anon slots are populated or not
7609 7607 * (2) cow is broken or not
7610 7608 * (3) refcnt on ap is 1 or greater than 1
7611 7609 *
7612 7610 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7613 7611 * and munlock.
7614 7612 *
7615 7613 *
7616 7614 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7617 7615 *
7618 7616 * if vpage has PROT_WRITE
7619 7617 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7620 7618 * else
7621 7619 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7622 7620 *
7623 7621 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7624 7622 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7625 7623 *
7626 7624 * We may also break COW if softlocking on read access in the physio case.
7627 7625 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7628 7626 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7629 7627 * vpage doesn't have PROT_WRITE.
7630 7628 *
7631 7629 *
7632 7630 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7633 7631 *
7634 7632 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7635 7633 * increment p_lckcnt by calling page_subclaim() which takes care of
7636 7634 * availrmem accounting and p_lckcnt overflow.
7637 7635 *
7638 7636 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7639 7637 * increment p_cowcnt by calling page_addclaim() which takes care of
7640 7638 * availrmem availability and p_cowcnt overflow.
7641 7639 */
7642 7640
7643 7641 /*
7644 7642 * Lock down (or unlock) pages mapped by this segment.
7645 7643 *
7646 7644 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7647 7645 * At fault time they will be relocated into larger pages.
7648 7646 */
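A small user-level illustration of the lock and unlock operations whose p_lckcnt/p_cowcnt accounting is described in the statement above; mlock(3C) is used for brevity (memcntl/MC_LOCK is what reaches this routine), the mapping size is illustrative, and locking memory requires the appropriate privilege or resource controls. Because the mapping is MAP_PRIVATE with PROT_WRITE, this corresponds to the p_cowcnt case; a read-only mapping would exercise p_lckcnt instead.

#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	size_t len = 2 * (size_t)pagesize;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return (1);

	/* lock the pages down */
	if (mlock(p, len) != 0) {
		perror("mlock");
		return (1);
	}

	(void) memset(p, 0, len);		/* the locked pages stay resident */

	if (munlock(p, len) != 0)
		perror("munlock");

	(void) munmap(p, len);
	return (0);
}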
7649 7647 static int
7650 7648 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7651 7649 int attr, int op, ulong_t *lockmap, size_t pos)
7652 7650 {
7653 7651 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7654 7652 struct vpage *vpp;
7655 7653 struct vpage *evp;
7656 7654 page_t *pp;
7657 7655 u_offset_t offset;
7658 7656 u_offset_t off;
7659 7657 int segtype;
7660 7658 int pageprot;
7661 7659 int claim;
7662 7660 struct vnode *vp;
7663 7661 ulong_t anon_index;
7664 7662 struct anon_map *amp;
7665 7663 struct anon *ap;
7666 7664 struct vattr va;
7667 7665 anon_sync_obj_t cookie;
7668 7666 struct kshmid *sp = NULL;
7669 7667 struct proc *p = curproc;
7670 7668 kproject_t *proj = NULL;
7671 7669 int chargeproc = 1;
7672 7670 size_t locked_bytes = 0;
7673 7671 size_t unlocked_bytes = 0;
7674 7672 int err = 0;
7675 7673
7676 7674 /*
7677 7675 	 * Hold write lock on address space because we may split or concatenate
7678 7676 * segments
7679 7677 */
7680 7678 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7681 7679
7682 7680 /*
7683 7681 * If this is a shm, use shm's project and zone, else use
7684 7682 * project and zone of calling process
7685 7683 */
7686 7684
7687 7685 /* Determine if this segment backs a sysV shm */
7688 7686 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7689 7687 ASSERT(svd->type == MAP_SHARED);
7690 7688 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7691 7689 sp = svd->amp->a_sp;
7692 7690 proj = sp->shm_perm.ipc_proj;
7693 7691 chargeproc = 0;
7694 7692 }
7695 7693
7696 7694 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7697 7695 if (attr) {
7698 7696 pageprot = attr & ~(SHARED|PRIVATE);
7699 7697 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7700 7698
7701 7699 /*
7702 7700 * We are done if the segment types don't match
7703 7701 * or if we have segment level protections and
7704 7702 * they don't match.
7705 7703 */
7706 7704 if (svd->type != segtype) {
7707 7705 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7708 7706 return (0);
7709 7707 }
7710 7708 if (svd->pageprot == 0 && svd->prot != pageprot) {
7711 7709 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7712 7710 return (0);
7713 7711 }
7714 7712 }
7715 7713
7716 7714 if (op == MC_LOCK) {
7717 7715 if (svd->tr_state == SEGVN_TR_INIT) {
7718 7716 svd->tr_state = SEGVN_TR_OFF;
7719 7717 } else if (svd->tr_state == SEGVN_TR_ON) {
7720 7718 ASSERT(svd->amp != NULL);
7721 7719 segvn_textunrepl(seg, 0);
7722 7720 ASSERT(svd->amp == NULL &&
7723 7721 svd->tr_state == SEGVN_TR_OFF);
7724 7722 }
7725 7723 }
7726 7724
7727 7725 /*
7728 7726 * If we're locking, then we must create a vpage structure if
7729 7727 * none exists. If we're unlocking, then check to see if there
7730 7728 * is a vpage -- if not, then we could not have locked anything.
7731 7729 */
7732 7730
7733 7731 if ((vpp = svd->vpage) == NULL) {
7734 7732 if (op == MC_LOCK) {
7735 7733 segvn_vpage(seg);
7736 7734 if (svd->vpage == NULL) {
7737 7735 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7738 7736 return (ENOMEM);
7739 7737 }
7740 7738 } else {
7741 7739 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7742 7740 return (0);
7743 7741 }
7744 7742 }
7745 7743
7746 7744 /*
7747 7745 * The anonymous data vector (i.e., previously
7748 7746 * unreferenced mapping to swap space) can be allocated
7749 7747 * by lazily testing for its existence.
7750 7748 */
7751 7749 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7752 7750 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7753 7751 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7754 7752 svd->amp->a_szc = seg->s_szc;
7755 7753 }
7756 7754
7757 7755 if ((amp = svd->amp) != NULL) {
7758 7756 anon_index = svd->anon_index + seg_page(seg, addr);
7759 7757 }
7760 7758
7761 7759 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7762 7760 evp = &svd->vpage[seg_page(seg, addr + len)];
7763 7761
7764 7762 if (sp != NULL)
7765 7763 mutex_enter(&sp->shm_mlock);
7766 7764
7767 7765 /* determine number of unlocked bytes in range for lock operation */
7768 7766 if (op == MC_LOCK) {
7769 7767
7770 7768 if (sp == NULL) {
7771 7769 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7772 7770 vpp++) {
7773 7771 if (!VPP_ISPPLOCK(vpp))
7774 7772 unlocked_bytes += PAGESIZE;
7775 7773 }
7776 7774 } else {
7777 7775 ulong_t i_idx, i_edx;
7778 7776 anon_sync_obj_t i_cookie;
7779 7777 struct anon *i_ap;
7780 7778 struct vnode *i_vp;
7781 7779 u_offset_t i_off;
7782 7780
7783 7781 /* Only count sysV pages once for locked memory */
7784 7782 i_edx = svd->anon_index + seg_page(seg, addr + len);
7785 7783 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7786 7784 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7787 7785 anon_array_enter(amp, i_idx, &i_cookie);
7788 7786 i_ap = anon_get_ptr(amp->ahp, i_idx);
7789 7787 if (i_ap == NULL) {
7790 7788 unlocked_bytes += PAGESIZE;
7791 7789 anon_array_exit(&i_cookie);
7792 7790 continue;
7793 7791 }
7794 7792 swap_xlate(i_ap, &i_vp, &i_off);
7795 7793 anon_array_exit(&i_cookie);
7796 7794 pp = page_lookup(i_vp, i_off, SE_SHARED);
7797 7795 if (pp == NULL) {
7798 7796 unlocked_bytes += PAGESIZE;
7799 7797 continue;
7800 7798 } else if (pp->p_lckcnt == 0)
7801 7799 unlocked_bytes += PAGESIZE;
7802 7800 page_unlock(pp);
7803 7801 }
7804 7802 			ANON_LOCK_EXIT(&amp->a_rwlock);
7805 7803 }
7806 7804
7807 7805 mutex_enter(&p->p_lock);
7808 7806 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7809 7807 chargeproc);
7810 7808 mutex_exit(&p->p_lock);
7811 7809
7812 7810 if (err) {
7813 7811 if (sp != NULL)
7814 7812 mutex_exit(&sp->shm_mlock);
7815 7813 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7816 7814 return (err);
7817 7815 }
7818 7816 }
7819 7817 /*
7820 7818 * Loop over all pages in the range. Process if we're locking and
7821 7819 * page has not already been locked in this mapping; or if we're
7822 7820 * unlocking and the page has been locked.
7823 7821 */
7824 7822 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7825 7823 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7826 7824 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7827 7825 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7828 7826 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7829 7827
7830 7828 if (amp != NULL)
7831 7829 				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7832 7830 /*
7833 7831 * If this isn't a MAP_NORESERVE segment and
7834 7832 * we're locking, allocate anon slots if they
7835 7833 * don't exist. The page is brought in later on.
7836 7834 */
7837 7835 if (op == MC_LOCK && svd->vp == NULL &&
7838 7836 ((svd->flags & MAP_NORESERVE) == 0) &&
7839 7837 amp != NULL &&
7840 7838 ((ap = anon_get_ptr(amp->ahp, anon_index))
7841 7839 == NULL)) {
7842 7840 anon_array_enter(amp, anon_index, &cookie);
7843 7841
7844 7842 if ((ap = anon_get_ptr(amp->ahp,
7845 7843 anon_index)) == NULL) {
7846 7844 pp = anon_zero(seg, addr, &ap,
7847 7845 svd->cred);
7848 7846 if (pp == NULL) {
7849 7847 anon_array_exit(&cookie);
7850 7848 					ANON_LOCK_EXIT(&amp->a_rwlock);
7851 7849 err = ENOMEM;
7852 7850 goto out;
7853 7851 }
7854 7852 ASSERT(anon_get_ptr(amp->ahp,
7855 7853 anon_index) == NULL);
7856 7854 (void) anon_set_ptr(amp->ahp,
7857 7855 anon_index, ap, ANON_SLEEP);
7858 7856 page_unlock(pp);
7859 7857 }
7860 7858 anon_array_exit(&cookie);
7861 7859 }
7862 7860
7863 7861 /*
7864 7862 * Get name for page, accounting for
7865 7863 * existence of private copy.
7866 7864 */
7867 7865 ap = NULL;
7868 7866 if (amp != NULL) {
7869 7867 anon_array_enter(amp, anon_index, &cookie);
7870 7868 ap = anon_get_ptr(amp->ahp, anon_index);
7871 7869 if (ap != NULL) {
7872 7870 swap_xlate(ap, &vp, &off);
7873 7871 } else {
7874 7872 if (svd->vp == NULL &&
7875 7873 (svd->flags & MAP_NORESERVE)) {
7876 7874 anon_array_exit(&cookie);
7877 7875 						ANON_LOCK_EXIT(&amp->a_rwlock);
7878 7876 continue;
7879 7877 }
7880 7878 vp = svd->vp;
7881 7879 off = offset;
7882 7880 }
7883 7881 if (op != MC_LOCK || ap == NULL) {
7884 7882 anon_array_exit(&cookie);
7885 7883 					ANON_LOCK_EXIT(&amp->a_rwlock);
7886 7884 }
7887 7885 } else {
7888 7886 vp = svd->vp;
7889 7887 off = offset;
7890 7888 }
7891 7889
7892 7890 /*
7893 7891 * Get page frame. It's ok if the page is
7894 7892 * not available when we're unlocking, as this
7895 7893 * may simply mean that a page we locked got
7896 7894 * truncated out of existence after we locked it.
7897 7895 *
7898 7896 * Invoke VOP_GETPAGE() to obtain the page struct
7899 7897 			 * since we may need to read it from disk if it's
7900 7898 * been paged out.
7901 7899 */
7902 7900 if (op != MC_LOCK)
7903 7901 pp = page_lookup(vp, off, SE_SHARED);
7904 7902 else {
7905 7903 page_t *pl[1 + 1];
7906 7904 int error;
7907 7905
7908 7906 ASSERT(vp != NULL);
7909 7907
7910 7908 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7911 7909 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7912 7910 S_OTHER, svd->cred, NULL);
7913 7911
7914 7912 if (error && ap != NULL) {
7915 7913 anon_array_exit(&cookie);
7916 7914 					ANON_LOCK_EXIT(&amp->a_rwlock);
7917 7915 }
7918 7916
7919 7917 /*
7920 7918 * If the error is EDEADLK then we must bounce
7921 7919 * up and drop all vm subsystem locks and then
7922 7920 * retry the operation later
7923 7921 * This behavior is a temporary measure because
7924 7922 * ufs/sds logging is badly designed and will
7925 7923 * deadlock if we don't allow this bounce to
7926 7924 * happen. The real solution is to re-design
7927 7925 * the logging code to work properly. See bug
7928 7926 * 4125102 for details of the problem.
7929 7927 */
7930 7928 if (error == EDEADLK) {
7931 7929 err = error;
7932 7930 goto out;
7933 7931 }
7934 7932 /*
7935 7933 * Quit if we fail to fault in the page. Treat
7936 7934 * the failure as an error, unless the addr
7937 7935 * is mapped beyond the end of a file.
7938 7936 */
7939 7937 if (error && svd->vp) {
7940 7938 va.va_mask = AT_SIZE;
7941 7939 if (VOP_GETATTR(svd->vp, &va, 0,
7942 7940 svd->cred, NULL) != 0) {
7943 7941 err = EIO;
7944 7942 goto out;
7945 7943 }
7946 7944 if (btopr(va.va_size) >=
7947 7945 btopr(off + 1)) {
7948 7946 err = EIO;
7949 7947 goto out;
7950 7948 }
7951 7949 goto out;
7952 7950
7953 7951 } else if (error) {
7954 7952 err = EIO;
7955 7953 goto out;
7956 7954 }
7957 7955 pp = pl[0];
7958 7956 ASSERT(pp != NULL);
7959 7957 }
7960 7958
7961 7959 /*
7962 7960 * See Statement at the beginning of this routine.
7963 7961 *
7964 7962 * claim is always set if MAP_PRIVATE and PROT_WRITE
7965 7963 * irrespective of following factors:
7966 7964 *
7967 7965 * (1) anon slots are populated or not
7968 7966 * (2) cow is broken or not
7969 7967 * (3) refcnt on ap is 1 or greater than 1
7970 7968 *
7971 7969 * See 4140683 for details
7972 7970 */
7973 7971 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7974 7972 (svd->type == MAP_PRIVATE));
7975 7973
7976 7974 /*
7977 7975 * Perform page-level operation appropriate to
7978 7976 * operation. If locking, undo the SOFTLOCK
7979 7977 * performed to bring the page into memory
7980 7978 * after setting the lock. If unlocking,
7981 7979 * and no page was found, account for the claim
7982 7980 * separately.
7983 7981 */
7984 7982 if (op == MC_LOCK) {
7985 7983 int ret = 1; /* Assume success */
7986 7984
7987 7985 ASSERT(!VPP_ISPPLOCK(vpp));
7988 7986
7989 7987 ret = page_pp_lock(pp, claim, 0);
7990 7988 if (ap != NULL) {
7991 7989 if (ap->an_pvp != NULL) {
7992 7990 anon_swap_free(ap, pp);
7993 7991 }
7994 7992 anon_array_exit(&cookie);
7995 7993 					ANON_LOCK_EXIT(&amp->a_rwlock);
7996 7994 }
7997 7995 if (ret == 0) {
7998 7996 /* locking page failed */
7999 7997 page_unlock(pp);
8000 7998 err = EAGAIN;
8001 7999 goto out;
8002 8000 }
8003 8001 VPP_SETPPLOCK(vpp);
8004 8002 if (sp != NULL) {
8005 8003 if (pp->p_lckcnt == 1)
8006 8004 locked_bytes += PAGESIZE;
8007 8005 } else
8008 8006 locked_bytes += PAGESIZE;
8009 8007
8010 8008 if (lockmap != (ulong_t *)NULL)
8011 8009 BT_SET(lockmap, pos);
8012 8010
8013 8011 page_unlock(pp);
8014 8012 } else {
8015 8013 ASSERT(VPP_ISPPLOCK(vpp));
8016 8014 if (pp != NULL) {
8017 8015 /* sysV pages should be locked */
8018 8016 ASSERT(sp == NULL || pp->p_lckcnt > 0);
8019 8017 page_pp_unlock(pp, claim, 0);
8020 8018 if (sp != NULL) {
8021 8019 if (pp->p_lckcnt == 0)
8022 8020 unlocked_bytes
8023 8021 += PAGESIZE;
8024 8022 } else
8025 8023 unlocked_bytes += PAGESIZE;
8026 8024 page_unlock(pp);
8027 8025 } else {
8028 8026 ASSERT(sp == NULL);
8029 8027 unlocked_bytes += PAGESIZE;
8030 8028 }
8031 8029 VPP_CLRPPLOCK(vpp);
8032 8030 }
8033 8031 }
8034 8032 }
8035 8033 out:
8036 8034 if (op == MC_LOCK) {
8037 8035 /* Credit back bytes that did not get locked */
8038 8036 if ((unlocked_bytes - locked_bytes) > 0) {
8039 8037 if (proj == NULL)
8040 8038 mutex_enter(&p->p_lock);
8041 8039 rctl_decr_locked_mem(p, proj,
8042 8040 (unlocked_bytes - locked_bytes), chargeproc);
8043 8041 if (proj == NULL)
8044 8042 mutex_exit(&p->p_lock);
8045 8043 }
8046 8044
8047 8045 } else {
8048 8046 /* Account bytes that were unlocked */
8049 8047 if (unlocked_bytes > 0) {
8050 8048 if (proj == NULL)
8051 8049 mutex_enter(&p->p_lock);
8052 8050 rctl_decr_locked_mem(p, proj, unlocked_bytes,
8053 8051 chargeproc);
8054 8052 if (proj == NULL)
8055 8053 mutex_exit(&p->p_lock);
8056 8054 }
8057 8055 }
8058 8056 if (sp != NULL)
8059 8057 mutex_exit(&sp->shm_mlock);
8060 8058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8061 8059
8062 8060 return (err);
8063 8061 }
8064 8062
8065 8063 /*
8066 8064 * Set advice from user for specified pages
8067 8065 * There are 9 types of advice:
8068 8066 * MADV_NORMAL - Normal (default) behavior (whatever that is)
8069 8067 * MADV_RANDOM - Random page references
8070 8068 * do not allow readahead or 'klustering'
8071 8069 * MADV_SEQUENTIAL - Sequential page references
8072 8070 * Pages previous to the one currently being
8073 8071 * accessed (determined by fault) are 'not needed'
8074 8072 * and are freed immediately
8075 8073 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
8076 8074 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
8077 8075 * MADV_FREE - Contents can be discarded
8078 8076 * MADV_ACCESS_DEFAULT- Default access
8079 8077 * MADV_ACCESS_LWP - Next LWP will access heavily
8080 8078 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
8081 8079  * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
 */
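A user-level sketch exercising a few of the advice values listed above through madvise(3C), which reaches this routine via memcntl, as I understand it; the anonymous mapping and its size are illustrative.

#include <sys/mman.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	size_t len = 16 * (size_t)pagesize;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return (1);

	(void) memset(p, 0, len);

	/* suppress read-ahead/klustering for this range */
	(void) madvise(p, len, MADV_RANDOM);

	/* the contents may now be discarded by the VM system */
	(void) madvise(p, len, MADV_FREE);

	(void) munmap(p, len);
	return (0);
}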
8082 8080 static int
8083 8081 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8084 8082 {
8085 8083 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8086 8084 size_t page;
8087 8085 int err = 0;
8088 8086 int already_set;
8089 8087 struct anon_map *amp;
8090 8088 ulong_t anon_index;
8091 8089 struct seg *next;
8092 8090 lgrp_mem_policy_t policy;
8093 8091 struct seg *prev;
8094 8092 struct vnode *vp;
8095 8093
8096 8094 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8097 8095
8098 8096 /*
8099 8097 * In case of MADV_FREE, we won't be modifying any segment private
8100 8098 * data structures; so, we only need to grab READER's lock
8101 8099 */
8102 8100 if (behav != MADV_FREE) {
8103 8101 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8104 8102 if (svd->tr_state != SEGVN_TR_OFF) {
8105 8103 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8106 8104 return (0);
8107 8105 }
8108 8106 } else {
8109 8107 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8110 8108 }
8111 8109
8112 8110 /*
8113 8111 * Large pages are assumed to be only turned on when accesses to the
8114 8112 * segment's address range have spatial and temporal locality. That
8115 8113 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8116 8114 * Also, ignore advice affecting lgroup memory allocation
8117 8115  * if we don't need to do lgroup optimizations on this system
8118 8116 */
8119 8117
8120 8118 if ((behav == MADV_SEQUENTIAL &&
8121 8119 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8122 8120 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8123 8121 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8124 8122 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8125 8123 return (0);
8126 8124 }
8127 8125
8128 8126 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8129 8127 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8130 8128 /*
8131 8129 * Since we are going to unload hat mappings
8132 8130 * we first have to flush the cache. Otherwise
8133 8131 * this might lead to system panic if another
8134 8132 * thread is doing physio on the range whose
8135 8133 * mappings are unloaded by madvise(3C).
8136 8134 */
8137 8135 if (svd->softlockcnt > 0) {
8138 8136 /*
8139 8137 			 * If this is a shared segment, a non-zero softlockcnt
8140 8138 			 * means locked pages are still in use.
8141 8139 */
8142 8140 if (svd->type == MAP_SHARED) {
8143 8141 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8144 8142 return (EAGAIN);
8145 8143 }
8146 8144 /*
8147 8145 * Since we do have the segvn writers lock
8148 8146 * nobody can fill the cache with entries
8149 8147 * belonging to this seg during the purge.
8150 8148 * The flush either succeeds or we still
8151 8149 			 * have pending I/Os. In the latter case,
8152 8150 * madvise(3C) fails.
8153 8151 */
8154 8152 segvn_purge(seg);
8155 8153 if (svd->softlockcnt > 0) {
8156 8154 /*
8157 8155 * Since madvise(3C) is advisory and
8158 8156 * it's not part of UNIX98, madvise(3C)
8159 8157 * failure here doesn't cause any hardship.
8160 8158 * Note that we don't block in "as" layer.
8161 8159 */
8162 8160 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8163 8161 return (EAGAIN);
8164 8162 }
8165 8163 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8166 8164 svd->amp->a_softlockcnt > 0) {
8167 8165 /*
8168 8166 * Try to purge this amp's entries from pcache. It
8169 8167 * will succeed only if other segments that share the
8170 8168 * amp have no outstanding softlock's.
8171 8169 */
8172 8170 segvn_purge(seg);
8173 8171 }
8174 8172 }
8175 8173
8176 8174 amp = svd->amp;
8177 8175 vp = svd->vp;
8178 8176 if (behav == MADV_FREE) {
8179 8177 /*
8180 8178 * MADV_FREE is not supported for segments with
8181 8179 * underlying object; if anonmap is NULL, anon slots
8182 8180 * are not yet populated and there is nothing for
8183 8181 * us to do. As MADV_FREE is advisory, we don't
8184 8182 * return error in either case.
8185 8183 */
8186 8184 if (vp != NULL || amp == NULL) {
8187 8185 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8188 8186 return (0);
8189 8187 }
8190 8188
8191 8189 segvn_purge(seg);
8192 8190
8193 8191 page = seg_page(seg, addr);
8194 8192 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8195 8193 anon_disclaim(amp, svd->anon_index + page, len);
8196 8194 		ANON_LOCK_EXIT(&amp->a_rwlock);
8197 8195 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8198 8196 return (0);
8199 8197 }
8200 8198
8201 8199 /*
8202 8200 	 * If advice is to be applied to the entire segment,
8203 8201 	 * use the advice field in the seg_data structure;
8204 8202 	 * otherwise use the appropriate vpage entry.
8205 8203 */
8206 8204 if ((addr == seg->s_base) && (len == seg->s_size)) {
8207 8205 switch (behav) {
8208 8206 case MADV_ACCESS_LWP:
8209 8207 case MADV_ACCESS_MANY:
8210 8208 case MADV_ACCESS_DEFAULT:
8211 8209 /*
8212 8210 * Set memory allocation policy for this segment
8213 8211 */
8214 8212 policy = lgrp_madv_to_policy(behav, len, svd->type);
8215 8213 if (svd->type == MAP_SHARED)
8216 8214 already_set = lgrp_shm_policy_set(policy, amp,
8217 8215 svd->anon_index, vp, svd->offset, len);
8218 8216 else {
8219 8217 /*
8220 8218 * For private memory, need writers lock on
8221 8219 * address space because the segment may be
8222 8220 * split or concatenated when changing policy
8223 8221 */
8224 8222 if (AS_READ_HELD(seg->s_as,
8225 8223 &seg->s_as->a_lock)) {
8226 8224 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8227 8225 return (IE_RETRY);
8228 8226 }
8229 8227
8230 8228 already_set = lgrp_privm_policy_set(policy,
8231 8229 &svd->policy_info, len);
8232 8230 }
8233 8231
8234 8232 /*
8235 8233 * If policy set already and it shouldn't be reapplied,
8236 8234 * don't do anything.
8237 8235 */
8238 8236 if (already_set &&
8239 8237 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8240 8238 break;
8241 8239
8242 8240 /*
8243 8241 * Mark any existing pages in given range for
8244 8242 * migration
8245 8243 */
8246 8244 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8247 8245 vp, svd->offset, 1);
8248 8246
8249 8247 /*
8250 8248 * If same policy set already or this is a shared
8251 8249 * memory segment, don't need to try to concatenate
8252 8250 * segment with adjacent ones.
8253 8251 */
8254 8252 if (already_set || svd->type == MAP_SHARED)
8255 8253 break;
8256 8254
8257 8255 /*
8258 8256 * Try to concatenate this segment with previous
8259 8257 * one and next one, since we changed policy for
8260 8258 * this one and it may be compatible with adjacent
8261 8259 * ones now.
8262 8260 */
8263 8261 prev = AS_SEGPREV(seg->s_as, seg);
8264 8262 next = AS_SEGNEXT(seg->s_as, seg);
8265 8263
8266 8264 if (next && next->s_ops == &segvn_ops &&
8267 8265 addr + len == next->s_base)
8268 8266 (void) segvn_concat(seg, next, 1);
8269 8267
8270 8268 if (prev && prev->s_ops == &segvn_ops &&
8271 8269 addr == prev->s_base + prev->s_size) {
8272 8270 /*
8273 8271 * Drop lock for private data of current
8274 8272 * segment before concatenating (deleting) it
8275 8273 * and return IE_REATTACH to tell as_ctl() that
8276 8274 * current segment has changed
8277 8275 */
8278 8276 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8279 8277 if (!segvn_concat(prev, seg, 1))
8280 8278 err = IE_REATTACH;
8281 8279
8282 8280 return (err);
8283 8281 }
8284 8282 break;
8285 8283
8286 8284 case MADV_SEQUENTIAL:
8287 8285 /*
8288 8286 * unloading mapping guarantees
8289 8287 * detection in segvn_fault
8290 8288 */
8291 8289 ASSERT(seg->s_szc == 0);
8292 8290 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8293 8291 hat_unload(seg->s_as->a_hat, addr, len,
8294 8292 HAT_UNLOAD);
8295 8293 /* FALLTHROUGH */
8296 8294 case MADV_NORMAL:
8297 8295 case MADV_RANDOM:
8298 8296 svd->advice = (uchar_t)behav;
8299 8297 svd->pageadvice = 0;
8300 8298 break;
8301 8299 case MADV_WILLNEED: /* handled in memcntl */
8302 8300 case MADV_DONTNEED: /* handled in memcntl */
8303 8301 case MADV_FREE: /* handled above */
8304 8302 break;
8305 8303 default:
8306 8304 err = EINVAL;
8307 8305 }
8308 8306 } else {
8309 8307 caddr_t eaddr;
8310 8308 struct seg *new_seg;
8311 8309 struct segvn_data *new_svd;
8312 8310 u_offset_t off;
8313 8311 caddr_t oldeaddr;
8314 8312
8315 8313 page = seg_page(seg, addr);
8316 8314
8317 8315 segvn_vpage(seg);
8318 8316 if (svd->vpage == NULL) {
8319 8317 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8320 8318 return (ENOMEM);
8321 8319 }
8322 8320
8323 8321 switch (behav) {
8324 8322 struct vpage *bvpp, *evpp;
8325 8323
8326 8324 case MADV_ACCESS_LWP:
8327 8325 case MADV_ACCESS_MANY:
8328 8326 case MADV_ACCESS_DEFAULT:
8329 8327 /*
8330 8328 * Set memory allocation policy for portion of this
8331 8329 * segment
8332 8330 */
8333 8331
8334 8332 /*
8335 8333 * Align address and length of advice to page
8336 8334 * boundaries for large pages
8337 8335 */
8338 8336 if (seg->s_szc != 0) {
8339 8337 size_t pgsz;
8340 8338
8341 8339 pgsz = page_get_pagesize(seg->s_szc);
8342 8340 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8343 8341 len = P2ROUNDUP(len, pgsz);
8344 8342 }
8345 8343
8346 8344 /*
8347 8345 * Check to see whether policy is set already
8348 8346 */
8349 8347 policy = lgrp_madv_to_policy(behav, len, svd->type);
8350 8348
8351 8349 anon_index = svd->anon_index + page;
8352 8350 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8353 8351
8354 8352 if (svd->type == MAP_SHARED)
8355 8353 already_set = lgrp_shm_policy_set(policy, amp,
8356 8354 anon_index, vp, off, len);
8357 8355 else
8358 8356 already_set =
8359 8357 (policy == svd->policy_info.mem_policy);
8360 8358
8361 8359 /*
8362 8360 * If policy set already and it shouldn't be reapplied,
8363 8361 * don't do anything.
8364 8362 */
8365 8363 if (already_set &&
8366 8364 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8367 8365 break;
8368 8366
8369 8367 /*
8370 8368 * For private memory, need writers lock on
8371 8369 * address space because the segment may be
8372 8370 * split or concatenated when changing policy
8373 8371 */
8374 8372 if (svd->type == MAP_PRIVATE &&
8375 8373 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8376 8374 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8377 8375 return (IE_RETRY);
8378 8376 }
8379 8377
8380 8378 /*
8381 8379 * Mark any existing pages in given range for
8382 8380 * migration
8383 8381 */
8384 8382 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8385 8383 vp, svd->offset, 1);
8386 8384
8387 8385 /*
8388 8386 * Don't need to try to split or concatenate
8389 8387 * segments, since policy is same or this is a shared
8390 8388 * memory segment
8391 8389 */
8392 8390 if (already_set || svd->type == MAP_SHARED)
8393 8391 break;
8394 8392
8395 8393 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8396 8394 ASSERT(svd->amp == NULL);
8397 8395 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8398 8396 ASSERT(svd->softlockcnt == 0);
8399 8397 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8400 8398 HAT_REGION_TEXT);
8401 8399 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8402 8400 }
8403 8401
8404 8402 /*
8405 8403 * Split off new segment if advice only applies to a
8406 8404 * portion of existing segment starting in middle
8407 8405 */
8408 8406 new_seg = NULL;
8409 8407 eaddr = addr + len;
8410 8408 oldeaddr = seg->s_base + seg->s_size;
8411 8409 if (addr > seg->s_base) {
8412 8410 /*
8413 8411 * Must flush I/O page cache
8414 8412 * before splitting segment
8415 8413 */
8416 8414 if (svd->softlockcnt > 0)
8417 8415 segvn_purge(seg);
8418 8416
8419 8417 /*
8420 8418 * Split segment and return IE_REATTACH to tell
8421 8419 * as_ctl() that current segment changed
8422 8420 */
8423 8421 new_seg = segvn_split_seg(seg, addr);
8424 8422 new_svd = (struct segvn_data *)new_seg->s_data;
8425 8423 err = IE_REATTACH;
8426 8424
8427 8425 /*
8428 8426 * If new segment ends where old one
8429 8427 * did, try to concatenate the new
8430 8428 * segment with next one.
8431 8429 */
8432 8430 if (eaddr == oldeaddr) {
8433 8431 /*
8434 8432 * Set policy for new segment
8435 8433 */
8436 8434 (void) lgrp_privm_policy_set(policy,
8437 8435 &new_svd->policy_info,
8438 8436 new_seg->s_size);
8439 8437
8440 8438 next = AS_SEGNEXT(new_seg->s_as,
8441 8439 new_seg);
8442 8440
8443 8441 if (next &&
8444 8442 next->s_ops == &segvn_ops &&
8445 8443 eaddr == next->s_base)
8446 8444 (void) segvn_concat(new_seg,
8447 8445 next, 1);
8448 8446 }
8449 8447 }
8450 8448
8451 8449 /*
8452 8450 * Split off end of existing segment if advice only
8453 8451 * applies to a portion of segment ending before
8454 8452 * end of the existing segment
8455 8453 */
8456 8454 if (eaddr < oldeaddr) {
8457 8455 /*
8458 8456 * Must flush I/O page cache
8459 8457 * before splitting segment
8460 8458 */
8461 8459 if (svd->softlockcnt > 0)
8462 8460 segvn_purge(seg);
8463 8461
8464 8462 /*
8465 8463 * If beginning of old segment was already
8466 8464 * split off, use new segment to split end off
8467 8465 * from.
8468 8466 */
8469 8467 if (new_seg != NULL && new_seg != seg) {
8470 8468 /*
8471 8469 * Split segment
8472 8470 */
8473 8471 (void) segvn_split_seg(new_seg, eaddr);
8474 8472
8475 8473 /*
8476 8474 * Set policy for new segment
8477 8475 */
8478 8476 (void) lgrp_privm_policy_set(policy,
8479 8477 &new_svd->policy_info,
8480 8478 new_seg->s_size);
8481 8479 } else {
8482 8480 /*
8483 8481 * Split segment and return IE_REATTACH
8484 8482 * to tell as_ctl() that current
8485 8483 * segment changed
8486 8484 */
8487 8485 (void) segvn_split_seg(seg, eaddr);
8488 8486 err = IE_REATTACH;
8489 8487
8490 8488 (void) lgrp_privm_policy_set(policy,
8491 8489 &svd->policy_info, seg->s_size);
8492 8490
8493 8491 /*
8494 8492 * If new segment starts where old one
8495 8493 * did, try to concatenate it with
8496 8494 * previous segment.
8497 8495 */
8498 8496 if (addr == seg->s_base) {
8499 8497 prev = AS_SEGPREV(seg->s_as,
8500 8498 seg);
8501 8499
8502 8500 /*
8503 8501 * Drop lock for private data
8504 8502 * of current segment before
8505 8503 * concatenating (deleting) it
8506 8504 */
8507 8505 if (prev &&
8508 8506 prev->s_ops ==
8509 8507 &segvn_ops &&
8510 8508 addr == prev->s_base +
8511 8509 prev->s_size) {
8512 8510 SEGVN_LOCK_EXIT(
8513 8511 seg->s_as,
8514 8512 &svd->lock);
8515 8513 (void) segvn_concat(
8516 8514 prev, seg, 1);
8517 8515 return (err);
8518 8516 }
8519 8517 }
8520 8518 }
8521 8519 }
8522 8520 break;
8523 8521 case MADV_SEQUENTIAL:
8524 8522 ASSERT(seg->s_szc == 0);
8525 8523 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8526 8524 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8527 8525 /* FALLTHROUGH */
8528 8526 case MADV_NORMAL:
8529 8527 case MADV_RANDOM:
8530 8528 bvpp = &svd->vpage[page];
8531 8529 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8532 8530 for (; bvpp < evpp; bvpp++)
8533 8531 VPP_SETADVICE(bvpp, behav);
8534 8532 svd->advice = MADV_NORMAL;
8535 8533 break;
8536 8534 case MADV_WILLNEED: /* handled in memcntl */
8537 8535 case MADV_DONTNEED: /* handled in memcntl */
8538 8536 case MADV_FREE: /* handled above */
8539 8537 break;
8540 8538 default:
8541 8539 err = EINVAL;
8542 8540 }
8543 8541 }
8544 8542 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8545 8543 return (err);
8546 8544 }
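
The MADV_* advice values handled by segvn_advise() above are the ones a process passes to madvise(3C); the call reaches segvn through the address-space layer's advise op. A minimal userland sketch that exercises the MADV_SEQUENTIAL and MADV_FREE paths on a private anonymous mapping is shown below; it is illustrative only, not part of this change, and error handling is limited to perror().

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t len = 1024 * 1024;
	char *p;

	/* Private anonymous mapping: no vnode, so MADV_FREE can apply. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	(void) memset(p, 0xa5, len);

	/* Advise sequential access; segvn unloads mappings to detect it. */
	if (madvise(p, len, MADV_SEQUENTIAL) != 0)
		perror("madvise(MADV_SEQUENTIAL)");

	/* Declare the contents disposable; may fail with EAGAIN as above. */
	if (madvise(p, len, MADV_FREE) != 0)
		perror("madvise(MADV_FREE)");

	(void) munmap(p, len);
	return (0);
}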
8547 8545
8548 8546 /*
8549 8547 * There is one kind of inheritance that can be specified for pages:
8550 8548 *
8551 8549 * SEGP_INH_ZERO - Pages should be zeroed in the child
8552 8550 */
8553 8551 static int
8554 8552 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8555 8553 {
8556 8554 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8557 8555 struct vpage *bvpp, *evpp;
8558 8556 size_t page;
8559 8557 int ret = 0;
8560 8558
8561 8559 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8562 8560
8563 8561 /* Can't support something we don't know about */
8564 8562 if (behav != SEGP_INH_ZERO)
8565 8563 return (ENOTSUP);
8566 8564
8567 8565 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8568 8566
8569 8567 /*
8570 8568 * This must be a straightforward anonymous segment that is mapped
8571 8569 * privately and is not backed by a vnode.
8572 8570 */
8573 8571 if (svd->tr_state != SEGVN_TR_OFF ||
8574 8572 svd->type != MAP_PRIVATE ||
8575 8573 svd->vp != NULL) {
8576 8574 ret = EINVAL;
8577 8575 goto out;
8578 8576 }
8579 8577
8580 8578 /*
8581 8579 * If the entire segment has been marked as inherit zero, then no reason
8582 8580 * to do anything else.
8583 8581 */
8584 8582 if (svd->svn_inz == SEGVN_INZ_ALL) {
8585 8583 ret = 0;
8586 8584 goto out;
8587 8585 }
8588 8586
8589 8587 /*
8590 8588 * If this applies to the entire segment, simply mark it and we're done.
8591 8589 */
8592 8590 if ((addr == seg->s_base) && (len == seg->s_size)) {
8593 8591 svd->svn_inz = SEGVN_INZ_ALL;
8594 8592 ret = 0;
8595 8593 goto out;
8596 8594 }
8597 8595
8598 8596 /*
8599 8597 * We've been asked to mark a subset of this segment as inherit zero,
8600 8598 	 * therefore we need to manipulate its vpages.
8601 8599 */
8602 8600 if (svd->vpage == NULL) {
8603 8601 segvn_vpage(seg);
8604 8602 if (svd->vpage == NULL) {
8605 8603 ret = ENOMEM;
8606 8604 goto out;
8607 8605 }
8608 8606 }
8609 8607
8610 8608 svd->svn_inz = SEGVN_INZ_VPP;
8611 8609 page = seg_page(seg, addr);
8612 8610 bvpp = &svd->vpage[page];
8613 8611 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8614 8612 for (; bvpp < evpp; bvpp++)
8615 8613 VPP_SETINHZERO(bvpp);
8616 8614 ret = 0;
8617 8615
8618 8616 out:
8619 8617 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8620 8618 return (ret);
8621 8619 }
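
The SEGP_INH_ZERO behavior handled by segvn_inherit() is the segment-level half of the inherit-zero feature; from userland it is requested with memcntl(2) and the MC_INHERIT_ZERO command (assuming an illumos-derived system where that command is available). A small hypothetical sketch:

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t len = 64 * 1024;
	char *p;
	pid_t pid;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	(void) strcpy(p, "parent data");

	/* Ask that this range read back as zeroes in any forked child. */
	if (memcntl(p, len, MC_INHERIT_ZERO, (caddr_t)0, 0, 0) != 0)
		perror("memcntl(MC_INHERIT_ZERO)");

	if ((pid = fork()) == 0) {
		/* Child: the inherit-zero range reads as an empty string. */
		(void) printf("child sees: \"%s\"\n", p);
		_exit(0);
	}
	(void) wait(NULL);
	(void) printf("parent sees: \"%s\"\n", p);
	return (0);
}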
8622 8620
8623 8621 /*
8624 8622 * Create a vpage structure for this seg.
8625 8623 */
8626 8624 static void
8627 8625 segvn_vpage(struct seg *seg)
8628 8626 {
8629 8627 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8630 8628 struct vpage *vp, *evp;
8631 8629 static pgcnt_t page_limit = 0;
8632 8630
8633 8631 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8634 8632
8635 8633 /*
8636 8634 * If no vpage structure exists, allocate one. Copy the protections
8637 8635 * and the advice from the segment itself to the individual pages.
8638 8636 */
8639 8637 if (svd->vpage == NULL) {
8640 8638 /*
8641 8639 * Start by calculating the number of pages we must allocate to
8642 8640 		 * track the per-page vpage structs needed for this entire
8643 8641 * segment. If we know now that it will require more than our
8644 8642 * heuristic for the maximum amount of kmem we can consume then
8645 8643 * fail. We do this here, instead of trying to detect this deep
8646 8644 * in page_resv and propagating the error up, since the entire
8647 8645 * memory allocation stack is not amenable to passing this
8648 8646 * back. Instead, it wants to keep trying.
8649 8647 *
8650 8648 * As a heuristic we set a page limit of 5/8s of total_pages
8651 8649 * for this allocation. We use shifts so that no floating
8652 8650 * point conversion takes place and only need to do the
8653 8651 * calculation once.
8654 8652 */
8655 8653 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
8656 8654 pgcnt_t npages = mem_needed >> PAGESHIFT;
8657 8655
8658 8656 if (page_limit == 0)
8659 8657 page_limit = (total_pages >> 1) + (total_pages >> 3);
8660 8658
8661 8659 if (npages > page_limit)
8662 8660 return;
8663 8661
8664 8662 svd->pageadvice = 1;
8665 8663 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8666 8664 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8667 8665 for (vp = svd->vpage; vp < evp; vp++) {
8668 8666 VPP_SETPROT(vp, svd->prot);
8669 8667 VPP_SETADVICE(vp, svd->advice);
8670 8668 }
8671 8669 }
8672 8670 }
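
The page_limit heuristic in segvn_vpage() relies on the shift identity described in its comment: (x >> 1) + (x >> 3) equals 5x/8 when x is a multiple of 8, and falls slightly below it otherwise. A trivial standalone check, using a made-up total_pages value purely for illustration:

#include <stdio.h>

int
main(void)
{
	unsigned long total_pages = 1048576;	/* hypothetical page count */
	unsigned long limit = (total_pages >> 1) + (total_pages >> 3);

	/* x/2 + x/8 == 5x/8 for multiples of 8; no floating point needed. */
	(void) printf("5/8 of %lu pages is %lu (exact: %lu)\n",
	    total_pages, limit, total_pages * 5 / 8);
	return (0);
}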
8673 8671
8674 8672 /*
8675 8673 * Dump the pages belonging to this segvn segment.
8676 8674 */
8677 8675 static void
8678 8676 segvn_dump(struct seg *seg)
8679 8677 {
8680 8678 struct segvn_data *svd;
8681 8679 page_t *pp;
8682 8680 struct anon_map *amp;
8683 8681 ulong_t anon_index;
8684 8682 struct vnode *vp;
8685 8683 u_offset_t off, offset;
8686 8684 pfn_t pfn;
8687 8685 pgcnt_t page, npages;
8688 8686 caddr_t addr;
8689 8687
8690 8688 npages = seg_pages(seg);
8691 8689 svd = (struct segvn_data *)seg->s_data;
8692 8690 vp = svd->vp;
8693 8691 off = offset = svd->offset;
8694 8692 addr = seg->s_base;
8695 8693
8696 8694 if ((amp = svd->amp) != NULL) {
8697 8695 anon_index = svd->anon_index;
8698 8696 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8699 8697 }
8700 8698
8701 8699 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8702 8700 struct anon *ap;
8703 8701 int we_own_it = 0;
8704 8702
8705 8703 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8706 8704 swap_xlate_nopanic(ap, &vp, &off);
8707 8705 } else {
8708 8706 vp = svd->vp;
8709 8707 off = offset;
8710 8708 }
8711 8709
8712 8710 /*
8713 8711 * If pp == NULL, the page either does not exist
8714 8712 * or is exclusively locked. So determine if it
8715 8713 * exists before searching for it.
8716 8714 */
8717 8715
8718 8716 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8719 8717 we_own_it = 1;
8720 8718 else
8721 8719 pp = page_exists(vp, off);
8722 8720
8723 8721 if (pp) {
8724 8722 pfn = page_pptonum(pp);
8725 8723 dump_addpage(seg->s_as, addr, pfn);
8726 8724 if (we_own_it)
8727 8725 page_unlock(pp);
8728 8726 }
8729 8727 addr += PAGESIZE;
8730 8728 dump_timeleft = dump_timeout;
8731 8729 }
8732 8730
8733 8731 if (amp != NULL)
8734 8732 		ANON_LOCK_EXIT(&amp->a_rwlock);
8735 8733 }
8736 8734
8737 8735 #ifdef DEBUG
8738 8736 static uint32_t segvn_pglock_mtbf = 0;
8739 8737 #endif
8740 8738
8741 8739 #define PCACHE_SHWLIST ((page_t *)-2)
8742 8740 #define NOPCACHE_SHWLIST ((page_t *)-1)
8743 8741
8744 8742 /*
8745 8743 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8746 8744 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8747 8745 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8748 8746 * the same parts of the segment. Currently shadow list creation is only
8749 8747 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8750 8748 * tagged with segment pointer, starting virtual address and length. This
8751 8749 * approach for MAP_SHARED segments may add many pcache entries for the same
8752 8750 * set of pages and lead to long hash chains that decrease pcache lookup
8753 8751 * performance. To avoid this issue for shared segments shared anon map and
8754 8752 * starting anon index are used for pcache entry tagging. This allows all
8755 8753 * segments to share pcache entries for the same anon range and reduces pcache
8756 8754 * chain's length as well as memory overhead from duplicate shadow lists and
8757 8755 * pcache entries.
8758 8756 *
8759 8757 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8760 8758 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8761 8759 * part of softlockcnt accounting is done differently for private and shared
8762 8760 * segments. In private segment case softlock is only incremented when a new
8763 8761 * shadow list is created but not when an existing one is found via
8764 8762 * seg_plookup(). pcache entries have reference count incremented/decremented
8765 8763 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8766 8764 * reference count can be purged (and purging is needed before segment can be
8767 8765 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8768 8766 * decrement softlockcnt. Since in private segment case each of its pcache
8769 8767 * entries only belongs to this segment we can expect that when
8770 8768 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8771 8769 * segment purge will succeed and softlockcnt will drop to 0. In shared
8772 8770 * segment case reference count in pcache entry counts active locks from many
8773 8771 * different segments so we can't expect segment purging to succeed even when
8774 8772 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8775 8773 * segment. To be able to determine when there're no pending pagelocks in
8776 8774 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8777 8775 * but instead softlockcnt is incremented and decremented for every
8778 8776 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8779 8777 * list was created or an existing one was found. When softlockcnt drops to 0
8780 8778 * this segment no longer has any claims for pcached shadow lists and the
8781 8779 * segment can be freed even if there're still active pcache entries
8782 8780 * shared by this segment anon map. Shared segment pcache entries belong to
8783 8781 * anon map and are typically removed when anon map is freed after all
8784 8782 * processes destroy the segments that use this anon map.
8785 8783 */
8786 8784 static int
8787 8785 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8788 8786 enum lock_type type, enum seg_rw rw)
8789 8787 {
8790 8788 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8791 8789 size_t np;
8792 8790 pgcnt_t adjustpages;
8793 8791 pgcnt_t npages;
8794 8792 ulong_t anon_index;
8795 8793 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8796 8794 uint_t error;
8797 8795 struct anon_map *amp;
8798 8796 pgcnt_t anpgcnt;
8799 8797 struct page **pplist, **pl, *pp;
8800 8798 caddr_t a;
8801 8799 size_t page;
8802 8800 caddr_t lpgaddr, lpgeaddr;
8803 8801 anon_sync_obj_t cookie;
8804 8802 int anlock;
8805 8803 struct anon_map *pamp;
8806 8804 caddr_t paddr;
8807 8805 seg_preclaim_cbfunc_t preclaim_callback;
8808 8806 size_t pgsz;
8809 8807 int use_pcache;
8810 8808 size_t wlen;
8811 8809 uint_t pflags = 0;
8812 8810 int sftlck_sbase = 0;
8813 8811 int sftlck_send = 0;
8814 8812
8815 8813 #ifdef DEBUG
8816 8814 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8817 8815 hrtime_t ts = gethrtime();
8818 8816 if ((ts % segvn_pglock_mtbf) == 0) {
8819 8817 return (ENOTSUP);
8820 8818 }
8821 8819 if ((ts % segvn_pglock_mtbf) == 1) {
8822 8820 return (EFAULT);
8823 8821 }
8824 8822 }
8825 8823 #endif
8826 8824
8827 8825 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8828 8826 "segvn_pagelock: start seg %p addr %p", seg, addr);
8829 8827
8830 8828 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8831 8829 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8832 8830
8833 8831 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8834 8832
8835 8833 /*
8836 8834 * for now we only support pagelock to anon memory. We would have to
8837 8835 * check protections for vnode objects and call into the vnode driver.
8838 8836 * That's too much for a fast path. Let the fault entry point handle
8839 8837 * it.
8840 8838 */
8841 8839 if (svd->vp != NULL) {
8842 8840 if (type == L_PAGELOCK) {
8843 8841 error = ENOTSUP;
8844 8842 goto out;
8845 8843 }
8846 8844 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8847 8845 }
8848 8846 if ((amp = svd->amp) == NULL) {
8849 8847 if (type == L_PAGELOCK) {
8850 8848 error = EFAULT;
8851 8849 goto out;
8852 8850 }
8853 8851 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8854 8852 }
8855 8853 if (rw != S_READ && rw != S_WRITE) {
8856 8854 if (type == L_PAGELOCK) {
8857 8855 error = ENOTSUP;
8858 8856 goto out;
8859 8857 }
8860 8858 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8861 8859 }
8862 8860
8863 8861 if (seg->s_szc != 0) {
8864 8862 /*
8865 8863 * We are adjusting the pagelock region to the large page size
8866 8864 * boundary because the unlocked part of a large page cannot
8867 8865 * be freed anyway unless all constituent pages of a large
8868 8866 * page are locked. Bigger regions reduce pcache chain length
8869 8867 * and improve lookup performance. The tradeoff is that the
8870 8868 * very first segvn_pagelock() call for a given page is more
8871 8869 * expensive if only 1 page_t is needed for IO. This is only
8872 8870 * an issue if pcache entry doesn't get reused by several
8873 8871 * subsequent calls. We optimize here for the case when pcache
8874 8872 * is heavily used by repeated IOs to the same address range.
8875 8873 *
8876 8874 * Note segment's page size cannot change while we are holding
8877 8875 * as lock. And then it cannot change while softlockcnt is
8878 8876 * not 0. This will allow us to correctly recalculate large
8879 8877 * page size region for the matching pageunlock/reclaim call
8880 8878 * since as_pageunlock() caller must always match
8881 8879 * as_pagelock() call's addr and len.
8882 8880 *
8883 8881 * For pageunlock *ppp points to the pointer of page_t that
8884 8882 * corresponds to the real unadjusted start address. Similar
8885 8883 * for pagelock *ppp must point to the pointer of page_t that
8886 8884 * corresponds to the real unadjusted start address.
8887 8885 */
8888 8886 pgsz = page_get_pagesize(seg->s_szc);
8889 8887 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8890 8888 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8891 8889 } else if (len < segvn_pglock_comb_thrshld) {
8892 8890 lpgaddr = addr;
8893 8891 lpgeaddr = addr + len;
8894 8892 adjustpages = 0;
8895 8893 pgsz = PAGESIZE;
8896 8894 } else {
8897 8895 /*
8898 8896 * Align the address range of large enough requests to allow
8899 8897 * combining of different shadow lists into 1 to reduce memory
8900 8898 * overhead from potentially overlapping large shadow lists
8901 8899 * (worst case is we have a 1MB IO into buffers with start
8902 8900 * addresses separated by 4K). Alignment is only possible if
8903 8901 * padded chunks have sufficient access permissions. Note
8904 8902 * permissions won't change between L_PAGELOCK and
8905 8903 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8906 8904 * segvn_setprot() to wait until softlockcnt drops to 0. This
8907 8905 * allows us to determine in L_PAGEUNLOCK the same range we
8908 8906 * computed in L_PAGELOCK.
8909 8907 *
8910 8908 * If alignment is limited by segment ends set
8911 8909 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8912 8910 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8913 8911 * per segment counters. In L_PAGEUNLOCK case decrease
8914 8912 * softlockcnt_sbase/softlockcnt_send counters if
8915 8913 * sftlck_sbase/sftlck_send flags are set. When
8916 8914 * softlockcnt_sbase/softlockcnt_send are non 0
8917 8915 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8918 8916 * won't merge the segments. This restriction combined with
8919 8917 * restriction on segment unmapping and splitting for segments
8920 8918 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8921 8919 * correctly determine the same range that was previously
8922 8920 * locked by matching L_PAGELOCK.
8923 8921 */
8924 8922 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8925 8923 pgsz = PAGESIZE;
8926 8924 if (svd->type == MAP_PRIVATE) {
8927 8925 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8928 8926 segvn_pglock_comb_balign);
8929 8927 if (lpgaddr < seg->s_base) {
8930 8928 lpgaddr = seg->s_base;
8931 8929 sftlck_sbase = 1;
8932 8930 }
8933 8931 } else {
8934 8932 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8935 8933 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8936 8934 if (aaix < svd->anon_index) {
8937 8935 lpgaddr = seg->s_base;
8938 8936 sftlck_sbase = 1;
8939 8937 } else {
8940 8938 lpgaddr = addr - ptob(aix - aaix);
8941 8939 ASSERT(lpgaddr >= seg->s_base);
8942 8940 }
8943 8941 }
8944 8942 if (svd->pageprot && lpgaddr != addr) {
8945 8943 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8946 8944 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8947 8945 while (vp < evp) {
8948 8946 if ((VPP_PROT(vp) & protchk) == 0) {
8949 8947 break;
8950 8948 }
8951 8949 vp++;
8952 8950 }
8953 8951 if (vp < evp) {
8954 8952 lpgaddr = addr;
8955 8953 pflags = 0;
8956 8954 }
8957 8955 }
8958 8956 lpgeaddr = addr + len;
8959 8957 if (pflags) {
8960 8958 if (svd->type == MAP_PRIVATE) {
8961 8959 lpgeaddr = (caddr_t)P2ROUNDUP(
8962 8960 (uintptr_t)lpgeaddr,
8963 8961 segvn_pglock_comb_balign);
8964 8962 } else {
8965 8963 ulong_t aix = svd->anon_index +
8966 8964 seg_page(seg, lpgeaddr);
8967 8965 ulong_t aaix = P2ROUNDUP(aix,
8968 8966 segvn_pglock_comb_palign);
8969 8967 if (aaix < aix) {
8970 8968 lpgeaddr = 0;
8971 8969 } else {
8972 8970 lpgeaddr += ptob(aaix - aix);
8973 8971 }
8974 8972 }
8975 8973 if (lpgeaddr == 0 ||
8976 8974 lpgeaddr > seg->s_base + seg->s_size) {
8977 8975 lpgeaddr = seg->s_base + seg->s_size;
8978 8976 sftlck_send = 1;
8979 8977 }
8980 8978 }
8981 8979 if (svd->pageprot && lpgeaddr != addr + len) {
8982 8980 struct vpage *vp;
8983 8981 struct vpage *evp;
8984 8982
8985 8983 vp = &svd->vpage[seg_page(seg, addr + len)];
8986 8984 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8987 8985
8988 8986 while (vp < evp) {
8989 8987 if ((VPP_PROT(vp) & protchk) == 0) {
8990 8988 break;
8991 8989 }
8992 8990 vp++;
8993 8991 }
8994 8992 if (vp < evp) {
8995 8993 lpgeaddr = addr + len;
8996 8994 }
8997 8995 }
8998 8996 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8999 8997 }
9000 8998
9001 8999 /*
9002 9000 * For MAP_SHARED segments we create pcache entries tagged by amp and
9003 9001 * anon index so that we can share pcache entries with other segments
9004 9002 * that map this amp. For private segments pcache entries are tagged
9005 9003 * with segment and virtual address.
9006 9004 */
9007 9005 if (svd->type == MAP_SHARED) {
9008 9006 pamp = amp;
9009 9007 paddr = (caddr_t)((lpgaddr - seg->s_base) +
9010 9008 ptob(svd->anon_index));
9011 9009 preclaim_callback = shamp_reclaim;
9012 9010 } else {
9013 9011 pamp = NULL;
9014 9012 paddr = lpgaddr;
9015 9013 preclaim_callback = segvn_reclaim;
9016 9014 }
9017 9015
9018 9016 if (type == L_PAGEUNLOCK) {
9019 9017 VM_STAT_ADD(segvnvmstats.pagelock[0]);
9020 9018
9021 9019 /*
9022 9020 * update hat ref bits for /proc. We need to make sure
9023 9021 * that threads tracing the ref and mod bits of the
9024 9022 * address space get the right data.
9025 9023 * Note: page ref and mod bits are updated at reclaim time
9026 9024 */
9027 9025 if (seg->s_as->a_vbits) {
9028 9026 for (a = addr; a < addr + len; a += PAGESIZE) {
9029 9027 if (rw == S_WRITE) {
9030 9028 hat_setstat(seg->s_as, a,
9031 9029 PAGESIZE, P_REF | P_MOD);
9032 9030 } else {
9033 9031 hat_setstat(seg->s_as, a,
9034 9032 PAGESIZE, P_REF);
9035 9033 }
9036 9034 }
9037 9035 }
9038 9036
9039 9037 /*
9040 9038 * Check the shadow list entry after the last page used in
9041 9039 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
9042 9040 * was not inserted into pcache and is not large page
9043 9041 * adjusted. In this case call reclaim callback directly and
9044 9042 * don't adjust the shadow list start and size for large
9045 9043 * pages.
9046 9044 */
9047 9045 npages = btop(len);
9048 9046 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
9049 9047 void *ptag;
9050 9048 if (pamp != NULL) {
9051 9049 ASSERT(svd->type == MAP_SHARED);
9052 9050 ptag = (void *)pamp;
9053 9051 paddr = (caddr_t)((addr - seg->s_base) +
9054 9052 ptob(svd->anon_index));
9055 9053 } else {
9056 9054 ptag = (void *)seg;
9057 9055 paddr = addr;
9058 9056 }
9059 9057 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
9060 9058 } else {
9061 9059 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
9062 9060 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
9063 9061 len = lpgeaddr - lpgaddr;
9064 9062 npages = btop(len);
9065 9063 seg_pinactive(seg, pamp, paddr, len,
9066 9064 *ppp - adjustpages, rw, pflags, preclaim_callback);
9067 9065 }
9068 9066
9069 9067 if (pamp != NULL) {
9070 9068 ASSERT(svd->type == MAP_SHARED);
9071 9069 ASSERT(svd->softlockcnt >= npages);
9072 9070 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
9073 9071 }
9074 9072
9075 9073 if (sftlck_sbase) {
9076 9074 ASSERT(svd->softlockcnt_sbase > 0);
9077 9075 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
9078 9076 }
9079 9077 if (sftlck_send) {
9080 9078 ASSERT(svd->softlockcnt_send > 0);
9081 9079 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
9082 9080 }
9083 9081
9084 9082 /*
9085 9083 * If someone is blocked while unmapping, we purge
9086 9084 * segment page cache and thus reclaim pplist synchronously
9087 9085 * without waiting for seg_pasync_thread. This speeds up
9088 9086 * unmapping in cases where munmap(2) is called, while
9089 9087 * raw async i/o is still in progress or where a thread
9090 9088 * exits on data fault in a multithreaded application.
9091 9089 */
9092 9090 if (AS_ISUNMAPWAIT(seg->s_as)) {
9093 9091 if (svd->softlockcnt == 0) {
9094 9092 mutex_enter(&seg->s_as->a_contents);
9095 9093 if (AS_ISUNMAPWAIT(seg->s_as)) {
9096 9094 AS_CLRUNMAPWAIT(seg->s_as);
9097 9095 cv_broadcast(&seg->s_as->a_cv);
9098 9096 }
9099 9097 mutex_exit(&seg->s_as->a_contents);
9100 9098 } else if (pamp == NULL) {
9101 9099 /*
9102 9100 * softlockcnt is not 0 and this is a
9103 9101 * MAP_PRIVATE segment. Try to purge its
9104 9102 * pcache entries to reduce softlockcnt.
9105 9103 * If it drops to 0 segvn_reclaim()
9106 9104 * will wake up a thread waiting on
9107 9105 * unmapwait flag.
9108 9106 *
9109 9107 * We don't purge MAP_SHARED segments with non
9110 9108 * 0 softlockcnt since IO is still in progress
9111 9109 * for such segments.
9112 9110 */
9113 9111 ASSERT(svd->type == MAP_PRIVATE);
9114 9112 segvn_purge(seg);
9115 9113 }
9116 9114 }
9117 9115 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9118 9116 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
9119 9117 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
9120 9118 return (0);
9121 9119 }
9122 9120
9123 9121 /* The L_PAGELOCK case ... */
9124 9122
9125 9123 VM_STAT_ADD(segvnvmstats.pagelock[1]);
9126 9124
9127 9125 /*
9128 9126 * For MAP_SHARED segments we have to check protections before
9129 9127 * seg_plookup() since pcache entries may be shared by many segments
9130 9128 * with potentially different page protections.
9131 9129 */
9132 9130 if (pamp != NULL) {
9133 9131 ASSERT(svd->type == MAP_SHARED);
9134 9132 if (svd->pageprot == 0) {
9135 9133 if ((svd->prot & protchk) == 0) {
9136 9134 error = EACCES;
9137 9135 goto out;
9138 9136 }
9139 9137 } else {
9140 9138 /*
9141 9139 * check page protections
9142 9140 */
9143 9141 caddr_t ea;
9144 9142
9145 9143 if (seg->s_szc) {
9146 9144 a = lpgaddr;
9147 9145 ea = lpgeaddr;
9148 9146 } else {
9149 9147 a = addr;
9150 9148 ea = addr + len;
9151 9149 }
9152 9150 for (; a < ea; a += pgsz) {
9153 9151 struct vpage *vp;
9154 9152
9155 9153 ASSERT(seg->s_szc == 0 ||
9156 9154 sameprot(seg, a, pgsz));
9157 9155 vp = &svd->vpage[seg_page(seg, a)];
9158 9156 if ((VPP_PROT(vp) & protchk) == 0) {
9159 9157 error = EACCES;
9160 9158 goto out;
9161 9159 }
9162 9160 }
9163 9161 }
9164 9162 }
9165 9163
9166 9164 /*
9167 9165 * try to find pages in segment page cache
9168 9166 */
9169 9167 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
9170 9168 if (pplist != NULL) {
9171 9169 if (pamp != NULL) {
9172 9170 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
9173 9171 ASSERT(svd->type == MAP_SHARED);
9174 9172 atomic_add_long((ulong_t *)&svd->softlockcnt,
9175 9173 npages);
9176 9174 }
9177 9175 if (sftlck_sbase) {
9178 9176 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9179 9177 }
9180 9178 if (sftlck_send) {
9181 9179 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9182 9180 }
9183 9181 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9184 9182 *ppp = pplist + adjustpages;
9185 9183 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9186 9184 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9187 9185 return (0);
9188 9186 }
9189 9187
9190 9188 /*
9191 9189 * For MAP_SHARED segments we already verified above that segment
9192 9190 * protections allow this pagelock operation.
9193 9191 */
9194 9192 if (pamp == NULL) {
9195 9193 ASSERT(svd->type == MAP_PRIVATE);
9196 9194 if (svd->pageprot == 0) {
9197 9195 if ((svd->prot & protchk) == 0) {
9198 9196 error = EACCES;
9199 9197 goto out;
9200 9198 }
9201 9199 if (svd->prot & PROT_WRITE) {
9202 9200 wlen = lpgeaddr - lpgaddr;
9203 9201 } else {
9204 9202 wlen = 0;
9205 9203 ASSERT(rw == S_READ);
9206 9204 }
9207 9205 } else {
9208 9206 int wcont = 1;
9209 9207 /*
9210 9208 * check page protections
9211 9209 */
9212 9210 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9213 9211 struct vpage *vp;
9214 9212
9215 9213 ASSERT(seg->s_szc == 0 ||
9216 9214 sameprot(seg, a, pgsz));
9217 9215 vp = &svd->vpage[seg_page(seg, a)];
9218 9216 if ((VPP_PROT(vp) & protchk) == 0) {
9219 9217 error = EACCES;
9220 9218 goto out;
9221 9219 }
9222 9220 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9223 9221 wlen += pgsz;
9224 9222 } else {
9225 9223 wcont = 0;
9226 9224 ASSERT(rw == S_READ);
9227 9225 }
9228 9226 }
9229 9227 }
9230 9228 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9231 9229 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9232 9230 }
9233 9231
9234 9232 /*
9235 9233 * Only build large page adjusted shadow list if we expect to insert
9236 9234 * it into pcache. For large enough pages it's a big overhead to
9237 9235 * create a shadow list of the entire large page. But this overhead
9238 9236 * should be amortized over repeated pcache hits on subsequent reuse
9239 9237 * of this shadow list (IO into any range within this shadow list will
9240 9238 * find it in pcache since we large page align the request for pcache
9241 9239 * lookups). pcache performance is improved with bigger shadow lists
9242 9240 * as it reduces the time to pcache the entire big segment and reduces
9243 9241 * pcache chain length.
9244 9242 */
9245 9243 if (seg_pinsert_check(seg, pamp, paddr,
9246 9244 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9247 9245 addr = lpgaddr;
9248 9246 len = lpgeaddr - lpgaddr;
9249 9247 use_pcache = 1;
9250 9248 } else {
9251 9249 use_pcache = 0;
9252 9250 /*
9253 9251 * Since this entry will not be inserted into the pcache, we
9254 9252 * will not do any adjustments to the starting address or
9255 9253 * size of the memory to be locked.
9256 9254 */
9257 9255 adjustpages = 0;
9258 9256 }
9259 9257 npages = btop(len);
9260 9258
9261 9259 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9262 9260 pl = pplist;
9263 9261 *ppp = pplist + adjustpages;
9264 9262 /*
9265 9263 * If use_pcache is 0 this shadow list is not large page adjusted.
9266 9264 * Record this info in the last entry of shadow array so that
9267 9265 * L_PAGEUNLOCK can determine if it should large page adjust the
9268 9266 * address range to find the real range that was locked.
9269 9267 */
9270 9268 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9271 9269
9272 9270 page = seg_page(seg, addr);
9273 9271 anon_index = svd->anon_index + page;
9274 9272
9275 9273 anlock = 0;
9276 9274 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9277 9275 ASSERT(amp->a_szc >= seg->s_szc);
9278 9276 anpgcnt = page_get_pagecnt(amp->a_szc);
9279 9277 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9280 9278 struct anon *ap;
9281 9279 struct vnode *vp;
9282 9280 u_offset_t off;
9283 9281
9284 9282 /*
9285 9283 * Lock and unlock anon array only once per large page.
9286 9284 * anon_array_enter() locks the root anon slot according to
9287 9285 * a_szc which can't change while anon map is locked. We lock
9288 9286 * anon the first time through this loop and each time we
9289 9287 * reach anon index that corresponds to a root of a large
9290 9288 * page.
9291 9289 */
9292 9290 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9293 9291 ASSERT(anlock == 0);
9294 9292 anon_array_enter(amp, anon_index, &cookie);
9295 9293 anlock = 1;
9296 9294 }
9297 9295 ap = anon_get_ptr(amp->ahp, anon_index);
9298 9296
9299 9297 /*
9300 9298 * We must never use seg_pcache for COW pages
9301 9299 * because we might end up with original page still
9302 9300 * lying in seg_pcache even after private page is
9303 9301 * created. This leads to data corruption as
9304 9302 * aio_write refers to the page still in cache
9305 9303 * while all other accesses refer to the private
9306 9304 * page.
9307 9305 */
9308 9306 if (ap == NULL || ap->an_refcnt != 1) {
9309 9307 struct vpage *vpage;
9310 9308
9311 9309 if (seg->s_szc) {
9312 9310 error = EFAULT;
9313 9311 break;
9314 9312 }
9315 9313 if (svd->vpage != NULL) {
9316 9314 vpage = &svd->vpage[seg_page(seg, a)];
9317 9315 } else {
9318 9316 vpage = NULL;
9319 9317 }
9320 9318 ASSERT(anlock);
9321 9319 anon_array_exit(&cookie);
9322 9320 anlock = 0;
9323 9321 pp = NULL;
9324 9322 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9325 9323 vpage, &pp, 0, F_INVAL, rw, 1);
9326 9324 if (error) {
9327 9325 error = fc_decode(error);
9328 9326 break;
9329 9327 }
9330 9328 anon_array_enter(amp, anon_index, &cookie);
9331 9329 anlock = 1;
9332 9330 ap = anon_get_ptr(amp->ahp, anon_index);
9333 9331 if (ap == NULL || ap->an_refcnt != 1) {
9334 9332 error = EFAULT;
9335 9333 break;
9336 9334 }
9337 9335 }
9338 9336 swap_xlate(ap, &vp, &off);
9339 9337 pp = page_lookup_nowait(vp, off, SE_SHARED);
9340 9338 if (pp == NULL) {
9341 9339 error = EFAULT;
9342 9340 break;
9343 9341 }
9344 9342 if (ap->an_pvp != NULL) {
9345 9343 anon_swap_free(ap, pp);
9346 9344 }
9347 9345 /*
9348 9346 * Unlock anon if this is the last slot in a large page.
9349 9347 */
9350 9348 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9351 9349 ASSERT(anlock);
9352 9350 anon_array_exit(&cookie);
9353 9351 anlock = 0;
9354 9352 }
9355 9353 *pplist++ = pp;
9356 9354 }
9357 9355 if (anlock) { /* Ensure the lock is dropped */
9358 9356 anon_array_exit(&cookie);
9359 9357 }
9360 9358 	ANON_LOCK_EXIT(&amp->a_rwlock);
9361 9359
9362 9360 if (a >= addr + len) {
9363 9361 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9364 9362 if (pamp != NULL) {
9365 9363 ASSERT(svd->type == MAP_SHARED);
9366 9364 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9367 9365 npages);
9368 9366 wlen = len;
9369 9367 }
9370 9368 if (sftlck_sbase) {
9371 9369 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9372 9370 }
9373 9371 if (sftlck_send) {
9374 9372 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9375 9373 }
9376 9374 if (use_pcache) {
9377 9375 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9378 9376 rw, pflags, preclaim_callback);
9379 9377 }
9380 9378 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9381 9379 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9382 9380 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9383 9381 return (0);
9384 9382 }
9385 9383
9386 9384 pplist = pl;
9387 9385 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9388 9386 while (np > (uint_t)0) {
9389 9387 ASSERT(PAGE_LOCKED(*pplist));
9390 9388 page_unlock(*pplist);
9391 9389 np--;
9392 9390 pplist++;
9393 9391 }
9394 9392 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9395 9393 out:
9396 9394 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9397 9395 *ppp = NULL;
9398 9396 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9399 9397 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9400 9398 return (error);
9401 9399 }
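
One detail of segvn_pagelock() worth calling out is the extra sentinel slot appended to every shadow list: pl[npages] is set to PCACHE_SHWLIST or NOPCACHE_SHWLIST so that the later L_PAGEUNLOCK call can tell whether the list was large-page adjusted and inserted into pcache. The toy sketch below mirrors only that convention; page_t and the sentinel values here are stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel's page_t and the two sentinel values. */
typedef struct page { int pg_dummy; } page_t;
#define	PCACHE_SHWLIST		((page_t *)-2)
#define	NOPCACHE_SHWLIST	((page_t *)-1)

/*
 * Build a shadow list with one extra slot; the sentinel in that slot
 * records whether the list is pcache/large-page adjusted.
 */
static page_t **
build_shadow_list(page_t **pages, size_t npages, int use_pcache)
{
	page_t **pl = malloc(sizeof (page_t *) * (npages + 1));
	size_t i;

	if (pl == NULL)
		return (NULL);
	for (i = 0; i < npages; i++)
		pl[i] = pages[i];
	pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
	return (pl);
}

int
main(void)
{
	page_t pg[3];
	page_t *pages[3] = { &pg[0], &pg[1], &pg[2] };
	size_t npages = 3;
	page_t **pl = build_shadow_list(pages, npages, 0);

	if (pl == NULL)
		return (1);

	/*
	 * Unlock side: the slot just past the request tells us whether the
	 * shadow list was inserted into pcache and large-page adjusted.
	 */
	if (pl[npages] == NOPCACHE_SHWLIST)
		(void) printf("direct reclaim, no large-page adjustment\n");
	else
		(void) printf("pcache-managed list; recompute aligned range\n");

	free(pl);
	return (0);
}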
9402 9400
9403 9401 /*
9404 9402 * purge any cached pages in the I/O page cache
9405 9403 */
9406 9404 static void
9407 9405 segvn_purge(struct seg *seg)
9408 9406 {
9409 9407 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9410 9408
9411 9409 /*
9412 9410 * pcache is only used by pure anon segments.
9413 9411 */
9414 9412 if (svd->amp == NULL || svd->vp != NULL) {
9415 9413 return;
9416 9414 }
9417 9415
9418 9416 /*
9419 9417 	 * For MAP_SHARED segments a non 0 softlockcnt means
9420 9418 * active IO is still in progress via this segment. So we only
9421 9419 * purge MAP_SHARED segments when their softlockcnt is 0.
9422 9420 */
9423 9421 if (svd->type == MAP_PRIVATE) {
9424 9422 if (svd->softlockcnt) {
9425 9423 seg_ppurge(seg, NULL, 0);
9426 9424 }
9427 9425 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9428 9426 seg_ppurge(seg, svd->amp, 0);
9429 9427 }
9430 9428 }
9431 9429
9432 9430 /*
9433 9431 * If async argument is not 0 we are called from pcache async thread and don't
9434 9432 * hold AS lock.
9435 9433 */
9436 9434
9437 9435 /*ARGSUSED*/
9438 9436 static int
9439 9437 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9440 9438 enum seg_rw rw, int async)
9441 9439 {
9442 9440 struct seg *seg = (struct seg *)ptag;
9443 9441 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9444 9442 pgcnt_t np, npages;
9445 9443 struct page **pl;
9446 9444
9447 9445 npages = np = btop(len);
9448 9446 ASSERT(npages);
9449 9447
9450 9448 ASSERT(svd->vp == NULL && svd->amp != NULL);
9451 9449 ASSERT(svd->softlockcnt >= npages);
9452 9450 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9453 9451
9454 9452 pl = pplist;
9455 9453
9456 9454 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9457 9455 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9458 9456
9459 9457 while (np > (uint_t)0) {
9460 9458 if (rw == S_WRITE) {
9461 9459 hat_setrefmod(*pplist);
9462 9460 } else {
9463 9461 hat_setref(*pplist);
9464 9462 }
9465 9463 page_unlock(*pplist);
9466 9464 np--;
9467 9465 pplist++;
9468 9466 }
9469 9467
9470 9468 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9471 9469
9472 9470 /*
9473 9471 * If we are pcache async thread we don't hold AS lock. This means if
9474 9472 * softlockcnt drops to 0 after the decrement below address space may
9475 9473 	 * get freed. We can't allow it since after softlock decrement to 0 we
9476 9474 * still need to access as structure for possible wakeup of unmap
9477 9475 * waiters. To prevent the disappearance of as we take this segment
9478 9476 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9479 9477 * make sure this routine completes before segment is freed.
9480 9478 *
9481 9479 * The second complication we have to deal with in async case is a
9482 9480 * possibility of missed wake up of unmap wait thread. When we don't
9483 9481 * hold as lock here we may take a_contents lock before unmap wait
9484 9482 * thread that was first to see softlockcnt was still not 0. As a
9485 9483 * result we'll fail to wake up an unmap wait thread. To avoid this
9486 9484 * race we set nounmapwait flag in as structure if we drop softlockcnt
9487 9485 * to 0 when we were called by pcache async thread. unmapwait thread
9488 9486 * will not block if this flag is set.
9489 9487 */
9490 9488 if (async) {
9491 9489 mutex_enter(&svd->segfree_syncmtx);
9492 9490 }
9493 9491
9494 9492 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9495 9493 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9496 9494 mutex_enter(&seg->s_as->a_contents);
9497 9495 if (async) {
9498 9496 AS_SETNOUNMAPWAIT(seg->s_as);
9499 9497 }
9500 9498 if (AS_ISUNMAPWAIT(seg->s_as)) {
9501 9499 AS_CLRUNMAPWAIT(seg->s_as);
9502 9500 cv_broadcast(&seg->s_as->a_cv);
9503 9501 }
9504 9502 mutex_exit(&seg->s_as->a_contents);
9505 9503 }
9506 9504 }
9507 9505
9508 9506 if (async) {
9509 9507 mutex_exit(&svd->segfree_syncmtx);
9510 9508 }
9511 9509 return (0);
9512 9510 }
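
The wakeup logic in segvn_reclaim() hinges on atomic_add_long_nv() returning the post-decrement value, so exactly one caller observes softlockcnt reaching 0 and performs the unmap-wait broadcast. The following sketch shows the same decrement-and-observe idiom using plain C11 atomics as stand-ins for the kernel primitives.

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong softlockcnt;

static void
release_pages(unsigned long npages)
{
	/* atomic_fetch_sub returns the old value; subtract to get the new. */
	unsigned long nv = atomic_fetch_sub(&softlockcnt, npages) - npages;

	if (nv == 0) {
		/* Last holder: wake anyone waiting to unmap the segment. */
		(void) printf("softlockcnt reached 0, broadcast wakeup\n");
	}
}

int
main(void)
{
	atomic_store(&softlockcnt, 8);
	release_pages(5);
	release_pages(3);	/* drives the count to zero */
	return (0);
}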
9513 9511
9514 9512 /*ARGSUSED*/
9515 9513 static int
9516 9514 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9517 9515 enum seg_rw rw, int async)
9518 9516 {
9519 9517 amp_t *amp = (amp_t *)ptag;
9520 9518 pgcnt_t np, npages;
9521 9519 struct page **pl;
9522 9520
9523 9521 npages = np = btop(len);
9524 9522 ASSERT(npages);
9525 9523 ASSERT(amp->a_softlockcnt >= npages);
9526 9524
9527 9525 pl = pplist;
9528 9526
9529 9527 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9530 9528 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9531 9529
9532 9530 while (np > (uint_t)0) {
9533 9531 if (rw == S_WRITE) {
9534 9532 hat_setrefmod(*pplist);
9535 9533 } else {
9536 9534 hat_setref(*pplist);
9537 9535 }
9538 9536 page_unlock(*pplist);
9539 9537 np--;
9540 9538 pplist++;
9541 9539 }
9542 9540
9543 9541 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9544 9542
9545 9543 /*
9546 9544 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9547 9545 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9548 9546 * and anonmap_purge() acquires a_purgemtx.
9549 9547 */
9550 9548 	mutex_enter(&amp->a_purgemtx);
9551 9549 	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9552 9550 amp->a_purgewait) {
9553 9551 amp->a_purgewait = 0;
9554 9552 		cv_broadcast(&amp->a_purgecv);
9555 9553 }
9556 9554 	mutex_exit(&amp->a_purgemtx);
9557 9555 return (0);
9558 9556 }
9559 9557
9560 9558 /*
9561 9559 * get a memory ID for an addr in a given segment
9562 9560 *
9563 9561 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9564 9562 * At fault time they will be relocated into larger pages.
9565 9563 */
9566 9564 static int
9567 9565 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9568 9566 {
9569 9567 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9570 9568 struct anon *ap = NULL;
9571 9569 ulong_t anon_index;
9572 9570 struct anon_map *amp;
9573 9571 anon_sync_obj_t cookie;
9574 9572
9575 9573 if (svd->type == MAP_PRIVATE) {
9576 9574 memidp->val[0] = (uintptr_t)seg->s_as;
9577 9575 memidp->val[1] = (uintptr_t)addr;
9578 9576 return (0);
9579 9577 }
9580 9578
9581 9579 if (svd->type == MAP_SHARED) {
9582 9580 if (svd->vp) {
9583 9581 memidp->val[0] = (uintptr_t)svd->vp;
9584 9582 memidp->val[1] = (u_longlong_t)svd->offset +
9585 9583 (uintptr_t)(addr - seg->s_base);
9586 9584 return (0);
9587 9585 } else {
9588 9586
9589 9587 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9590 9588 if ((amp = svd->amp) != NULL) {
9591 9589 anon_index = svd->anon_index +
9592 9590 seg_page(seg, addr);
9593 9591 }
9594 9592 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9595 9593
9596 9594 ASSERT(amp != NULL);
9597 9595
9598 9596 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9599 9597 anon_array_enter(amp, anon_index, &cookie);
9600 9598 ap = anon_get_ptr(amp->ahp, anon_index);
9601 9599 if (ap == NULL) {
9602 9600 page_t *pp;
9603 9601
9604 9602 pp = anon_zero(seg, addr, &ap, svd->cred);
9605 9603 if (pp == NULL) {
9606 9604 anon_array_exit(&cookie);
9607 9605 					ANON_LOCK_EXIT(&amp->a_rwlock);
9608 9606 return (ENOMEM);
9609 9607 }
9610 9608 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9611 9609 == NULL);
9612 9610 (void) anon_set_ptr(amp->ahp, anon_index,
9613 9611 ap, ANON_SLEEP);
9614 9612 page_unlock(pp);
9615 9613 }
9616 9614
9617 9615 anon_array_exit(&cookie);
9618 9616 			ANON_LOCK_EXIT(&amp->a_rwlock);
9619 9617
9620 9618 memidp->val[0] = (uintptr_t)ap;
9621 9619 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9622 9620 return (0);
9623 9621 }
9624 9622 }
9625 9623 return (EINVAL);
9626 9624 }
9627 9625
9628 9626 static int
9629 9627 sameprot(struct seg *seg, caddr_t a, size_t len)
9630 9628 {
9631 9629 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9632 9630 struct vpage *vpage;
9633 9631 spgcnt_t pages = btop(len);
9634 9632 uint_t prot;
9635 9633
9636 9634 if (svd->pageprot == 0)
9637 9635 return (1);
9638 9636
9639 9637 ASSERT(svd->vpage != NULL);
9640 9638
9641 9639 vpage = &svd->vpage[seg_page(seg, a)];
9642 9640 prot = VPP_PROT(vpage);
9643 9641 vpage++;
9644 9642 pages--;
9645 9643 while (pages-- > 0) {
9646 9644 if (prot != VPP_PROT(vpage))
9647 9645 return (0);
9648 9646 vpage++;
9649 9647 }
9650 9648 return (1);
9651 9649 }
9652 9650
9653 9651 /*
9654 9652 * Get memory allocation policy info for specified address in given segment
9655 9653 */
9656 9654 static lgrp_mem_policy_info_t *
9657 9655 segvn_getpolicy(struct seg *seg, caddr_t addr)
9658 9656 {
9659 9657 struct anon_map *amp;
9660 9658 ulong_t anon_index;
9661 9659 lgrp_mem_policy_info_t *policy_info;
9662 9660 struct segvn_data *svn_data;
9663 9661 u_offset_t vn_off;
9664 9662 vnode_t *vp;
9665 9663
9666 9664 ASSERT(seg != NULL);
9667 9665
9668 9666 svn_data = (struct segvn_data *)seg->s_data;
9669 9667 if (svn_data == NULL)
9670 9668 return (NULL);
9671 9669
9672 9670 /*
9673 9671 * Get policy info for private or shared memory
9674 9672 */
9675 9673 if (svn_data->type != MAP_SHARED) {
9676 9674 if (svn_data->tr_state != SEGVN_TR_ON) {
9677 9675 policy_info = &svn_data->policy_info;
9678 9676 } else {
9679 9677 policy_info = &svn_data->tr_policy_info;
9680 9678 ASSERT(policy_info->mem_policy ==
9681 9679 LGRP_MEM_POLICY_NEXT_SEG);
9682 9680 }
9683 9681 } else {
9684 9682 amp = svn_data->amp;
9685 9683 anon_index = svn_data->anon_index + seg_page(seg, addr);
9686 9684 vp = svn_data->vp;
9687 9685 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9688 9686 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9689 9687 }
9690 9688
9691 9689 return (policy_info);
9692 -}
9693 -
9694 -/*ARGSUSED*/
9695 -static int
9696 -segvn_capable(struct seg *seg, segcapability_t capability)
9697 -{
9698 - return (0);
9699 9690 }
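
The deleted segvn_capable() stub simply reported that segvn implements no optional capabilities. With this change (per the webrev title, a NULL capable segop is shorthand for no capabilities), the segops vector can leave the capable pointer NULL and the caller supplies the default answer. Below is a self-contained sketch of that dispatch pattern, using toy types rather than the real struct seg/segops definitions.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins; not the kernel's struct seg / struct seg_ops. */
typedef int segcapability_t;

struct seg;
struct seg_ops {
	int (*capable)(struct seg *, segcapability_t);
};
struct seg {
	const struct seg_ops *s_ops;
};

/*
 * Caller-side default: a NULL capable op means "no capabilities", which
 * is exactly what the deleted segvn_capable() stub used to return.
 */
static int
seg_capable(struct seg *seg, segcapability_t capability)
{
	if (seg->s_ops->capable == NULL)
		return (0);
	return (seg->s_ops->capable(seg, capability));
}

int
main(void)
{
	static const struct seg_ops segvn_like_ops = { NULL };
	struct seg s = { &segvn_like_ops };

	(void) printf("capable? %d\n", seg_capable(&s, 1));	/* prints 0 */
	return (0);
}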
9700 9691
9701 9692 /*
9702 9693 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9703 9694 * established to per vnode mapping per lgroup amp pages instead of to vnode
9704 9695 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9705 9696 * may share the same text replication amp. If a suitable amp doesn't already
9706 9697 * exist in svntr hash table create a new one. We may fail to bind to amp if
9707 9698 * segment is not eligible for text replication. Code below first checks for
9708 9699 * these conditions. If binding is successful segment tr_state is set to on
9709 9700 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9710 9701 * svd->amp remains as NULL.
9711 9702 */
9712 9703 static void
9713 9704 segvn_textrepl(struct seg *seg)
9714 9705 {
9715 9706 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9716 9707 vnode_t *vp = svd->vp;
9717 9708 u_offset_t off = svd->offset;
9718 9709 size_t size = seg->s_size;
9719 9710 u_offset_t eoff = off + size;
9720 9711 uint_t szc = seg->s_szc;
9721 9712 ulong_t hash = SVNTR_HASH_FUNC(vp);
9722 9713 svntr_t *svntrp;
9723 9714 struct vattr va;
9724 9715 proc_t *p = seg->s_as->a_proc;
9725 9716 lgrp_id_t lgrp_id;
9726 9717 lgrp_id_t olid;
9727 9718 int first;
9728 9719 struct anon_map *amp;
9729 9720
9730 9721 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9731 9722 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9732 9723 ASSERT(p != NULL);
9733 9724 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9734 9725 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9735 9726 ASSERT(svd->flags & MAP_TEXT);
9736 9727 ASSERT(svd->type == MAP_PRIVATE);
9737 9728 ASSERT(vp != NULL && svd->amp == NULL);
9738 9729 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9739 9730 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9740 9731 ASSERT(seg->s_as != &kas);
9741 9732 ASSERT(off < eoff);
9742 9733 ASSERT(svntr_hashtab != NULL);
9743 9734
9744 9735 /*
9745 9736 * If numa optimizations are no longer desired bail out.
9746 9737 */
9747 9738 if (!lgrp_optimizations()) {
9748 9739 svd->tr_state = SEGVN_TR_OFF;
9749 9740 return;
9750 9741 }
9751 9742
9752 9743 /*
9753 9744 * Avoid creating anon maps with size bigger than the file size.
9754 9745 * If VOP_GETATTR() call fails bail out.
9755 9746 */
9756 9747 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9757 9748 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9758 9749 svd->tr_state = SEGVN_TR_OFF;
9759 9750 SEGVN_TR_ADDSTAT(gaerr);
9760 9751 return;
9761 9752 }
9762 9753 if (btopr(va.va_size) < btopr(eoff)) {
9763 9754 svd->tr_state = SEGVN_TR_OFF;
9764 9755 SEGVN_TR_ADDSTAT(overmap);
9765 9756 return;
9766 9757 }
9767 9758
9768 9759 /*
9769 9760 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9770 9761 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9771 9762 * mapping that checks if trcache for this vnode needs to be
9772 9763 * invalidated can't miss us.
9773 9764 */
9774 9765 if (!(vp->v_flag & VVMEXEC)) {
9775 9766 mutex_enter(&vp->v_lock);
9776 9767 vp->v_flag |= VVMEXEC;
9777 9768 mutex_exit(&vp->v_lock);
9778 9769 }
9779 9770 mutex_enter(&svntr_hashtab[hash].tr_lock);
9780 9771 /*
9781 9772 * Bail out if potentially MAP_SHARED writable mappings exist to this
9782 9773 * vnode. We don't want to use old file contents from existing
9783 9774 * replicas if this mapping was established after the original file
9784 9775 * was changed.
9785 9776 */
9786 9777 if (vn_is_mapped(vp, V_WRITE)) {
9787 9778 mutex_exit(&svntr_hashtab[hash].tr_lock);
9788 9779 svd->tr_state = SEGVN_TR_OFF;
9789 9780 SEGVN_TR_ADDSTAT(wrcnt);
9790 9781 return;
9791 9782 }
9792 9783 svntrp = svntr_hashtab[hash].tr_head;
9793 9784 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9794 9785 ASSERT(svntrp->tr_refcnt != 0);
9795 9786 if (svntrp->tr_vp != vp) {
9796 9787 continue;
9797 9788 }
9798 9789
9799 9790 /*
9800 9791 * Bail out if the file or its attributes were changed after
9801 9792 * this replication entry was created since we need to use the
9802 9793 * latest file contents. Note that mtime test alone is not
9803 9794 * sufficient because a user can explicitly change mtime via
9804 9795 		 * utimes(2) interfaces back to the old value after modifying
9805 9796 * the file contents. To detect this case we also have to test
9806 9797 * ctime which among other things records the time of the last
9807 9798 * mtime change by utimes(2). ctime is not changed when the file
9808 9799 * is only read or executed so we expect that typically existing
9809 9800 		 * replication amps can be used most of the time.
9810 9801 */
9811 9802 if (!svntrp->tr_valid ||
9812 9803 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9813 9804 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9814 9805 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9815 9806 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9816 9807 mutex_exit(&svntr_hashtab[hash].tr_lock);
9817 9808 svd->tr_state = SEGVN_TR_OFF;
9818 9809 SEGVN_TR_ADDSTAT(stale);
9819 9810 return;
9820 9811 }
9821 9812 /*
9822 9813 * if off, eoff and szc match current segment we found the
9823 9814 * existing entry we can use.
9824 9815 */
9825 9816 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9826 9817 svntrp->tr_szc == szc) {
9827 9818 break;
9828 9819 }
9829 9820 /*
9830 9821 * Don't create entries that differ but overlap in file offsets,
9831 9822 * to avoid replicating the same file pages more than once per
9832 9823 * lgroup.
9833 9824 */
9834 9825 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9835 9826 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9836 9827 mutex_exit(&svntr_hashtab[hash].tr_lock);
9837 9828 svd->tr_state = SEGVN_TR_OFF;
9838 9829 SEGVN_TR_ADDSTAT(overlap);
9839 9830 return;
9840 9831 }
9841 9832 }
9842 9833 /*
9843 9834 * If we didn't find an existing entry, create a new one.
9844 9835 */
9845 9836 if (svntrp == NULL) {
9846 9837 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9847 9838 if (svntrp == NULL) {
9848 9839 mutex_exit(&svntr_hashtab[hash].tr_lock);
9849 9840 svd->tr_state = SEGVN_TR_OFF;
9850 9841 SEGVN_TR_ADDSTAT(nokmem);
9851 9842 return;
9852 9843 }
9853 9844 #ifdef DEBUG
9854 9845 {
9855 9846 lgrp_id_t i;
9856 9847 for (i = 0; i < NLGRPS_MAX; i++) {
9857 9848 ASSERT(svntrp->tr_amp[i] == NULL);
9858 9849 }
9859 9850 }
9860 9851 #endif /* DEBUG */
9861 9852 svntrp->tr_vp = vp;
9862 9853 svntrp->tr_off = off;
9863 9854 svntrp->tr_eoff = eoff;
9864 9855 svntrp->tr_szc = szc;
9865 9856 svntrp->tr_valid = 1;
9866 9857 svntrp->tr_mtime = va.va_mtime;
9867 9858 svntrp->tr_ctime = va.va_ctime;
9868 9859 svntrp->tr_refcnt = 0;
9869 9860 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9870 9861 svntr_hashtab[hash].tr_head = svntrp;
9871 9862 }
9872 9863 first = 1;
9873 9864 again:
9874 9865 /*
9875 9866 * We want to pick a replica with pages on the main thread's (t_tid = 1,
9876 9867 * aka T1) lgrp. Currently text replication is only optimized for
9877 9868 * workloads that either have all threads of a process on the same
9878 9869 * lgrp or execute their large text primarily on the main thread.
9879 9870 */
9880 9871 lgrp_id = p->p_t1_lgrpid;
9881 9872 if (lgrp_id == LGRP_NONE) {
9882 9873 /*
9883 9874 * In case exec() prefaults text on a non-main thread, use the
9884 9875 * current thread's lgrpid. It will become the main thread soon
9885 9876 * anyway.
9886 9877 */
9887 9878 lgrp_id = lgrp_home_id(curthread);
9888 9879 }
9889 9880 /*
9890 9881 * Set p_tr_lgrpid to lgrp_id if it hasn't been set yet. Otherwise
9891 9882 * just set it to NLGRPS_MAX if it differs from the current process's
9892 9883 * T1 home lgrp. p_tr_lgrpid is used to detect if the process uses text
9893 9884 * replication and T1's new home differs from the lgrp used for text
9894 9885 * replication. When this happens the asynchronous segvn thread rechecks
9895 9886 * whether segments should change the lgrps used for text replication.
9896 9887 * If we fail to set p_tr_lgrpid with atomic_cas_32, set it to NLGRPS_MAX
9897 9888 * without cas if it's not already NLGRPS_MAX and not equal to the
9898 9889 * lgrp_id we want to use. We don't need to use cas in this case because
9899 9890 * another thread that races in between our non-atomic check and set
9900 9891 * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
9901 9892 */
9902 9893 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9903 9894 olid = p->p_tr_lgrpid;
9904 9895 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9905 9896 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9906 9897 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9907 9898 olid) {
9908 9899 olid = p->p_tr_lgrpid;
9909 9900 ASSERT(olid != LGRP_NONE);
9910 9901 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9911 9902 p->p_tr_lgrpid = NLGRPS_MAX;
9912 9903 }
9913 9904 }
9914 9905 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9915 9906 membar_producer();
9916 9907 /*
9917 9908 * lgrp_move_thread() only schedules an async recheck after a
9918 9909 * p->p_t1_lgrpid update when p->p_tr_lgrpid is not LGRP_NONE.
9919 9910 * Recheck p_t1_lgrpid once now that p->p_tr_lgrpid is no longer
9920 9911 * LGRP_NONE.
9921 9912 */
9922 9913 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9923 9914 p->p_t1_lgrpid != lgrp_id) {
9924 9915 first = 0;
9925 9916 goto again;
9926 9917 }
9927 9918 }
9928 9919 /*
9929 9920 * If no amp has been created yet for lgrp_id, create a new one as
9930 9921 * long as we have enough memory to afford it.
9931 9922 */
9932 9923 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9933 9924 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9934 9925 if (trmem > segvn_textrepl_max_bytes) {
9935 9926 SEGVN_TR_ADDSTAT(normem);
9936 9927 goto fail;
9937 9928 }
9938 9929 if (anon_try_resv_zone(size, NULL) == 0) {
9939 9930 SEGVN_TR_ADDSTAT(noanon);
9940 9931 goto fail;
9941 9932 }
9942 9933 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9943 9934 if (amp == NULL) {
9944 9935 anon_unresv_zone(size, NULL);
9945 9936 SEGVN_TR_ADDSTAT(nokmem);
9946 9937 goto fail;
9947 9938 }
9948 9939 ASSERT(amp->refcnt == 1);
9949 9940 amp->a_szc = szc;
9950 9941 svntrp->tr_amp[lgrp_id] = amp;
9951 9942 SEGVN_TR_ADDSTAT(newamp);
9952 9943 }
9953 9944 svntrp->tr_refcnt++;
9954 9945 ASSERT(svd->svn_trnext == NULL);
9955 9946 ASSERT(svd->svn_trprev == NULL);
9956 9947 svd->svn_trnext = svntrp->tr_svnhead;
9957 9948 svd->svn_trprev = NULL;
9958 9949 if (svntrp->tr_svnhead != NULL) {
9959 9950 svntrp->tr_svnhead->svn_trprev = svd;
9960 9951 }
9961 9952 svntrp->tr_svnhead = svd;
9962 9953 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9963 9954 ASSERT(amp->refcnt >= 1);
9964 9955 svd->amp = amp;
9965 9956 svd->anon_index = 0;
9966 9957 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9967 9958 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9968 9959 svd->tr_state = SEGVN_TR_ON;
9969 9960 mutex_exit(&svntr_hashtab[hash].tr_lock);
9970 9961 SEGVN_TR_ADDSTAT(repl);
9971 9962 return;
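/*
 * Failure path: undo the tentative segvn_textrepl_bytes charge taken
 * above and, if we just allocated this svntr entry and nothing else
 * references it yet, unlink it from the bucket and free it.
 */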
9972 9963 fail:
9973 9964 ASSERT(segvn_textrepl_bytes >= size);
9974 9965 atomic_add_long(&segvn_textrepl_bytes, -size);
9975 9966 ASSERT(svntrp != NULL);
9976 9967 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9977 9968 if (svntrp->tr_refcnt == 0) {
9978 9969 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9979 9970 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9980 9971 mutex_exit(&svntr_hashtab[hash].tr_lock);
9981 9972 kmem_cache_free(svntr_cache, svntrp);
9982 9973 } else {
9983 9974 mutex_exit(&svntr_hashtab[hash].tr_lock);
9984 9975 }
9985 9976 svd->tr_state = SEGVN_TR_OFF;
9986 9977 }
9987 9978
9988 9979 /*
9989 9980 * Convert the seg back to a regular vnode mapping seg by unbinding it from its
9990 9981 * text replication amp. This routine is typically called when the segment is
9991 9982 * unmapped, but can also be called when the segment no longer qualifies for text
9992 9983 * replication (e.g. due to protection changes). If unload_unmap is set, use the
9993 9984 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of the
9994 9985 * svntr entry, free all its anon maps and remove it from the hash table.
9995 9986 */
9996 9987 static void
9997 9988 segvn_textunrepl(struct seg *seg, int unload_unmap)
9998 9989 {
9999 9990 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
10000 9991 vnode_t *vp = svd->vp;
10001 9992 u_offset_t off = svd->offset;
10002 9993 size_t size = seg->s_size;
10003 9994 u_offset_t eoff = off + size;
10004 9995 uint_t szc = seg->s_szc;
10005 9996 ulong_t hash = SVNTR_HASH_FUNC(vp);
10006 9997 svntr_t *svntrp;
10007 9998 svntr_t **prv_svntrp;
10008 9999 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
10009 10000 lgrp_id_t i;
10010 10001
10011 10002 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
10012 10003 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
10013 10004 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
10014 10005 ASSERT(svd->tr_state == SEGVN_TR_ON);
10015 10006 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10016 10007 ASSERT(svd->amp != NULL);
10017 10008 ASSERT(svd->amp->refcnt >= 1);
10018 10009 ASSERT(svd->anon_index == 0);
10019 10010 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
10020 10011 ASSERT(svntr_hashtab != NULL);
10021 10012
10022 10013 mutex_enter(&svntr_hashtab[hash].tr_lock);
10023 10014 prv_svntrp = &svntr_hashtab[hash].tr_head;
10024 10015 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
10025 10016 ASSERT(svntrp->tr_refcnt != 0);
10026 10017 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
10027 10018 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
10028 10019 break;
10029 10020 }
10030 10021 }
10031 10022 if (svntrp == NULL) {
10032 10023 panic("segvn_textunrepl: svntr record not found");
10033 10024 }
10034 10025 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
10035 10026 panic("segvn_textunrepl: amp mismatch");
10036 10027 }
10037 10028 svd->tr_state = SEGVN_TR_OFF;
10038 10029 svd->amp = NULL;
10039 10030 if (svd->svn_trprev == NULL) {
10040 10031 ASSERT(svntrp->tr_svnhead == svd);
10041 10032 svntrp->tr_svnhead = svd->svn_trnext;
10042 10033 if (svntrp->tr_svnhead != NULL) {
10043 10034 svntrp->tr_svnhead->svn_trprev = NULL;
10044 10035 }
10045 10036 svd->svn_trnext = NULL;
10046 10037 } else {
10047 10038 svd->svn_trprev->svn_trnext = svd->svn_trnext;
10048 10039 if (svd->svn_trnext != NULL) {
10049 10040 svd->svn_trnext->svn_trprev = svd->svn_trprev;
10050 10041 svd->svn_trnext = NULL;
10051 10042 }
10052 10043 svd->svn_trprev = NULL;
10053 10044 }
10054 10045 if (--svntrp->tr_refcnt) {
10055 10046 mutex_exit(&svntr_hashtab[hash].tr_lock);
10056 10047 goto done;
10057 10048 }
10058 10049 *prv_svntrp = svntrp->tr_next;
10059 10050 mutex_exit(&svntr_hashtab[hash].tr_lock);
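/*
 * This was the last segment using this svntr entry. Free every
 * per-lgrp anon map created for it, return the swap reservation and
 * the segvn_textrepl_bytes charge, and free the entry itself.
 */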
10060 10051 for (i = 0; i < NLGRPS_MAX; i++) {
10061 10052 struct anon_map *amp = svntrp->tr_amp[i];
10062 10053 if (amp == NULL) {
10063 10054 continue;
10064 10055 }
10065 10056 ASSERT(amp->refcnt == 1);
10066 10057 ASSERT(amp->swresv == size);
10067 10058 ASSERT(amp->size == size);
10068 10059 ASSERT(amp->a_szc == szc);
10069 10060 if (amp->a_szc != 0) {
10070 10061 anon_free_pages(amp->ahp, 0, size, szc);
10071 10062 } else {
10072 10063 anon_free(amp->ahp, 0, size);
10073 10064 }
10074 10065 svntrp->tr_amp[i] = NULL;
10075 10066 ASSERT(segvn_textrepl_bytes >= size);
10076 10067 atomic_add_long(&segvn_textrepl_bytes, -size);
10077 10068 anon_unresv_zone(amp->swresv, NULL);
10078 10069 amp->refcnt = 0;
10079 10070 anonmap_free(amp);
10080 10071 }
10081 10072 kmem_cache_free(svntr_cache, svntrp);
10082 10073 done:
10083 10074 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
10084 10075 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
10085 10076 }
10086 10077
10087 10078 /*
10088 10079 * This is called when a MAP_SHARED writable mapping is created to a vnode
10089 10080 * that is currently used for execution (VVMEXEC flag is set). In this case we
10090 10081 * need to prevent further use of existing replicas.
10091 10082 */
10092 10083 static void
10093 10084 segvn_inval_trcache(vnode_t *vp)
10094 10085 {
10095 10086 ulong_t hash = SVNTR_HASH_FUNC(vp);
10096 10087 svntr_t *svntrp;
10097 10088
10098 10089 ASSERT(vp->v_flag & VVMEXEC);
10099 10090
10100 10091 if (svntr_hashtab == NULL) {
10101 10092 return;
10102 10093 }
10103 10094
10104 10095 mutex_enter(&svntr_hashtab[hash].tr_lock);
10105 10096 svntrp = svntr_hashtab[hash].tr_head;
10106 10097 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10107 10098 ASSERT(svntrp->tr_refcnt != 0);
10108 10099 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
10109 10100 svntrp->tr_valid = 0;
10110 10101 }
10111 10102 }
10112 10103 mutex_exit(&svntr_hashtab[hash].tr_lock);
10113 10104 }
10114 10105
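/*
 * Asynchronous text replication thread. It sleeps on segvn_trasync_sem
 * (marking itself CPR-safe while waiting) and, each time it is woken by
 * segvn_trupdate_wakeup(), runs segvn_trupdate() to recheck whether
 * replicated text segments should move to a different lgrp.
 */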
10115 10106 static void
10116 10107 segvn_trasync_thread(void)
10117 10108 {
10118 10109 callb_cpr_t cpr_info;
10119 10110 kmutex_t cpr_lock; /* just for CPR stuff */
10120 10111
10121 10112 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
10122 10113
10123 10114 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
10124 10115 callb_generic_cpr, "segvn_async");
10125 10116
10126 10117 if (segvn_update_textrepl_interval == 0) {
10127 10118 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
10128 10119 } else {
10129 10120 segvn_update_textrepl_interval *= hz;
10130 10121 }
10131 10122 (void) timeout(segvn_trupdate_wakeup, NULL,
10132 10123 segvn_update_textrepl_interval);
10133 10124
10134 10125 for (;;) {
10135 10126 mutex_enter(&cpr_lock);
10136 10127 CALLB_CPR_SAFE_BEGIN(&cpr_info);
10137 10128 mutex_exit(&cpr_lock);
10138 10129 sema_p(&segvn_trasync_sem);
10139 10130 mutex_enter(&cpr_lock);
10140 10131 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
10141 10132 mutex_exit(&cpr_lock);
10142 10133 segvn_trupdate();
10143 10134 }
10144 10135 }
10145 10136
10146 10137 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
10147 10138
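/*
 * Periodic timeout callback. It posts segvn_trasync_sem (waking the
 * async thread) only when the lgrp thread migration count has changed
 * since the last snapshot, and then rearms itself unless replication
 * updates have been disabled.
 */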
10148 10139 static void
10149 10140 segvn_trupdate_wakeup(void *dummy)
10150 10141 {
10151 10142 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
10152 10143
10153 10144 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
10154 10145 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
10155 10146 sema_v(&segvn_trasync_sem);
10156 10147 }
10157 10148
10158 10149 if (!segvn_disable_textrepl_update &&
10159 10150 segvn_update_textrepl_interval != 0) {
10160 10151 (void) timeout(segvn_trupdate_wakeup, dummy,
10161 10152 segvn_update_textrepl_interval);
10162 10153 }
10163 10154 }
10164 10155
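/*
 * Walk every svntr hash bucket and, with the bucket lock held, every
 * segment linked to each replication entry, letting
 * segvn_trupdate_seg() decide whether the segment needs to switch to a
 * replica on a different lgrp.
 */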
10165 10156 static void
10166 10157 segvn_trupdate(void)
10167 10158 {
10168 10159 ulong_t hash;
10169 10160 svntr_t *svntrp;
10170 10161 segvn_data_t *svd;
10171 10162
10172 10163 ASSERT(svntr_hashtab != NULL);
10173 10164
10174 10165 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
10175 10166 mutex_enter(&svntr_hashtab[hash].tr_lock);
10176 10167 svntrp = svntr_hashtab[hash].tr_head;
10177 10168 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10178 10169 ASSERT(svntrp->tr_refcnt != 0);
10179 10170 svd = svntrp->tr_svnhead;
10180 10171 for (; svd != NULL; svd = svd->svn_trnext) {
10181 10172 segvn_trupdate_seg(svd->seg, svd, svntrp,
10182 10173 hash);
10183 10174 }
10184 10175 }
10185 10176 mutex_exit(&svntr_hashtab[hash].tr_lock);
10186 10177 }
10187 10178 }
10188 10179
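/*
 * Re-home one replicated text segment to the amp for the process's
 * current T1 home lgrp: allocate a per-lgrp amp if one doesn't exist
 * yet, unload the existing translations, and switch the segment's
 * memory policy and amp over to the new lgrp.
 */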
10189 10180 static void
10190 10181 segvn_trupdate_seg(struct seg *seg,
10191 10182 segvn_data_t *svd,
10192 10183 svntr_t *svntrp,
10193 10184 ulong_t hash)
10194 10185 {
10195 10186 proc_t *p;
10196 10187 lgrp_id_t lgrp_id;
10197 10188 struct as *as;
10198 10189 size_t size;
10199 10190 struct anon_map *amp;
10200 10191
10201 10192 ASSERT(svd->vp != NULL);
10202 10193 ASSERT(svd->vp == svntrp->tr_vp);
10203 10194 ASSERT(svd->offset == svntrp->tr_off);
10204 10195 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10205 10196 ASSERT(seg != NULL);
10206 10197 ASSERT(svd->seg == seg);
10207 10198 ASSERT(seg->s_data == (void *)svd);
10208 10199 ASSERT(seg->s_szc == svntrp->tr_szc);
10209 10200 ASSERT(svd->tr_state == SEGVN_TR_ON);
10210 10201 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10211 10202 ASSERT(svd->amp != NULL);
10212 10203 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10213 10204 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10214 10205 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10215 10206 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10216 10207 ASSERT(svntrp->tr_refcnt != 0);
10217 10208 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10218 10209
10219 10210 as = seg->s_as;
10220 10211 ASSERT(as != NULL && as != &kas);
10221 10212 p = as->a_proc;
10222 10213 ASSERT(p != NULL);
10223 10214 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
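/*
 * Nothing to do if the T1 home lgrp is not known yet or the segment
 * already replicates on it.
 */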
10224 10215 lgrp_id = p->p_t1_lgrpid;
10225 10216 if (lgrp_id == LGRP_NONE) {
10226 10217 return;
10227 10218 }
10228 10219 ASSERT(lgrp_id < NLGRPS_MAX);
10229 10220 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10230 10221 return;
10231 10222 }
10232 10223
10233 10224 /*
10234 10225 * Use tryenter locking since we take the as/seg locks and the svntr
10235 10226 * hash lock in the reverse order from the synchronous thread.
10236 10227 */
10237 10228 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10238 10229 SEGVN_TR_ADDSTAT(nolock);
10239 10230 if (segvn_lgrp_trthr_migrs_snpsht) {
10240 10231 segvn_lgrp_trthr_migrs_snpsht = 0;
10241 10232 }
10242 10233 return;
10243 10234 }
10244 10235 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10245 10236 AS_LOCK_EXIT(as, &as->a_lock);
10246 10237 SEGVN_TR_ADDSTAT(nolock);
10247 10238 if (segvn_lgrp_trthr_migrs_snpsht) {
10248 10239 segvn_lgrp_trthr_migrs_snpsht = 0;
10249 10240 }
10250 10241 return;
10251 10242 }
10252 10243 size = seg->s_size;
10253 10244 if (svntrp->tr_amp[lgrp_id] == NULL) {
10254 10245 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10255 10246 if (trmem > segvn_textrepl_max_bytes) {
10256 10247 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10257 10248 AS_LOCK_EXIT(as, &as->a_lock);
10258 10249 atomic_add_long(&segvn_textrepl_bytes, -size);
10259 10250 SEGVN_TR_ADDSTAT(normem);
10260 10251 return;
10261 10252 }
10262 10253 if (anon_try_resv_zone(size, NULL) == 0) {
10263 10254 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10264 10255 AS_LOCK_EXIT(as, &as->a_lock);
10265 10256 atomic_add_long(&segvn_textrepl_bytes, -size);
10266 10257 SEGVN_TR_ADDSTAT(noanon);
10267 10258 return;
10268 10259 }
10269 10260 amp = anonmap_alloc(size, size, KM_NOSLEEP);
10270 10261 if (amp == NULL) {
10271 10262 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10272 10263 AS_LOCK_EXIT(as, &as->a_lock);
10273 10264 atomic_add_long(&segvn_textrepl_bytes, -size);
10274 10265 anon_unresv_zone(size, NULL);
10275 10266 SEGVN_TR_ADDSTAT(nokmem);
10276 10267 return;
10277 10268 }
10278 10269 ASSERT(amp->refcnt == 1);
10279 10270 amp->a_szc = seg->s_szc;
10280 10271 svntrp->tr_amp[lgrp_id] = amp;
10281 10272 }
10282 10273 /*
10283 10274 * We don't need to drop the bucket lock, but we do it here to give
10284 10275 * other threads a chance. svntr and svd can't be unlinked as long as
10285 10276 * the segment lock is held as a writer and the AS lock is held as
10286 10277 * well. After we retake the bucket lock we'll continue from where we
10287 10278 * left off. We'll be able to reach the end of either list since new
10288 10279 * entries are always added to the beginning of the lists.
10289 10280 */
10290 10281 mutex_exit(&svntr_hashtab[hash].tr_lock);
10291 10282 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10292 10283 mutex_enter(&svntr_hashtab[hash].tr_lock);
10293 10284
10294 10285 ASSERT(svd->tr_state == SEGVN_TR_ON);
10295 10286 ASSERT(svd->amp != NULL);
10296 10287 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10297 10288 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10298 10289 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10299 10290
10300 10291 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10301 10292 svd->amp = svntrp->tr_amp[lgrp_id];
10302 10293 p->p_tr_lgrpid = NLGRPS_MAX;
10303 10294 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10304 10295 AS_LOCK_EXIT(as, &as->a_lock);
10305 10296
10306 10297 ASSERT(svntrp->tr_refcnt != 0);
10307 10298 ASSERT(svd->vp == svntrp->tr_vp);
10308 10299 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10309 10300 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10310 10301 ASSERT(svd->seg == seg);
10311 10302 ASSERT(svd->tr_state == SEGVN_TR_ON);
10312 10303
10313 10304 SEGVN_TR_ADDSTAT(asyncrepl);
10314 10305 }