static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static int	segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
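
/*
 * Illustrative sketch (not part of the original file): lookups and
 * updates of segkp_cache[] are done under segkp_lock, e.g. popping a
 * cached entry off a free list.  The kpf_list/kpf_count and kp_next
 * member names below are assumptions made for the example.
 */
#ifdef SEGKP_EXAMPLE
static struct segkp_data *
segkp_cache_pop_example(int index)
{
	struct segkp_data *kpd;

	mutex_enter(&segkp_lock);
	if ((kpd = segkp_cache[index].kpf_list) != NULL) {
		segkp_cache[index].kpf_list = kpd->kp_next;
		segkp_cache[index].kpf_count--;
	}
	mutex_exit(&segkp_lock);
	return (kpd);
}
#endif	/* SEGKP_EXAMPLE */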

/*
 * SEGKP_BADOP() casts segkp_badop() to a pointer to a function
 * returning type t, so that it can fill the seg_ops slots segkp does
 * not implement; any call through one of those slots panics.
 */
#define	SEGKP_BADOP(t)	(t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long red_minavail = 5000;
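
/*
 * Illustrative sketch (not part of the original file): a kernel path
 * worried about deep recursion could probe its remaining stack
 * against red_minavail and map the redzone through the real entry
 * point, segkp_map_red().  The stack-depth arithmetic below is a
 * simplifying assumption for the example.
 */
#ifdef SEGKP_EXAMPLE
static int
segkp_redzone_probe_example(void)
{
	char here;
	long left = (long)((uintptr_t)&here -
	    (uintptr_t)curthread->t_stkbase);

	if (left < red_minavail)
		return (segkp_map_red());	/* 1 if redzone was mapped */
	return (0);
}
#endif	/* SEGKP_EXAMPLE */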

/* ... */

static const struct seg_ops segkp_ops = {
	.dup		= SEGKP_BADOP(int),
	.unmap		= SEGKP_BADOP(int),
	.free		= SEGKP_BADOP(void),
	.fault		= segkp_fault,
	.faulta		= SEGKP_BADOP(faultcode_t),
	.setprot	= SEGKP_BADOP(int),
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.swapout	= SEGKP_BADOP(size_t),
	.sync		= SEGKP_BADOP(int),
	.incore		= SEGKP_BADOP(size_t),
	.lockop		= SEGKP_BADOP(int),
	.getprot	= SEGKP_BADOP(int),
	.getoffset	= SEGKP_BADOP(u_offset_t),
	.gettype	= SEGKP_BADOP(int),
	.getvp		= SEGKP_BADOP(int),
	.advise		= SEGKP_BADOP(int),
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.setpagesize	= SEGKP_BADOP(int),
	.getmemid	= segkp_getmemid,
	.capable	= segkp_capable,
};
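
/*
 * Illustrative sketch (not part of the original file): generic VM
 * code reaches these entry points through the seg_ops vector, so any
 * operation left as SEGKP_BADOP() above lands in segkp_badop() and
 * panics.  SEGOP_SETPROT stands in for the usual <vm/seg.h> dispatch
 * macro.
 */
#ifdef SEGKP_EXAMPLE
static void
segkp_badop_dispatch_example(struct seg *seg, caddr_t addr, size_t len)
{
	/* .setprot is SEGKP_BADOP(int), so this call panics */
	(void) SEGOP_SETPROT(seg, addr, len, PROT_READ);
}
#endif	/* SEGKP_EXAMPLE */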


static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment-specific private data struct and fill it in
 * with the per-kp-segment mutex, anon pointer array, and hash table.
 */
int
segkp_create(struct seg *seg)

/* ... [segkp_create() through the body of segkp_dump() elided] ... */

				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}
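
/*
 * Illustrative usage (not part of the original file): segkp_create()
 * is called once at boot, after the segkp segment has been attached
 * to the kernel address space.  The sketch below paraphrases that
 * startup sequence; it is an assumption, not the literal startup
 * code.
 */
#ifdef SEGKP_EXAMPLE
static void
segkp_attach_example(caddr_t base, size_t size)
{
	rw_enter(&kas.a_lock, RW_WRITER);
	if (seg_attach(&kas, base, size, segkp) < 0)
		panic("cannot attach segkp");
	if (segkp_create(segkp) != 0)
		panic("segkp_create failed");
	rw_exit(&kas.a_lock);
}
#endif	/* SEGKP_EXAMPLE */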

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);