78 static void segkp_badop(void);
79 static void segkp_dump(struct seg *seg);
80 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
81 uint_t prot);
82 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
83 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
84 struct page ***page, enum lock_type type,
85 enum seg_rw rw);
86 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
87 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
88 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
89 struct segkp_data **tkpd, struct anon_map *amp);
90 static void segkp_release_internal(struct seg *seg,
91 struct segkp_data *kpd, size_t len);
92 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
93 size_t len, struct segkp_data *kpd, uint_t flags);
94 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
95 size_t len, struct segkp_data *kpd, uint_t flags);
96 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
97 static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
98 static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg,
99 caddr_t addr);
100 static int segkp_capable(struct seg *seg, segcapability_t capability);
101
102 /*
103 * Lock used to protect the hash table(s) and caches.
104 */
105 static kmutex_t segkp_lock;
106
107 /*
108 * The segkp caches
109 */
110 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
111
112 #define SEGKP_BADOP(t) (t(*)())segkp_badop
113
114 /*
115 * When there are fewer than red_minavail bytes left on the stack,
116 * segkp_map_red() will map in the redzone (if called). 5000 seems
117 * to work reasonably well...
118 */
119 long red_minavail = 5000;
150 .unmap = SEGKP_BADOP(int),
151 .free = SEGKP_BADOP(void),
152 .fault = segkp_fault,
153 .faulta = SEGKP_BADOP(faultcode_t),
154 .setprot = SEGKP_BADOP(int),
155 .checkprot = segkp_checkprot,
156 .kluster = segkp_kluster,
157 .swapout = SEGKP_BADOP(size_t),
158 .sync = SEGKP_BADOP(int),
159 .incore = SEGKP_BADOP(size_t),
160 .lockop = SEGKP_BADOP(int),
161 .getprot = SEGKP_BADOP(int),
162 .getoffset = SEGKP_BADOP(u_offset_t),
163 .gettype = SEGKP_BADOP(int),
164 .getvp = SEGKP_BADOP(int),
165 .advise = SEGKP_BADOP(int),
166 .dump = segkp_dump,
167 .pagelock = segkp_pagelock,
168 .setpagesize = SEGKP_BADOP(int),
169 .getmemid = segkp_getmemid,
170 .getpolicy = segkp_getpolicy,
171 .capable = segkp_capable,
172 };
173
174
/*
 * Common target for all segment operations that segkp does not support;
 * installed into segkp_ops via the SEGKP_BADOP() casts above.  Reaching
 * this function indicates a kernel bug, so panic immediately.
 */
static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}
181
182 static void segkpinit_mem_config(struct seg *);
183
/*
 * NOTE(review): presumably nonzero while a memory delete is in progress,
 * used to keep segkp caches disabled (see the memory-delete comment near
 * the mem_config callbacks below) — confirm against the pre/post-del code.
 */
static uint32_t segkp_indel;
185
186 /*
187 * Allocate the segment specific private data struct and fill it in
188 * with the per kp segment mutex, anon ptr. array and hash table.
189 */
190 int
1386 addr += PAGESIZE;
1387 dump_timeleft = dump_timeout;
1388 }
1389 }
1390 }
1391 }
1392
/*
 * Page locking is not supported for segkp segments; always fails
 * with ENOTSUP.  All parameters are ignored (hence ARGSUSED).
 */
/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
	struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}
1400
/*
 * Memory-id translation is not supported for segkp segments; always
 * fails with ENODEV without touching *memidp.
 */
/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}
1407
/*
 * segkp segments carry no lgroup memory-allocation policy; always
 * returns NULL.
 */
/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}
1414
/*
 * segkp advertises no optional segment capabilities; returns 0
 * ("not capable") for every capability queried.
 */
/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
1421
1422 #include <sys/mem_config.h>
1423
/*
 * Memory-config callback invoked after physical memory is added;
 * segkp needs no action on memory add, so this is intentionally empty.
 */
/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}
1428
1429 /*
1430 * During memory delete, turn off caches so that pages are not held.
1431 * A better solution may be to unlock the pages while they are
1432 * in the cache so that they may be collected naturally.
|
78 static void segkp_badop(void);
79 static void segkp_dump(struct seg *seg);
80 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
81 uint_t prot);
82 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
83 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
84 struct page ***page, enum lock_type type,
85 enum seg_rw rw);
86 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
87 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
88 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
89 struct segkp_data **tkpd, struct anon_map *amp);
90 static void segkp_release_internal(struct seg *seg,
91 struct segkp_data *kpd, size_t len);
92 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
93 size_t len, struct segkp_data *kpd, uint_t flags);
94 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
95 size_t len, struct segkp_data *kpd, uint_t flags);
96 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
97 static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
98 static int segkp_capable(struct seg *seg, segcapability_t capability);
99
100 /*
101 * Lock used to protect the hash table(s) and caches.
102 */
103 static kmutex_t segkp_lock;
104
105 /*
106 * The segkp caches
107 */
108 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
109
110 #define SEGKP_BADOP(t) (t(*)())segkp_badop
111
112 /*
113 * When there are fewer than red_minavail bytes left on the stack,
114 * segkp_map_red() will map in the redzone (if called). 5000 seems
115 * to work reasonably well...
116 */
117 long red_minavail = 5000;
148 .unmap = SEGKP_BADOP(int),
149 .free = SEGKP_BADOP(void),
150 .fault = segkp_fault,
151 .faulta = SEGKP_BADOP(faultcode_t),
152 .setprot = SEGKP_BADOP(int),
153 .checkprot = segkp_checkprot,
154 .kluster = segkp_kluster,
155 .swapout = SEGKP_BADOP(size_t),
156 .sync = SEGKP_BADOP(int),
157 .incore = SEGKP_BADOP(size_t),
158 .lockop = SEGKP_BADOP(int),
159 .getprot = SEGKP_BADOP(int),
160 .getoffset = SEGKP_BADOP(u_offset_t),
161 .gettype = SEGKP_BADOP(int),
162 .getvp = SEGKP_BADOP(int),
163 .advise = SEGKP_BADOP(int),
164 .dump = segkp_dump,
165 .pagelock = segkp_pagelock,
166 .setpagesize = SEGKP_BADOP(int),
167 .getmemid = segkp_getmemid,
168 .capable = segkp_capable,
169 };
170
171
/*
 * Common target for all segment operations that segkp does not support;
 * installed into segkp_ops via the SEGKP_BADOP() casts above.  Reaching
 * this function indicates a kernel bug, so panic immediately.
 */
static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}
178
179 static void segkpinit_mem_config(struct seg *);
180
/*
 * NOTE(review): presumably nonzero while a memory delete is in progress,
 * used to keep segkp caches disabled (see the memory-delete comment near
 * the mem_config callbacks below) — confirm against the pre/post-del code.
 */
static uint32_t segkp_indel;
182
183 /*
184 * Allocate the segment specific private data struct and fill it in
185 * with the per kp segment mutex, anon ptr. array and hash table.
186 */
187 int
1383 addr += PAGESIZE;
1384 dump_timeleft = dump_timeout;
1385 }
1386 }
1387 }
1388 }
1389
/*
 * Page locking is not supported for segkp segments; always fails
 * with ENOTSUP.  All parameters are ignored (hence ARGSUSED).
 */
/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
	struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}
1397
/*
 * Memory-id translation is not supported for segkp segments; always
 * fails with ENODEV without touching *memidp.
 */
/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}
1404
/*
 * segkp advertises no optional segment capabilities; returns 0
 * ("not capable") for every capability queried.
 */
/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
1411
1412 #include <sys/mem_config.h>
1413
/*
 * Memory-config callback invoked after physical memory is added;
 * segkp needs no action on memory add, so this is intentionally empty.
 */
/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}
1418
1419 /*
1420 * During memory delete, turn off caches so that pages are not held.
1421 * A better solution may be to unlock the pages while they are
1422 * in the cache so that they may be collected naturally.
|