void
mem_node_add_slice(pfn_t start, pfn_t end)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * DR will pass us the first pfn that is allocatable.
	 * We need to round down to get the real start of
	 * the slice.
	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode >= 0 && mnode < max_mem_nodes);

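	/*
	 * atomic_cas_32() returns the old value of ->exists: nonzero
	 * means the node was already configured and we only grow its
	 * span; zero means we just claimed it and must initialize it.
	 */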
	if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_add_16(&num_memnodes, 1);
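		/*
		 * Set this mnode's bit in memnodes_mask; the CAS retry
		 * loop makes the read-modify-write atomic against
		 * concurrent updates to other bits.
		 */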
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
		    oldmask);
	}

	/*
	 * Inform the common lgrp framework about the new memory
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}

/*
 * Remove a PFN range from a memnode.  On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode >= 0 && mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.
		 */
		if (start <= mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {
		/*
		 * Let the common lgrp framework know this mnode is
		 * leaving
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL,
		    mnode, MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
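		/*
		 * Clear this mnode's bit in memnodes_mask with a CAS
		 * retry loop so a concurrent update to another bit is
		 * not lost.
		 */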
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
		atomic_add_16(&num_memnodes, -1);
		mem_node_config[mnode].exists = 0;
	}
}

void
mem_node_add_range(pfn_t start, pfn_t end)
{
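	/*
	 * plat_slice_add is a weak symbol, so its address is non-NULL
	 * only when the platform provides an override; otherwise fall
	 * back to the generic implementation.
	 */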
	if (&plat_slice_add)
		plat_slice_add(start, end);
	else
		mem_node_add_slice(start, end);
}

void
mem_node_del_range(pfn_t start, pfn_t end)
{
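	/* Same weak-symbol dispatch as mem_node_add_range() above. */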
	if (&plat_slice_del)
		plat_slice_del(start, end);
	else
		mem_node_del_slice(start, end);
}

void
startup_build_mem_nodes(prom_memlist_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;

	/* LINTED: ASSERT will always true or false */
	ASSERT(NBBY * sizeof (mnodeset_t) >= max_mem_nodes);

	if (&plat_build_mem_nodes) {
		plat_build_mem_nodes(list, nelems);
	} else {
		/*
		 * Boot install lists are arranged <addr, len>,
		 * <addr, len>, ...
		 */
		for (elem = 0; elem < nelems; list++, elem++) {
			basepfn = btop(list->addr);
			npgs = btop(list->size);
			mem_node_add_range(basepfn, basepfn + npgs - 1);
		}
		mem_node_physalign = 0;
		mem_node_pfn_shift = 0;
	}
}

/*
 * Allocate an unassigned memnode.
 */
int
mem_node_alloc()
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * Find an unused memnode.  Update it atomically to prevent
	 * a first time memnode creation race.
	 */
	for (mnode = 0; mnode < max_mem_nodes; mnode++)
		if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
		    0, 1) == 0)
			break;

	if (mnode >= max_mem_nodes)
		panic("Out of free memnodes\n");

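	/*
	 * Start with an impossible span (physbase above physmax) so the
	 * first slice added to this node establishes the real bounds.
	 */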
	mem_node_config[mnode].physbase = (pfn_t)-1l;
	mem_node_config[mnode].physmax = 0;
	atomic_add_16(&num_memnodes, 1);
	do {
		oldmask = memnodes_mask;
		newmask = memnodes_mask | (1ull << mnode);
	} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);

	return (mnode);
}

/*
 * Find the intersection between a memnode and a memlist
 * and return the number of pages that overlap.
 *
 * Assumes the list is protected from DR operations by
 * the memlist lock.
 */
pgcnt_t
mem_node_memlist_pages(int mnode, struct memlist *mlist)
{
259 {
260 pfn_t base, end;
261 pfn_t cur_base, cur_end;
262 pgcnt_t npgs;
263 struct memlist *pmem;
264
265 base = mem_node_config[mnode].physbase;