Print this page
5255 uts shouldn't open-code ISP2
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/os/ddi_impl.c
+++ new/usr/src/uts/i86pc/os/ddi_impl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2012 Garrett D'Amore <garrett@damore.org>
25 25 * Copyright 2014 Pluribus Networks, Inc.
26 26 */
27 27
28 28 /*
29 29 * PC specific DDI implementation
30 30 */
31 31 #include <sys/types.h>
32 32 #include <sys/autoconf.h>
33 33 #include <sys/avintr.h>
34 34 #include <sys/bootconf.h>
35 35 #include <sys/conf.h>
36 36 #include <sys/cpuvar.h>
37 37 #include <sys/ddi_impldefs.h>
38 38 #include <sys/ddi_subrdefs.h>
39 39 #include <sys/ethernet.h>
40 40 #include <sys/fp.h>
41 41 #include <sys/instance.h>
42 42 #include <sys/kmem.h>
43 43 #include <sys/machsystm.h>
44 44 #include <sys/modctl.h>
45 45 #include <sys/promif.h>
46 46 #include <sys/prom_plat.h>
47 47 #include <sys/sunndi.h>
48 48 #include <sys/ndi_impldefs.h>
49 49 #include <sys/ddi_impldefs.h>
50 50 #include <sys/sysmacros.h>
51 51 #include <sys/systeminfo.h>
52 52 #include <sys/utsname.h>
53 53 #include <sys/atomic.h>
54 54 #include <sys/spl.h>
55 55 #include <sys/archsystm.h>
56 56 #include <vm/seg_kmem.h>
57 57 #include <sys/ontrap.h>
58 58 #include <sys/fm/protocol.h>
59 59 #include <sys/ramdisk.h>
60 60 #include <sys/sunndi.h>
61 61 #include <sys/vmem.h>
62 62 #include <sys/pci_impl.h>
63 63 #if defined(__xpv)
64 64 #include <sys/hypervisor.h>
65 65 #endif
66 66 #include <sys/mach_intr.h>
67 67 #include <vm/hat_i86.h>
68 68 #include <sys/x86_archext.h>
69 69 #include <sys/avl.h>
70 70
71 71 /*
72 72 * DDI Boot Configuration
73 73 */
74 74
75 75 /*
76 76 * Platform drivers on this platform
77 77 */
/* Platform power-management drivers loaded on this platform. */
char *platform_module_list[] = {
	"acpippm",
	"ppm",
	(char *)0			/* NULL-terminated list */
};

/* pci bus resource maps */
struct pci_bus_resource *pci_bus_res;

size_t dma_max_copybuf_size = 0x101000;		/* 1M + 4K */

/* Physical extent of the boot ramdisk, if any. */
uint64_t ramdisk_start, ramdisk_end;

/* When non-zero, attach "isa" as a pseudo node instead of a hw node. */
int pseudo_isa = 0;
92 92
93 93 /*
94 94 * Forward declarations
95 95 */
/* Property helpers defined later in this file. */
static int getlongprop_buf();
static void get_boot_properties(void);
static void impl_bus_initialprobe(void);
static void impl_bus_reprobe(void);

/* DDI_CTLOPS_POKE/PEEK backends. */
static int poke_mem(peekpoke_ctlops_t *in_args);
static int peek_mem(peekpoke_ctlops_t *in_args);

static int kmem_override_cache_attrs(caddr_t, size_t, uint_t);

#if defined(__amd64) && !defined(__xpv)
extern void immu_init(void);	/* Intel IOMMU setup (see configure()) */
#endif
109 109
110 110 /*
111 111 * We use an AVL tree to store contiguous address allocations made with the
112 112 * kalloca() routine, so that we can return the size to free with kfreea().
113 113 * Note that in the future it would be vastly faster if we could eliminate
114 114 * this lookup by insisting that all callers keep track of their own sizes,
115 115 * just as for kmem_alloc().
116 116 */
/* One node per contiguous allocation; keyed by address (see comment above). */
struct ctgas {
	avl_node_t ctg_link;	/* linkage in ctgtree */
	void *ctg_addr;		/* allocation address (the AVL key) */
	size_t ctg_size;	/* allocation size, returned at free time */
};

static avl_tree_t ctgtree;	/* tree of outstanding ctgas allocations */

static kmutex_t ctgmutex;	/* serializes access to ctgtree */
#define	CTGLOCK()	mutex_enter(&ctgmutex)
#define	CTGUNLOCK()	mutex_exit(&ctgmutex)
128 128
129 129 /*
130 130 * Minimum pfn value of page_t's put on the free list. This is to simplify
131 131 * support of ddi dma memory requests which specify small, non-zero addr_lo
132 132 * values.
133 133 *
134 134 * The default value of 2, which corresponds to the only known non-zero addr_lo
135 135 * value used, means a single page will be sacrificed (pfn typically starts
136 136 * at 1). ddiphysmin can be set to 0 to disable. It cannot be set above 0x100
137 137 * otherwise mp startup panics.
138 138 */
pfn_t ddiphysmin = 2;	/* tunable; constraints described in comment above */
140 140
141 141 static void
142 142 check_driver_disable(void)
143 143 {
144 144 int proplen = 128;
145 145 char *prop_name;
146 146 char *drv_name, *propval;
147 147 major_t major;
148 148
149 149 prop_name = kmem_alloc(proplen, KM_SLEEP);
150 150 for (major = 0; major < devcnt; major++) {
151 151 drv_name = ddi_major_to_name(major);
152 152 if (drv_name == NULL)
153 153 continue;
154 154 (void) snprintf(prop_name, proplen, "disable-%s", drv_name);
155 155 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
156 156 DDI_PROP_DONTPASS, prop_name, &propval) == DDI_SUCCESS) {
157 157 if (strcmp(propval, "true") == 0) {
158 158 devnamesp[major].dn_flags |= DN_DRIVER_REMOVED;
159 159 cmn_err(CE_NOTE, "driver %s disabled",
160 160 drv_name);
161 161 }
162 162 ddi_prop_free(propval);
163 163 }
164 164 }
165 165 kmem_free(prop_name, proplen);
166 166 }
167 167
168 168
169 169 /*
170 170 * Configure the hardware on the system.
171 171 * Called before the rootfs is mounted
172 172 */
void
configure(void)
{
	extern void i_ddi_init_root();

#if defined(__i386)
	extern int fpu_pentium_fdivbug;
#endif	/* __i386 */
	extern int fpu_ignored;

	/*
	 * Determine if an FPU is attached
	 */

	fpu_probe();

#if defined(__i386)
	/* Warn about the classic Pentium FDIV erratum if probe flagged it. */
	if (fpu_pentium_fdivbug) {
		printf("\
FP hardware exhibits Pentium floating point divide problem\n");
	}
#endif	/* __i386 */

	if (fpu_ignored) {
		printf("FP hardware will not be used\n");
	} else if (!fpu_exists) {
		printf("No FPU in configuration\n");
	}

	/*
	 * Initialize devices on the machine.
	 * Uses configuration tree built by the PROMs to determine what
	 * is present, and builds a tree of prototype dev_info nodes
	 * corresponding to the hardware which identified itself.
	 */

	/*
	 * Initialize root node.
	 */
	i_ddi_init_root();

	/* reprogram devices not set up by firmware (BIOS) */
	impl_bus_reprobe();

#if defined(__amd64) && !defined(__xpv)
	/*
	 * Setup but don't startup the IOMMU
	 * Startup happens later via a direct call
	 * to IOMMU code by boot code.
	 * At this point, all PCI bus renumbering
	 * is done, so safe to init the IMMU
	 * AKA Intel IOMMU.
	 */
	immu_init();
#endif

	/*
	 * attach the isa nexus to get ACPI resource usage
	 * isa is "kind of" a pseudo node
	 */
#if defined(__xpv)
	/* Under Xen, only the control domain owns the physical ISA bus. */
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		if (pseudo_isa)
			(void) i_ddi_attach_pseudo_node("isa");
		else
			(void) i_ddi_attach_hw_nodes("isa");
	}
#else
	if (pseudo_isa)
		(void) i_ddi_attach_pseudo_node("isa");
	else
		(void) i_ddi_attach_hw_nodes("isa");
#endif
}
247 247
248 248 /*
249 249 * The "status" property indicates the operational status of a device.
250 250 * If this property is present, the value is a string indicating the
251 251 * status of the device as follows:
252 252 *
253 253 * "okay" operational.
254 254 * "disabled" not operational, but might become operational.
255 255 * "fail" not operational because a fault has been detected,
256 256 * and it is unlikely that the device will become
257 257 * operational without repair. no additional details
258 258 * are available.
259 259 * "fail-xxx" not operational because a fault has been detected,
260 260 * and it is unlikely that the device will become
261 261 * operational without repair. "xxx" is additional
262 262 * human-readable information about the particular
263 263 * fault condition that was detected.
264 264 *
265 265 * The absence of this property means that the operational status is
266 266 * unknown or okay.
267 267 *
268 268 * This routine checks the status property of the specified device node
269 269 * and returns 0 if the operational status indicates failure, and 1 otherwise.
270 270 *
271 271 * The property may exist on plug-in cards the existed before IEEE 1275-1994.
272 272 * And, in that case, the property may not even be a string. So we carefully
273 273 * check for the value "fail", in the beginning of the string, noting
274 274 * the property length.
275 275 */
276 276 int
277 277 status_okay(int id, char *buf, int buflen)
278 278 {
279 279 char status_buf[OBP_MAXPROPNAME];
280 280 char *bufp = buf;
281 281 int len = buflen;
282 282 int proplen;
283 283 static const char *status = "status";
284 284 static const char *fail = "fail";
285 285 int fail_len = (int)strlen(fail);
286 286
287 287 /*
288 288 * Get the proplen ... if it's smaller than "fail",
289 289 * or doesn't exist ... then we don't care, since
290 290 * the value can't begin with the char string "fail".
291 291 *
292 292 * NB: proplen, if it's a string, includes the NULL in the
293 293 * the size of the property, and fail_len does not.
294 294 */
295 295 proplen = prom_getproplen((pnode_t)id, (caddr_t)status);
296 296 if (proplen <= fail_len) /* nonexistant or uninteresting len */
297 297 return (1);
298 298
299 299 /*
300 300 * if a buffer was provided, use it
301 301 */
302 302 if ((buf == (char *)NULL) || (buflen <= 0)) {
303 303 bufp = status_buf;
304 304 len = sizeof (status_buf);
305 305 }
306 306 *bufp = (char)0;
307 307
308 308 /*
309 309 * Get the property into the buffer, to the extent of the buffer,
310 310 * and in case the buffer is smaller than the property size,
311 311 * NULL terminate the buffer. (This handles the case where
312 312 * a buffer was passed in and the caller wants to print the
313 313 * value, but the buffer was too small).
314 314 */
315 315 (void) prom_bounded_getprop((pnode_t)id, (caddr_t)status,
316 316 (caddr_t)bufp, len);
317 317 *(bufp + len - 1) = (char)0;
318 318
319 319 /*
320 320 * If the value begins with the char string "fail",
321 321 * then it means the node is failed. We don't care
322 322 * about any other values. We assume the node is ok
323 323 * although it might be 'disabled'.
324 324 */
325 325 if (strncmp(bufp, fail, fail_len) == 0)
326 326 return (0);
327 327
328 328 return (1);
329 329 }
330 330
331 331 /*
332 332 * Check the status of the device node passed as an argument.
333 333 *
334 334 * if ((status is OKAY) || (status is DISABLED))
335 335 * return DDI_SUCCESS
336 336 * else
337 337 * print a warning and return DDI_FAILURE
338 338 */
339 339 /*ARGSUSED1*/
340 340 int
341 341 check_status(int id, char *name, dev_info_t *parent)
342 342 {
343 343 char status_buf[64];
344 344 char devtype_buf[OBP_MAXPROPNAME];
345 345 int retval = DDI_FAILURE;
346 346
347 347 /*
348 348 * is the status okay?
349 349 */
350 350 if (status_okay(id, status_buf, sizeof (status_buf)))
351 351 return (DDI_SUCCESS);
352 352
353 353 /*
354 354 * a status property indicating bad memory will be associated
355 355 * with a node which has a "device_type" property with a value of
356 356 * "memory-controller". in this situation, return DDI_SUCCESS
357 357 */
358 358 if (getlongprop_buf(id, OBP_DEVICETYPE, devtype_buf,
359 359 sizeof (devtype_buf)) > 0) {
360 360 if (strcmp(devtype_buf, "memory-controller") == 0)
361 361 retval = DDI_SUCCESS;
362 362 }
363 363
364 364 /*
365 365 * print the status property information
366 366 */
367 367 cmn_err(CE_WARN, "status '%s' for '%s'", status_buf, name);
368 368 return (retval);
369 369 }
370 370
/*
 * Level-1 soft interrupt handler: dispatch pending soft interrupts via
 * softint().  Always claims the interrupt (returns 1).
 */
/*ARGSUSED*/
uint_t
softlevel1(caddr_t arg1, caddr_t arg2)
{
	softint();
	return (1);
}
378 378
379 379 /*
380 380 * Allow for implementation specific correction of PROM property values.
381 381 */
382 382
/*
 * Platform hook for correcting PROM property values as they are copied
 * into the dev_info tree.  Intentionally a no-op on x86.
 */
/*ARGSUSED*/
void
impl_fix_props(dev_info_t *dip, dev_info_t *ch_dip, char *name, int len,
    caddr_t buffer)
{
	/*
	 * There are no adjustments needed in this implementation.
	 */
}
392 392
393 393 static int
394 394 getlongprop_buf(int id, char *name, char *buf, int maxlen)
395 395 {
396 396 int size;
397 397
398 398 size = prom_getproplen((pnode_t)id, name);
399 399 if (size <= 0 || (size > maxlen - 1))
400 400 return (-1);
401 401
402 402 if (-1 == prom_getprop((pnode_t)id, name, buf))
403 403 return (-1);
404 404
405 405 if (strcmp("name", name) == 0) {
406 406 if (buf[size - 1] != '\0') {
407 407 buf[size] = '\0';
408 408 size += 1;
409 409 }
410 410 }
411 411
412 412 return (size);
413 413 }
414 414
415 415 static int
416 416 get_prop_int_array(dev_info_t *di, char *pname, int **pval, uint_t *plen)
417 417 {
418 418 int ret;
419 419
420 420 if ((ret = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, di,
421 421 DDI_PROP_DONTPASS, pname, pval, plen))
422 422 == DDI_PROP_SUCCESS) {
423 423 *plen = (*plen) * (sizeof (int));
424 424 }
425 425 return (ret);
426 426 }
427 427
428 428
429 429 /*
430 430 * Node Configuration
431 431 */
432 432
/* Old-style "intr" property element: an <interrupt priority, vector> pair. */
struct prop_ispec {
	uint_t pri, vec;
};

/*
 * For the x86, we're prepared to claim that the interrupt string
 * is in the form of a list of <ipl,vec> specifications.
 */

/* Legal IRQ vector range accepted by impl_xlate_intrs(). */
#define	VEC_MIN	1
#define	VEC_MAX	255
444 444
/*
 * Translate the integer list `in' (a count followed by interrupt data)
 * into an array of struct intrspec hung off `pdptr'.  Two encodings are
 * supported: the old style of <ipl,vec> pairs, and the new style of bare
 * IRQs with priorities supplied separately (see comment below).  Returns
 * DDI_SUCCESS, or DDI_FAILURE with pdptr's intr fields cleared.
 */
static int
impl_xlate_intrs(dev_info_t *child, int *in,
    struct ddi_parent_private_data *pdptr)
{
	size_t size;
	int n;
	struct intrspec *new;
	caddr_t got_prop;
	int *inpri;
	int got_len;
	extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */

	static char bad_intr_fmt[] =
	    "bad interrupt spec from %s%d - ipl %d, irq %d\n";

	/*
	 * determine if the driver is expecting the new style "interrupts"
	 * property which just contains the IRQ, or the old style which
	 * contains pairs of <IPL,IRQ>.  if it is the new style, we always
	 * assign IPL 5 unless an "interrupt-priorities" property exists.
	 * in that case, the "interrupt-priorities" property contains the
	 * IPL values that match, one for one, the IRQ values in the
	 * "interrupts" property.
	 */
	inpri = NULL;
	if ((ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "ignore-hardware-nodes", -1) != -1) || ignore_hardware_nodes) {
		/* the old style "interrupts" property... */

		/*
		 * The list consists of <ipl,vec> elements; the leading
		 * count is in ints, so halve it for the pair count.
		 */
		if ((n = (*in++ >> 1)) < 1)
			return (DDI_FAILURE);

		pdptr->par_nintr = n;
		size = n * sizeof (struct intrspec);
		new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP);

		while (n--) {
			int level = *in++;
			int vec = *in++;

			/* Reject out-of-range IPLs and vectors. */
			if (level < 1 || level > MAXIPL ||
			    vec < VEC_MIN || vec > VEC_MAX) {
				cmn_err(CE_CONT, bad_intr_fmt,
				    DEVI(child)->devi_name,
				    DEVI(child)->devi_instance, level, vec);
				goto broken;
			}
			new->intrspec_pri = level;
			if (vec != 2)
				new->intrspec_vec = vec;
			else
				/*
				 * irq 2 on the PC bus is tied to irq 9
				 * on ISA, EISA and MicroChannel
				 */
				new->intrspec_vec = 9;
			new++;
		}

		return (DDI_SUCCESS);
	} else {
		/* the new style "interrupts" property... */

		/*
		 * The list consists of <vec> elements
		 */
		if ((n = (*in++)) < 1)
			return (DDI_FAILURE);

		pdptr->par_nintr = n;
		size = n * sizeof (struct intrspec);
		new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP);

		/* XXX check for "interrupt-priorities" property... */
		if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
		    "interrupt-priorities", (caddr_t)&got_prop, &got_len)
		    == DDI_PROP_SUCCESS) {
			/* Priorities must pair one-for-one with the IRQs. */
			if (n != (got_len / sizeof (int))) {
				cmn_err(CE_CONT,
				    "bad interrupt-priorities length"
				    " from %s%d: expected %d, got %d\n",
				    DEVI(child)->devi_name,
				    DEVI(child)->devi_instance, n,
				    (int)(got_len / sizeof (int)));
				goto broken;
			}
			inpri = (int *)got_prop;
		}

		while (n--) {
			int level;
			int vec = *in++;

			/* Default to IPL 5 when no priorities were given. */
			if (inpri == NULL)
				level = 5;
			else
				level = *inpri++;

			if (level < 1 || level > MAXIPL ||
			    vec < VEC_MIN || vec > VEC_MAX) {
				cmn_err(CE_CONT, bad_intr_fmt,
				    DEVI(child)->devi_name,
				    DEVI(child)->devi_instance, level, vec);
				goto broken;
			}
			new->intrspec_pri = level;
			if (vec != 2)
				new->intrspec_vec = vec;
			else
				/*
				 * irq 2 on the PC bus is tied to irq 9
				 * on ISA, EISA and MicroChannel
				 */
				new->intrspec_vec = 9;
			new++;
		}

		if (inpri != NULL)
			kmem_free(got_prop, got_len);
		return (DDI_SUCCESS);
	}

	/* Common failure path: undo the partial translation. */
broken:
	kmem_free(pdptr->par_intr, size);
	pdptr->par_intr = NULL;
	pdptr->par_nintr = 0;
	if (inpri != NULL)
		kmem_free(got_prop, got_len);

	return (DDI_FAILURE);
}
579 579
580 580 /*
581 581 * Create a ddi_parent_private_data structure from the ddi properties of
582 582 * the dev_info node.
583 583 *
584 584 * The "reg" and either an "intr" or "interrupts" properties are required
585 585 * if the driver wishes to create mappings or field interrupts on behalf
586 586 * of the device.
587 587 *
588 588 * The "reg" property is assumed to be a list of at least one triple
589 589 *
590 590 * <bustype, address, size>*1
591 591 *
592 592 * The "intr" property is assumed to be a list of at least one duple
593 593 *
594 594 * <SPARC ipl, vector#>*1
595 595 *
596 596 * The "interrupts" property is assumed to be a list of at least one
597 597 * n-tuples that describes the interrupt capabilities of the bus the device
598 598 * is connected to. For SBus, this looks like
599 599 *
600 600 * <SBus-level>*1
601 601 *
602 602 * (This property obsoletes the 'intr' property).
603 603 *
604 604 * The "ranges" property is optional.
605 605 */
606 606 void
607 607 make_ddi_ppd(dev_info_t *child, struct ddi_parent_private_data **ppd)
608 608 {
609 609 struct ddi_parent_private_data *pdptr;
610 610 int n;
611 611 int *reg_prop, *rng_prop, *intr_prop, *irupts_prop;
612 612 uint_t reg_len, rng_len, intr_len, irupts_len;
613 613
614 614 *ppd = pdptr = kmem_zalloc(sizeof (*pdptr), KM_SLEEP);
615 615
616 616 /*
617 617 * Handle the 'reg' property.
618 618 */
619 619 if ((get_prop_int_array(child, "reg", ®_prop, ®_len) ==
620 620 DDI_PROP_SUCCESS) && (reg_len != 0)) {
621 621 pdptr->par_nreg = reg_len / (int)sizeof (struct regspec);
622 622 pdptr->par_reg = (struct regspec *)reg_prop;
623 623 }
624 624
625 625 /*
626 626 * See if I have a range (adding one where needed - this
627 627 * means to add one for sbus node in sun4c, when romvec > 0,
628 628 * if no range is already defined in the PROM node.
629 629 * (Currently no sun4c PROMS define range properties,
630 630 * but they should and may in the future.) For the SBus
631 631 * node, the range is defined by the SBus reg property.
632 632 */
633 633 if (get_prop_int_array(child, "ranges", &rng_prop, &rng_len)
634 634 == DDI_PROP_SUCCESS) {
635 635 pdptr->par_nrng = rng_len / (int)(sizeof (struct rangespec));
636 636 pdptr->par_rng = (struct rangespec *)rng_prop;
637 637 }
638 638
639 639 /*
640 640 * Handle the 'intr' and 'interrupts' properties
641 641 */
642 642
643 643 /*
644 644 * For backwards compatibility
645 645 * we first look for the 'intr' property for the device.
646 646 */
647 647 if (get_prop_int_array(child, "intr", &intr_prop, &intr_len)
648 648 != DDI_PROP_SUCCESS) {
649 649 intr_len = 0;
650 650 }
651 651
652 652 /*
653 653 * If we're to support bus adapters and future platforms cleanly,
654 654 * we need to support the generalized 'interrupts' property.
655 655 */
656 656 if (get_prop_int_array(child, "interrupts", &irupts_prop,
657 657 &irupts_len) != DDI_PROP_SUCCESS) {
658 658 irupts_len = 0;
659 659 } else if (intr_len != 0) {
660 660 /*
661 661 * If both 'intr' and 'interrupts' are defined,
662 662 * then 'interrupts' wins and we toss the 'intr' away.
663 663 */
664 664 ddi_prop_free((void *)intr_prop);
665 665 intr_len = 0;
666 666 }
667 667
668 668 if (intr_len != 0) {
669 669
670 670 /*
671 671 * Translate the 'intr' property into an array
672 672 * an array of struct intrspec's. There's not really
673 673 * very much to do here except copy what's out there.
674 674 */
675 675
676 676 struct intrspec *new;
677 677 struct prop_ispec *l;
678 678
679 679 n = pdptr->par_nintr = intr_len / sizeof (struct prop_ispec);
680 680 l = (struct prop_ispec *)intr_prop;
681 681 pdptr->par_intr =
682 682 new = kmem_zalloc(n * sizeof (struct intrspec), KM_SLEEP);
683 683 while (n--) {
684 684 new->intrspec_pri = l->pri;
685 685 new->intrspec_vec = l->vec;
686 686 new++;
687 687 l++;
688 688 }
689 689 ddi_prop_free((void *)intr_prop);
690 690
691 691 } else if ((n = irupts_len) != 0) {
692 692 size_t size;
693 693 int *out;
694 694
695 695 /*
696 696 * Translate the 'interrupts' property into an array
697 697 * of intrspecs for the rest of the DDI framework to
698 698 * toy with. Only our ancestors really know how to
699 699 * do this, so ask 'em. We massage the 'interrupts'
700 700 * property so that it is pre-pended by a count of
701 701 * the number of integers in the argument.
702 702 */
703 703 size = sizeof (int) + n;
704 704 out = kmem_alloc(size, KM_SLEEP);
705 705 *out = n / sizeof (int);
706 706 bcopy(irupts_prop, out + 1, (size_t)n);
707 707 ddi_prop_free((void *)irupts_prop);
708 708 if (impl_xlate_intrs(child, out, pdptr) != DDI_SUCCESS) {
709 709 cmn_err(CE_CONT,
710 710 "Unable to translate 'interrupts' for %s%d\n",
711 711 DEVI(child)->devi_binding_name,
712 712 DEVI(child)->devi_instance);
713 713 }
714 714 kmem_free(out, size);
715 715 }
716 716 }
717 717
718 718 /*
719 719 * Name a child
720 720 */
/*
 * Name a child: build parent-private data (if not already present) from
 * the node's properties, then form the unit address "<bustype>,<addr>"
 * from the first "reg" entry.  A node with no registers gets an empty
 * name.  Always returns DDI_SUCCESS.
 */
static int
impl_sunbus_name_child(dev_info_t *child, char *name, int namelen)
{
	/*
	 * Fill in parent-private data and this function returns to us
	 * an indication if it used "registers" to fill in the data.
	 */
	if (ddi_get_parent_data(child) == NULL) {
		struct ddi_parent_private_data *pdptr;
		make_ddi_ppd(child, &pdptr);
		ddi_set_parent_data(child, pdptr);
	}

	name[0] = '\0';
	if (sparc_pd_getnreg(child) > 0) {
		(void) snprintf(name, namelen, "%x,%x",
		    (uint_t)sparc_pd_getreg(child, 0)->regspec_bustype,
		    (uint_t)sparc_pd_getreg(child, 0)->regspec_addr);
	}

	return (DDI_SUCCESS);
}
743 743
744 744 /*
745 745 * Called from the bus_ctl op of sunbus (sbus, obio, etc) nexus drivers
746 746 * to implement the DDI_CTLOPS_INITCHILD operation. That is, it names
747 747 * the children of sun busses based on the reg spec.
748 748 *
749 749 * Handles the following properties (in make_ddi_ppd):
750 750 * Property value
751 751 * Name type
752 752 * reg register spec
753 753 * intr old-form interrupt spec
754 754 * interrupts new (bus-oriented) interrupt spec
755 755 * ranges range spec
756 756 */
int
impl_ddi_sunbus_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];
	void impl_ddi_sunbus_removechild(dev_info_t *);

	/*
	 * Name the child, also makes parent private data
	 */
	(void) impl_sunbus_name_child(child, name, MAXNAMELEN);
	ddi_set_name_addr(child, name);

	/*
	 * Attempt to merge a .conf node; if successful, remove the
	 * .conf node.
	 */
	if ((ndi_dev_is_persistent_node(child) == 0) &&
	    (ndi_merge_node(child, impl_sunbus_name_child) == DDI_SUCCESS)) {
		/*
		 * Return failure to remove node
		 */
		impl_ddi_sunbus_removechild(child);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
783 783
784 784 void
785 785 impl_free_ddi_ppd(dev_info_t *dip)
786 786 {
787 787 struct ddi_parent_private_data *pdptr;
788 788 size_t n;
789 789
790 790 if ((pdptr = ddi_get_parent_data(dip)) == NULL)
791 791 return;
792 792
793 793 if ((n = (size_t)pdptr->par_nintr) != 0)
794 794 /*
795 795 * Note that kmem_free is used here (instead of
796 796 * ddi_prop_free) because the contents of the
797 797 * property were placed into a separate buffer and
798 798 * mucked with a bit before being stored in par_intr.
799 799 * The actual return value from the prop lookup
800 800 * was freed with ddi_prop_free previously.
801 801 */
802 802 kmem_free(pdptr->par_intr, n * sizeof (struct intrspec));
803 803
804 804 if ((n = (size_t)pdptr->par_nrng) != 0)
805 805 ddi_prop_free((void *)pdptr->par_rng);
806 806
807 807 if ((n = pdptr->par_nreg) != 0)
808 808 ddi_prop_free((void *)pdptr->par_reg);
809 809
810 810 kmem_free(pdptr, sizeof (*pdptr));
811 811 ddi_set_parent_data(dip, NULL);
812 812 }
813 813
/*
 * Undo impl_ddi_sunbus_initchild(): free parent-private data, clear the
 * unit address, and strip properties to return the node to prototype form.
 */
void
impl_ddi_sunbus_removechild(dev_info_t *dip)
{
	impl_free_ddi_ppd(dip);
	ddi_set_name_addr(dip, NULL);
	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	impl_rem_dev_props(dip);
}
824 824
825 825 /*
826 826 * DDI Interrupt
827 827 */
828 828
829 829 /*
830 830 * turn this on to force isa, eisa, and mca device to ignore the new
831 831 * hardware nodes in the device tree (normally turned on only for
832 832 * drivers that need it by setting the property "ignore-hardware-nodes"
833 833 * in their driver.conf file).
834 834 *
835 835 * 7/31/96 -- Turned off globally. Leaving variable in for the moment
836 836 * as safety valve.
837 837 */
int ignore_hardware_nodes = 0;	/* 0 = hardware nodes honored (default) */

/*
 * Local data
 */
/* NOTE(review): usage of impl_busp is outside this view; see the bus probe routines. */
static struct impl_bus_promops *impl_busp;
844 844
845 845
846 846 /*
847 847 * New DDI interrupt framework
848 848 */
849 849
850 850 /*
851 851 * i_ddi_intr_ops:
852 852 *
853 853 * This is the interrupt operator function wrapper for the bus function
854 854 * bus_intr_op.
855 855 */
int
i_ddi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t op,
    ddi_intr_handle_impl_t *hdlp, void * result)
{
	dev_info_t *pdip = (dev_info_t *)DEVI(dip)->devi_parent;
	int ret = DDI_FAILURE;

	/* request parent to process this interrupt op */
	if (NEXUS_HAS_INTR_OP(pdip))
		ret = (*(DEVI(pdip)->devi_ops->devo_bus_ops->bus_intr_op))(
		    pdip, rdip, op, hdlp, result);
	else
		/* parent nexus predates the bus_intr_op interface */
		cmn_err(CE_WARN, "Failed to process interrupt "
		    "for %s%d due to down-rev nexus driver %s%d",
		    ddi_get_name(rdip), ddi_get_instance(rdip),
		    ddi_get_name(pdip), ddi_get_instance(pdip));
	return (ret);
}
874 874
875 875 /*
876 876 * i_ddi_add_softint - allocate and add a soft interrupt to the system
877 877 */
878 878 int
879 879 i_ddi_add_softint(ddi_softint_hdl_impl_t *hdlp)
880 880 {
881 881 int ret;
882 882
883 883 /* add soft interrupt handler */
884 884 ret = add_avsoftintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func,
885 885 DEVI(hdlp->ih_dip)->devi_name, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
886 886 return (ret ? DDI_SUCCESS : DDI_FAILURE);
887 887 }
888 888
889 889
/* Remove a previously added soft interrupt handler. */
void
i_ddi_remove_softint(ddi_softint_hdl_impl_t *hdlp)
{
	(void) rem_avsoftintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func);
}
895 895
896 896
897 897 extern void (*setsoftint)(int, struct av_softinfo *);
898 898 extern boolean_t av_check_softint_pending(struct av_softinfo *, boolean_t);
899 899
/*
 * Trigger a registered soft interrupt.  Returns DDI_EPENDING if one is
 * already pending for this handle; otherwise updates the second callback
 * argument and raises the soft interrupt at the handle's priority.
 */
int
i_ddi_trigger_softint(ddi_softint_hdl_impl_t *hdlp, void *arg2)
{
	if (av_check_softint_pending(hdlp->ih_pending, B_FALSE))
		return (DDI_EPENDING);

	update_avsoftintr_args((void *)hdlp, hdlp->ih_pri, arg2);

	(*setsoftint)(hdlp->ih_pri, hdlp->ih_pending);
	return (DDI_SUCCESS);
}
911 911
912 912 /*
913 913 * i_ddi_set_softint_pri:
914 914 *
915 915 * The way this works is that it first tries to add a softint vector
916 916 * at the new priority in hdlp. If that succeeds; then it removes the
917 917 * existing softint vector at the old priority.
918 918 */
int
i_ddi_set_softint_pri(ddi_softint_hdl_impl_t *hdlp, uint_t old_pri)
{
	int ret;

	/*
	 * If a softint is pending at the old priority then fail the request.
	 */
	if (av_check_softint_pending(hdlp->ih_pending, B_TRUE))
		return (DDI_FAILURE);

	/* av_softint_movepri() returns non-zero on success */
	ret = av_softint_movepri((void *)hdlp, old_pri);
	return (ret ? DDI_SUCCESS : DDI_FAILURE);
}
933 933
/* Allocate the platform-private portion of an interrupt handle. */
void
i_ddi_alloc_intr_phdl(ddi_intr_handle_impl_t *hdlp)
{
	hdlp->ih_private = (void *)kmem_zalloc(sizeof (ihdl_plat_t), KM_SLEEP);
}
939 939
/* Free the platform-private portion of an interrupt handle. */
void
i_ddi_free_intr_phdl(ddi_intr_handle_impl_t *hdlp)
{
	kmem_free(hdlp->ih_private, sizeof (ihdl_plat_t));
	hdlp->ih_private = NULL;	/* guard against reuse */
}
946 946
947 947 int
948 948 i_ddi_get_intx_nintrs(dev_info_t *dip)
949 949 {
950 950 struct ddi_parent_private_data *pdp;
951 951
952 952 if ((pdp = ddi_get_parent_data(dip)) == NULL)
953 953 return (0);
954 954
955 955 return (pdp->par_nintr);
956 956 }
957 957
958 958 /*
959 959 * DDI Memory/DMA
960 960 */
961 961
962 962 /*
963 963 * Support for allocating DMAable memory to implement
964 964 * ddi_dma_mem_alloc(9F) interface.
965 965 */
966 966
/* kalloca() cache geometry: 128-byte minimum alignment, one cache per
 * power-of-two size up to the page size. */
#define	KA_ALIGN_SHIFT	7
#define	KA_ALIGN	(1 << KA_ALIGN_SHIFT)
#define	KA_NCACHE	(PAGESHIFT + 1 - KA_ALIGN_SHIFT)

/*
 * Dummy DMA attribute template for kmem_io[].kmem_io_attr.  We only
 * care about addr_lo, addr_hi, and align.  addr_hi will be dynamically set.
 */

static ddi_dma_attr_t kmem_io_attr = {
	DMA_ATTR_V0,
	0x0000000000000000ULL,		/* dma_attr_addr_lo */
	0x0000000000000000ULL,		/* dma_attr_addr_hi */
	0x00ffffff,
	0x1000,				/* dma_attr_align */
	1, 1, 0xffffffffULL, 0xffffffffULL, 0x1, 1, 0
};

/* kmem io memory ranges and indices */
enum {
	IO_4P, IO_64G, IO_4G, IO_2G, IO_1G, IO_512M,
	IO_256M, IO_128M, IO_64M, IO_32M, IO_16M, MAX_MEM_RANGES
};

/* Per-range arena, size caches, and DMA attributes for IO allocations. */
static struct {
	vmem_t *kmem_io_arena;
	kmem_cache_t *kmem_io_cache[KA_NCACHE];
	ddi_dma_attr_t kmem_io_attr;
} kmem_io[MAX_MEM_RANGES];

static int kmem_io_idx;		/* index of first populated kmem_io[] */
998 998
/*
 * vmem backend helper: create IO pages for [addr, addr+len) in kas,
 * constrained by the DMA attributes passed through `arg'.
 */
static page_t *
page_create_io_wrapper(void *addr, size_t len, int vmflag, void *arg)
{
	extern page_t *page_create_io(vnode_t *, u_offset_t, uint_t,
	    uint_t, struct as *, caddr_t, ddi_dma_attr_t *);

	/* PG_WAIT only when the caller can sleep */
	return (page_create_io(&kvp, (u_offset_t)(uintptr_t)addr, len,
	    PG_EXCL | ((vmflag & VM_NOSLEEP) ? 0 : PG_WAIT), &kas, addr, arg));
}
1008 1008
#ifdef __xpv
/*
 * Xen variant of the arena free routine: pages allocated through
 * page_create_io() must be released with page_destroy_io().
 */
static void
segkmem_free_io(vmem_t *vmp, void * ptr, size_t size)
{
	extern void page_destroy_io(page_t *);
	segkmem_xfree(vmp, ptr, size, page_destroy_io);
}
#endif
1017 1017
1018 1018 static void *
1019 1019 segkmem_alloc_io_4P(vmem_t *vmp, size_t size, int vmflag)
1020 1020 {
1021 1021 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1022 1022 page_create_io_wrapper, &kmem_io[IO_4P].kmem_io_attr));
1023 1023 }
1024 1024
1025 1025 static void *
1026 1026 segkmem_alloc_io_64G(vmem_t *vmp, size_t size, int vmflag)
1027 1027 {
1028 1028 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1029 1029 page_create_io_wrapper, &kmem_io[IO_64G].kmem_io_attr));
1030 1030 }
1031 1031
1032 1032 static void *
1033 1033 segkmem_alloc_io_4G(vmem_t *vmp, size_t size, int vmflag)
1034 1034 {
1035 1035 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1036 1036 page_create_io_wrapper, &kmem_io[IO_4G].kmem_io_attr));
1037 1037 }
1038 1038
1039 1039 static void *
1040 1040 segkmem_alloc_io_2G(vmem_t *vmp, size_t size, int vmflag)
1041 1041 {
1042 1042 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1043 1043 page_create_io_wrapper, &kmem_io[IO_2G].kmem_io_attr));
1044 1044 }
1045 1045
1046 1046 static void *
1047 1047 segkmem_alloc_io_1G(vmem_t *vmp, size_t size, int vmflag)
1048 1048 {
1049 1049 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1050 1050 page_create_io_wrapper, &kmem_io[IO_1G].kmem_io_attr));
1051 1051 }
1052 1052
1053 1053 static void *
1054 1054 segkmem_alloc_io_512M(vmem_t *vmp, size_t size, int vmflag)
1055 1055 {
1056 1056 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1057 1057 page_create_io_wrapper, &kmem_io[IO_512M].kmem_io_attr));
1058 1058 }
1059 1059
1060 1060 static void *
1061 1061 segkmem_alloc_io_256M(vmem_t *vmp, size_t size, int vmflag)
1062 1062 {
1063 1063 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1064 1064 page_create_io_wrapper, &kmem_io[IO_256M].kmem_io_attr));
1065 1065 }
1066 1066
1067 1067 static void *
1068 1068 segkmem_alloc_io_128M(vmem_t *vmp, size_t size, int vmflag)
1069 1069 {
1070 1070 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1071 1071 page_create_io_wrapper, &kmem_io[IO_128M].kmem_io_attr));
1072 1072 }
1073 1073
1074 1074 static void *
1075 1075 segkmem_alloc_io_64M(vmem_t *vmp, size_t size, int vmflag)
1076 1076 {
1077 1077 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1078 1078 page_create_io_wrapper, &kmem_io[IO_64M].kmem_io_attr));
1079 1079 }
1080 1080
1081 1081 static void *
1082 1082 segkmem_alloc_io_32M(vmem_t *vmp, size_t size, int vmflag)
1083 1083 {
1084 1084 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1085 1085 page_create_io_wrapper, &kmem_io[IO_32M].kmem_io_attr));
1086 1086 }
1087 1087
1088 1088 static void *
1089 1089 segkmem_alloc_io_16M(vmem_t *vmp, size_t size, int vmflag)
1090 1090 {
1091 1091 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
1092 1092 page_create_io_wrapper, &kmem_io[IO_16M].kmem_io_attr));
1093 1093 }
1094 1094
/*
 * Table of the io memory ranges.  Entries must remain in the same order
 * as the IO_* enum indices and in descending io_limit order: ka_init()
 * walks the table assuming limits decrease, and kmem_io_index() returns
 * the first (largest) matching range.
 */
struct {
	uint64_t	io_limit;	/* highest DMA address in range */
	char		*io_name;	/* arena name */
	void		*(*io_alloc)(vmem_t *, size_t, int);
	int		io_initial;	/* kmem_io_init during startup */
} io_arena_params[MAX_MEM_RANGES] = {
	{0x000fffffffffffffULL,	"kmem_io_4P",	segkmem_alloc_io_4P,	1},
	{0x0000000fffffffffULL,	"kmem_io_64G",	segkmem_alloc_io_64G,	0},
	{0x00000000ffffffffULL,	"kmem_io_4G",	segkmem_alloc_io_4G,	1},
	{0x000000007fffffffULL,	"kmem_io_2G",	segkmem_alloc_io_2G,	1},
	{0x000000003fffffffULL,	"kmem_io_1G",	segkmem_alloc_io_1G,	0},
	{0x000000001fffffffULL,	"kmem_io_512M",	segkmem_alloc_io_512M,	0},
	{0x000000000fffffffULL,	"kmem_io_256M",	segkmem_alloc_io_256M,	0},
	{0x0000000007ffffffULL,	"kmem_io_128M",	segkmem_alloc_io_128M,	0},
	{0x0000000003ffffffULL,	"kmem_io_64M",	segkmem_alloc_io_64M,	0},
	{0x0000000001ffffffULL,	"kmem_io_32M",	segkmem_alloc_io_32M,	0},
	{0x0000000000ffffffULL,	"kmem_io_16M",	segkmem_alloc_io_16M,	1}
};
1113 1113
1114 1114 void
1115 1115 kmem_io_init(int a)
1116 1116 {
1117 1117 int c;
1118 1118 char name[40];
1119 1119
1120 1120 kmem_io[a].kmem_io_arena = vmem_create(io_arena_params[a].io_name,
1121 1121 NULL, 0, PAGESIZE, io_arena_params[a].io_alloc,
1122 1122 #ifdef __xpv
1123 1123 segkmem_free_io,
1124 1124 #else
1125 1125 segkmem_free,
1126 1126 #endif
1127 1127 heap_arena, 0, VM_SLEEP);
1128 1128
1129 1129 for (c = 0; c < KA_NCACHE; c++) {
1130 1130 size_t size = KA_ALIGN << c;
1131 1131 (void) sprintf(name, "%s_%lu",
1132 1132 io_arena_params[a].io_name, size);
1133 1133 kmem_io[a].kmem_io_cache[c] = kmem_cache_create(name,
1134 1134 size, size, NULL, NULL, NULL, NULL,
1135 1135 kmem_io[a].kmem_io_arena, 0);
1136 1136 }
1137 1137 }
1138 1138
1139 1139 /*
1140 1140 * Return the index of the highest memory range for addr.
1141 1141 */
1142 1142 static int
1143 1143 kmem_io_index(uint64_t addr)
1144 1144 {
1145 1145 int n;
1146 1146
1147 1147 for (n = kmem_io_idx; n < MAX_MEM_RANGES; n++) {
1148 1148 if (kmem_io[n].kmem_io_attr.dma_attr_addr_hi <= addr) {
1149 1149 if (kmem_io[n].kmem_io_arena == NULL)
1150 1150 kmem_io_init(n);
1151 1151 return (n);
1152 1152 }
1153 1153 }
1154 1154 panic("kmem_io_index: invalid addr - must be at least 16m");
1155 1155
1156 1156 /*NOTREACHED*/
1157 1157 }
1158 1158
1159 1159 /*
1160 1160 * Return the index of the next kmem_io populated memory range
1161 1161 * after curindex.
1162 1162 */
1163 1163 static int
1164 1164 kmem_io_index_next(int curindex)
1165 1165 {
1166 1166 int n;
1167 1167
1168 1168 for (n = curindex + 1; n < MAX_MEM_RANGES; n++) {
1169 1169 if (kmem_io[n].kmem_io_arena)
1170 1170 return (n);
1171 1171 }
1172 1172 return (-1);
1173 1173 }
1174 1174
1175 1175 /*
1176 1176 * allow kmem to be mapped in with different PTE cache attribute settings.
1177 1177 * Used by i_ddi_mem_alloc()
1178 1178 */
1179 1179 int
1180 1180 kmem_override_cache_attrs(caddr_t kva, size_t size, uint_t order)
1181 1181 {
1182 1182 uint_t hat_flags;
1183 1183 caddr_t kva_end;
1184 1184 uint_t hat_attr;
1185 1185 pfn_t pfn;
1186 1186
1187 1187 if (hat_getattr(kas.a_hat, kva, &hat_attr) == -1) {
1188 1188 return (-1);
1189 1189 }
1190 1190
1191 1191 hat_attr &= ~HAT_ORDER_MASK;
1192 1192 hat_attr |= order | HAT_NOSYNC;
1193 1193 hat_flags = HAT_LOAD_LOCK;
1194 1194
1195 1195 kva_end = (caddr_t)(((uintptr_t)kva + size + PAGEOFFSET) &
1196 1196 (uintptr_t)PAGEMASK);
1197 1197 kva = (caddr_t)((uintptr_t)kva & (uintptr_t)PAGEMASK);
1198 1198
1199 1199 while (kva < kva_end) {
1200 1200 pfn = hat_getpfnum(kas.a_hat, kva);
1201 1201 hat_unload(kas.a_hat, kva, PAGESIZE, HAT_UNLOAD_UNLOCK);
1202 1202 hat_devload(kas.a_hat, kva, PAGESIZE, pfn, hat_attr, hat_flags);
1203 1203 kva += MMU_PAGESIZE;
1204 1204 }
1205 1205
1206 1206 return (0);
1207 1207 }
1208 1208
1209 1209 static int
1210 1210 ctgcompare(const void *a1, const void *a2)
1211 1211 {
1212 1212 /* we just want to compare virtual addresses */
1213 1213 a1 = ((struct ctgas *)a1)->ctg_addr;
1214 1214 a2 = ((struct ctgas *)a2)->ctg_addr;
1215 1215 return (a1 == a2 ? 0 : (a1 < a2 ? -1 : 1));
1216 1216 }
1217 1217
1218 1218 void
1219 1219 ka_init(void)
1220 1220 {
1221 1221 int a;
1222 1222 paddr_t maxphysaddr;
1223 1223 #if !defined(__xpv)
1224 1224 extern pfn_t physmax;
1225 1225
1226 1226 maxphysaddr = mmu_ptob((paddr_t)physmax) + MMU_PAGEOFFSET;
1227 1227 #else
1228 1228 maxphysaddr = mmu_ptob((paddr_t)HYPERVISOR_memory_op(
1229 1229 XENMEM_maximum_ram_page, NULL)) + MMU_PAGEOFFSET;
1230 1230 #endif
1231 1231
1232 1232 ASSERT(maxphysaddr <= io_arena_params[0].io_limit);
1233 1233
1234 1234 for (a = 0; a < MAX_MEM_RANGES; a++) {
1235 1235 if (maxphysaddr >= io_arena_params[a + 1].io_limit) {
1236 1236 if (maxphysaddr > io_arena_params[a + 1].io_limit)
1237 1237 io_arena_params[a].io_limit = maxphysaddr;
1238 1238 else
1239 1239 a++;
1240 1240 break;
1241 1241 }
1242 1242 }
1243 1243 kmem_io_idx = a;
1244 1244
1245 1245 for (; a < MAX_MEM_RANGES; a++) {
1246 1246 kmem_io[a].kmem_io_attr = kmem_io_attr;
1247 1247 kmem_io[a].kmem_io_attr.dma_attr_addr_hi =
1248 1248 io_arena_params[a].io_limit;
1249 1249 /*
1250 1250 * initialize kmem_io[] arena/cache corresponding to
1251 1251 * maxphysaddr and to the "common" io memory ranges that
1252 1252 * have io_initial set to a non-zero value.
1253 1253 */
1254 1254 if (io_arena_params[a].io_initial || a == kmem_io_idx)
1255 1255 kmem_io_init(a);
1256 1256 }
1257 1257
1258 1258 /* initialize ctgtree */
1259 1259 avl_create(&ctgtree, ctgcompare, sizeof (struct ctgas),
1260 1260 offsetof(struct ctgas, ctg_link));
1261 1261 }
1262 1262
1263 1263 /*
1264 1264 * put contig address/size
1265 1265 */
1266 1266 static void *
1267 1267 putctgas(void *addr, size_t size)
1268 1268 {
1269 1269 struct ctgas *ctgp;
1270 1270 if ((ctgp = kmem_zalloc(sizeof (*ctgp), KM_NOSLEEP)) != NULL) {
1271 1271 ctgp->ctg_addr = addr;
1272 1272 ctgp->ctg_size = size;
1273 1273 CTGLOCK();
1274 1274 avl_add(&ctgtree, ctgp);
1275 1275 CTGUNLOCK();
1276 1276 }
1277 1277 return (ctgp);
1278 1278 }
1279 1279
1280 1280 /*
1281 1281 * get contig size by addr
1282 1282 */
1283 1283 static size_t
1284 1284 getctgsz(void *addr)
1285 1285 {
1286 1286 struct ctgas *ctgp;
1287 1287 struct ctgas find;
1288 1288 size_t sz = 0;
1289 1289
1290 1290 find.ctg_addr = addr;
1291 1291 CTGLOCK();
1292 1292 if ((ctgp = avl_find(&ctgtree, &find, NULL)) != NULL) {
1293 1293 avl_remove(&ctgtree, ctgp);
1294 1294 }
1295 1295 CTGUNLOCK();
1296 1296
1297 1297 if (ctgp != NULL) {
1298 1298 sz = ctgp->ctg_size;
1299 1299 kmem_free(ctgp, sizeof (*ctgp));
1300 1300 }
1301 1301
1302 1302 return (sz);
1303 1303 }
1304 1304
1305 1305 /*
1306 1306 * contig_alloc:
1307 1307 *
1308 1308 * allocates contiguous memory to satisfy the 'size' and dma attributes
1309 1309 * specified in 'attr'.
1310 1310 *
1311 1311 * Not all of memory need to be physically contiguous if the
1312 1312 * scatter-gather list length is greater than 1.
1313 1313 */
1314 1314
1315 1315 /*ARGSUSED*/
1316 1316 void *
1317 1317 contig_alloc(size_t size, ddi_dma_attr_t *attr, uintptr_t align, int cansleep)
1318 1318 {
1319 1319 pgcnt_t pgcnt = btopr(size);
1320 1320 size_t asize = pgcnt * PAGESIZE;
1321 1321 page_t *ppl;
1322 1322 int pflag;
1323 1323 void *addr;
1324 1324
1325 1325 extern page_t *page_create_io(vnode_t *, u_offset_t, uint_t,
1326 1326 uint_t, struct as *, caddr_t, ddi_dma_attr_t *);
1327 1327
1328 1328 /* segkmem_xalloc */
1329 1329
1330 1330 if (align <= PAGESIZE)
1331 1331 addr = vmem_alloc(heap_arena, asize,
1332 1332 (cansleep) ? VM_SLEEP : VM_NOSLEEP);
1333 1333 else
1334 1334 addr = vmem_xalloc(heap_arena, asize, align, 0, 0, NULL, NULL,
1335 1335 (cansleep) ? VM_SLEEP : VM_NOSLEEP);
1336 1336 if (addr) {
1337 1337 ASSERT(!((uintptr_t)addr & (align - 1)));
1338 1338
1339 1339 if (page_resv(pgcnt, (cansleep) ? KM_SLEEP : KM_NOSLEEP) == 0) {
1340 1340 vmem_free(heap_arena, addr, asize);
1341 1341 return (NULL);
1342 1342 }
1343 1343 pflag = PG_EXCL;
1344 1344
1345 1345 if (cansleep)
1346 1346 pflag |= PG_WAIT;
1347 1347
1348 1348 /* 4k req gets from freelists rather than pfn search */
1349 1349 if (pgcnt > 1 || align > PAGESIZE)
1350 1350 pflag |= PG_PHYSCONTIG;
1351 1351
1352 1352 ppl = page_create_io(&kvp, (u_offset_t)(uintptr_t)addr,
1353 1353 asize, pflag, &kas, (caddr_t)addr, attr);
1354 1354
1355 1355 if (!ppl) {
1356 1356 vmem_free(heap_arena, addr, asize);
1357 1357 page_unresv(pgcnt);
1358 1358 return (NULL);
1359 1359 }
1360 1360
1361 1361 while (ppl != NULL) {
1362 1362 page_t *pp = ppl;
1363 1363 page_sub(&ppl, pp);
1364 1364 ASSERT(page_iolock_assert(pp));
1365 1365 page_io_unlock(pp);
1366 1366 page_downgrade(pp);
1367 1367 hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset,
1368 1368 pp, (PROT_ALL & ~PROT_USER) |
1369 1369 HAT_NOSYNC, HAT_LOAD_LOCK);
1370 1370 }
1371 1371 }
1372 1372 return (addr);
1373 1373 }
1374 1374
/*
 * Tear down a contig_alloc() allocation: unload the (locked) mappings,
 * destroy each page, release the page reservation, and return the
 * virtual space to heap_arena.  Panics if a page has vanished, since
 * that indicates corruption of the allocation.
 */
void
contig_free(void *addr, size_t size)
{
	pgcnt_t pgcnt = btopr(size);
	size_t asize = pgcnt * PAGESIZE;
	caddr_t a, ea;
	page_t *pp;

	hat_unload(kas.a_hat, addr, asize, HAT_UNLOAD_UNLOCK);

	for (a = addr, ea = a + asize; a < ea; a += PAGESIZE) {
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)a);
		if (!pp)
			panic("contig_free: contig pp not found");

		/*
		 * page_destroy() needs the exclusive lock; if we can't
		 * upgrade our shared lock in place, drop it and reacquire
		 * exclusively via page_lookup().
		 */
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			pp = page_lookup(&kvp,
			    (u_offset_t)(uintptr_t)a, SE_EXCL);
			if (pp == NULL)
				panic("contig_free: page freed");
		}
		page_destroy(pp, 0);
	}

	page_unresv(pgcnt);
	vmem_free(heap_arena, addr, asize);
}
1403 1403
1404 1404 /*
1405 1405 * Allocate from the system, aligned on a specific boundary.
1406 1406 * The alignment, if non-zero, must be a power of 2.
1407 1407 */
1408 1408 static void *
1409 1409 kalloca(size_t size, size_t align, int cansleep, int physcontig,
1410 1410 ddi_dma_attr_t *attr)
1411 1411 {
1412 1412 size_t *addr, *raddr, rsize;
1413 1413 size_t hdrsize = 4 * sizeof (size_t); /* must be power of 2 */
1414 1414 int a, i, c;
1415 1415 vmem_t *vmp;
1416 1416 kmem_cache_t *cp = NULL;
1417 1417
1418 1418 if (attr->dma_attr_addr_lo > mmu_ptob((uint64_t)ddiphysmin))
1419 1419 return (NULL);
1420 1420
1421 1421 align = MAX(align, hdrsize);
1422 1422 ASSERT((align & (align - 1)) == 0);
1423 1423
1424 1424 /*
1425 1425 * All of our allocators guarantee 16-byte alignment, so we don't
1426 1426 * need to reserve additional space for the header.
1427 1427 * To simplify picking the correct kmem_io_cache, we round up to
1428 1428 * a multiple of KA_ALIGN.
1429 1429 */
1430 1430 rsize = P2ROUNDUP_TYPED(size + align, KA_ALIGN, size_t);
1431 1431
1432 1432 if (physcontig && rsize > PAGESIZE) {
1433 1433 if (addr = contig_alloc(size, attr, align, cansleep)) {
1434 1434 if (!putctgas(addr, size))
1435 1435 contig_free(addr, size);
1436 1436 else
1437 1437 return (addr);
1438 1438 }
1439 1439 return (NULL);
1440 1440 }
1441 1441
1442 1442 a = kmem_io_index(attr->dma_attr_addr_hi);
1443 1443
1444 1444 if (rsize > PAGESIZE) {
1445 1445 vmp = kmem_io[a].kmem_io_arena;
1446 1446 raddr = vmem_alloc(vmp, rsize,
1447 1447 (cansleep) ? VM_SLEEP : VM_NOSLEEP);
1448 1448 } else {
1449 1449 c = highbit((rsize >> KA_ALIGN_SHIFT) - 1);
1450 1450 cp = kmem_io[a].kmem_io_cache[c];
1451 1451 raddr = kmem_cache_alloc(cp, (cansleep) ? KM_SLEEP :
1452 1452 KM_NOSLEEP);
1453 1453 }
1454 1454
1455 1455 if (raddr == NULL) {
1456 1456 int na;
1457 1457
1458 1458 ASSERT(cansleep == 0);
1459 1459 if (rsize > PAGESIZE)
1460 1460 return (NULL);
1461 1461 /*
1462 1462 * System does not have memory in the requested range.
1463 1463 * Try smaller kmem io ranges and larger cache sizes
1464 1464 * to see if there might be memory available in
1465 1465 * these other caches.
1466 1466 */
1467 1467
1468 1468 for (na = kmem_io_index_next(a); na >= 0;
1469 1469 na = kmem_io_index_next(na)) {
1470 1470 ASSERT(kmem_io[na].kmem_io_arena);
1471 1471 cp = kmem_io[na].kmem_io_cache[c];
1472 1472 raddr = kmem_cache_alloc(cp, KM_NOSLEEP);
1473 1473 if (raddr)
1474 1474 goto kallocdone;
1475 1475 }
1476 1476 /* now try the larger kmem io cache sizes */
1477 1477 for (na = a; na >= 0; na = kmem_io_index_next(na)) {
1478 1478 for (i = c + 1; i < KA_NCACHE; i++) {
1479 1479 cp = kmem_io[na].kmem_io_cache[i];
1480 1480 raddr = kmem_cache_alloc(cp, KM_NOSLEEP);
1481 1481 if (raddr)
1482 1482 goto kallocdone;
1483 1483 }
1484 1484 }
1485 1485 return (NULL);
1486 1486 }
1487 1487
1488 1488 kallocdone:
1489 1489 ASSERT(!P2BOUNDARY((uintptr_t)raddr, rsize, PAGESIZE) ||
1490 1490 rsize > PAGESIZE);
1491 1491
1492 1492 addr = (size_t *)P2ROUNDUP((uintptr_t)raddr + hdrsize, align);
1493 1493 ASSERT((uintptr_t)addr + size - (uintptr_t)raddr <= rsize);
1494 1494
1495 1495 addr[-4] = (size_t)cp;
1496 1496 addr[-3] = (size_t)vmp;
1497 1497 addr[-2] = (size_t)raddr;
1498 1498 addr[-1] = rsize;
1499 1499
1500 1500 return (addr);
1501 1501 }
1502 1502
/*
 * Release memory obtained from kalloca().  A page-aligned address that
 * is registered in ctgtree is a contig_alloc() allocation; everything
 * else carries the four-word kalloca() header immediately below the
 * pointer: [-4] kmem cache (NULL => vmem), [-3] arena, [-2] raw
 * address, [-1] raw size.
 */
static void
kfreea(void *addr)
{
	size_t size;

	if (!((uintptr_t)addr & PAGEOFFSET) && (size = getctgsz(addr))) {
		contig_free(addr, size);
	} else {
		size_t *saddr = addr;
		if (saddr[-4] == 0)
			vmem_free((vmem_t *)saddr[-3], (void *)saddr[-2],
			    saddr[-1]);
		else
			kmem_cache_free((kmem_cache_t *)saddr[-4],
			    (void *)saddr[-2]);
	}
}
1520 1520
/*
 * Translate device access attributes to HAT access attributes.
 * Intentionally a no-op on i86pc; provided for platform interface
 * compatibility.
 */
/*ARGSUSED*/
void
i_ddi_devacc_to_hatacc(ddi_device_acc_attr_t *devaccp, uint_t *hataccp)
{
}
1526 1526
1527 1527 /*
1528 1528 * Check if the specified cache attribute is supported on the platform.
↓ open down ↓ |
1528 lines elided |
↑ open up ↑ |
1529 1529 * This function must be called before i_ddi_cacheattr_to_hatacc().
1530 1530 */
1531 1531 boolean_t
1532 1532 i_ddi_check_cache_attr(uint_t flags)
1533 1533 {
1534 1534 /*
1535 1535 * The cache attributes are mutually exclusive. Any combination of
1536 1536 * the attributes leads to a failure.
1537 1537 */
1538 1538 uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
1539 - if ((cache_attr != 0) && ((cache_attr & (cache_attr - 1)) != 0))
1539 + if ((cache_attr != 0) && !ISP2(cache_attr))
1540 1540 return (B_FALSE);
1541 1541
1542 1542 /* All cache attributes are supported on X86/X64 */
1543 1543 if (cache_attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_CACHED |
1544 1544 IOMEM_DATA_UC_WR_COMBINE))
1545 1545 return (B_TRUE);
1546 1546
1547 1547 /* undefined attributes */
1548 1548 return (B_FALSE);
1549 1549 }
1550 1550
/*
 * set HAT cache attributes from the cache attributes
 *
 * Maps an IOMEM_* cache attribute to the corresponding HAT ordering
 * bits in *hataccp, clearing the old ordering bits first.  Write
 * combining silently degrades to uncached when the CPU lacks PAT.
 */
void
i_ddi_cacheattr_to_hatacc(uint_t flags, uint_t *hataccp)
{
	uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
	static char *fname = "i_ddi_cacheattr_to_hatacc";

	/*
	 * If write-combining is not supported, then it falls back
	 * to uncacheable.
	 */
	if (cache_attr == IOMEM_DATA_UC_WR_COMBINE &&
	    !is_x86_feature(x86_featureset, X86FSET_PAT))
		cache_attr = IOMEM_DATA_UNCACHED;

	/*
	 * set HAT attrs according to the cache attrs.
	 */
	switch (cache_attr) {
	case IOMEM_DATA_UNCACHED:
		*hataccp &= ~HAT_ORDER_MASK;
		*hataccp |= (HAT_STRICTORDER | HAT_PLAT_NOCACHE);
		break;
	case IOMEM_DATA_UC_WR_COMBINE:
		*hataccp &= ~HAT_ORDER_MASK;
		*hataccp |= (HAT_MERGING_OK | HAT_PLAT_NOCACHE);
		break;
	case IOMEM_DATA_CACHED:
		*hataccp &= ~HAT_ORDER_MASK;
		*hataccp |= HAT_UNORDERED_OK;
		break;
	/*
	 * This case must not occur because the cache attribute is scrutinized
	 * before this function is called.
	 */
	default:
		/*
		 * set cacheable to hat attrs.
		 */
		*hataccp &= ~HAT_ORDER_MASK;
		*hataccp |= HAT_UNORDERED_OK;
		cmn_err(CE_WARN, "%s: cache_attr=0x%x is ignored.",
		    fname, cache_attr);
	}
}
1596 1596
1597 1597 /*
1598 1598 * This should actually be called i_ddi_dma_mem_alloc. There should
1599 1599 * also be an i_ddi_pio_mem_alloc. i_ddi_dma_mem_alloc should call
1600 1600 * through the device tree with the DDI_CTLOPS_DMA_ALIGN ctl ops to
1601 1601 * get alignment requirements for DMA memory. i_ddi_pio_mem_alloc
1602 1602 * should use DDI_CTLOPS_PIO_ALIGN. Since we only have i_ddi_mem_alloc
1603 1603 * so far which is used for both, DMA and PIO, we have to use the DMA
1604 1604 * ctl ops to make everybody happy.
1605 1605 */
1606 1606 /*ARGSUSED*/
1607 1607 int
1608 1608 i_ddi_mem_alloc(dev_info_t *dip, ddi_dma_attr_t *attr,
1609 1609 size_t length, int cansleep, int flags,
1610 1610 ddi_device_acc_attr_t *accattrp, caddr_t *kaddrp,
1611 1611 size_t *real_length, ddi_acc_hdl_t *ap)
1612 1612 {
1613 1613 caddr_t a;
1614 1614 int iomin;
1615 1615 ddi_acc_impl_t *iap;
1616 1616 int physcontig = 0;
1617 1617 pgcnt_t npages;
1618 1618 pgcnt_t minctg;
1619 1619 uint_t order;
↓ open down ↓ |
70 lines elided |
↑ open up ↑ |
1620 1620 int e;
1621 1621
1622 1622 /*
1623 1623 * Check legality of arguments
1624 1624 */
1625 1625 if (length == 0 || kaddrp == NULL || attr == NULL) {
1626 1626 return (DDI_FAILURE);
1627 1627 }
1628 1628
1629 1629 if (attr->dma_attr_minxfer == 0 || attr->dma_attr_align == 0 ||
1630 - (attr->dma_attr_align & (attr->dma_attr_align - 1)) ||
1631 - (attr->dma_attr_minxfer & (attr->dma_attr_minxfer - 1))) {
1632 - return (DDI_FAILURE);
1630 + !ISP2(attr->dma_attr_align) || !ISP2(attr->dma_attr_minxfer)) {
1631 + return (DDI_FAILURE);
1633 1632 }
1634 1633
1635 1634 /*
1636 1635 * figure out most restrictive alignment requirement
1637 1636 */
1638 1637 iomin = attr->dma_attr_minxfer;
1639 1638 iomin = maxbit(iomin, attr->dma_attr_align);
1640 1639 if (iomin == 0)
1641 1640 return (DDI_FAILURE);
1642 1641
1643 1642 ASSERT((iomin & (iomin - 1)) == 0);
1644 1643
1645 1644 /*
1646 1645 * if we allocate memory with IOMEM_DATA_UNCACHED or
1647 1646 * IOMEM_DATA_UC_WR_COMBINE, make sure we allocate a page aligned
1648 1647 * memory that ends on a page boundry.
1649 1648 * Don't want to have to different cache mappings to the same
1650 1649 * physical page.
1651 1650 */
1652 1651 if (OVERRIDE_CACHE_ATTR(flags)) {
1653 1652 iomin = (iomin + MMU_PAGEOFFSET) & MMU_PAGEMASK;
1654 1653 length = (length + MMU_PAGEOFFSET) & (size_t)MMU_PAGEMASK;
1655 1654 }
1656 1655
1657 1656 /*
1658 1657 * Determine if we need to satisfy the request for physically
1659 1658 * contiguous memory or alignments larger than pagesize.
1660 1659 */
1661 1660 npages = btopr(length + attr->dma_attr_align);
1662 1661 minctg = howmany(npages, attr->dma_attr_sgllen);
1663 1662
1664 1663 if (minctg > 1) {
1665 1664 uint64_t pfnseg = attr->dma_attr_seg >> PAGESHIFT;
1666 1665 /*
1667 1666 * verify that the minimum contig requirement for the
1668 1667 * actual length does not cross segment boundary.
1669 1668 */
1670 1669 length = P2ROUNDUP_TYPED(length, attr->dma_attr_minxfer,
1671 1670 size_t);
1672 1671 npages = btopr(length);
1673 1672 minctg = howmany(npages, attr->dma_attr_sgllen);
1674 1673 if (minctg > pfnseg + 1)
1675 1674 return (DDI_FAILURE);
1676 1675 physcontig = 1;
1677 1676 } else {
1678 1677 length = P2ROUNDUP_TYPED(length, iomin, size_t);
1679 1678 }
1680 1679
1681 1680 /*
1682 1681 * Allocate the requested amount from the system.
1683 1682 */
1684 1683 a = kalloca(length, iomin, cansleep, physcontig, attr);
1685 1684
1686 1685 if ((*kaddrp = a) == NULL)
1687 1686 return (DDI_FAILURE);
1688 1687
1689 1688 /*
1690 1689 * if we to modify the cache attributes, go back and muck with the
1691 1690 * mappings.
1692 1691 */
1693 1692 if (OVERRIDE_CACHE_ATTR(flags)) {
1694 1693 order = 0;
1695 1694 i_ddi_cacheattr_to_hatacc(flags, &order);
1696 1695 e = kmem_override_cache_attrs(a, length, order);
1697 1696 if (e != 0) {
1698 1697 kfreea(a);
1699 1698 return (DDI_FAILURE);
1700 1699 }
1701 1700 }
1702 1701
1703 1702 if (real_length) {
1704 1703 *real_length = length;
1705 1704 }
1706 1705 if (ap) {
1707 1706 /*
1708 1707 * initialize access handle
1709 1708 */
1710 1709 iap = (ddi_acc_impl_t *)ap->ah_platform_private;
1711 1710 iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1712 1711 impl_acc_hdl_init(ap);
1713 1712 }
1714 1713
1715 1714 return (DDI_SUCCESS);
1716 1715 }
1717 1716
/* ARGSUSED */
/*
 * Free memory allocated by i_ddi_mem_alloc().  If the allocation had
 * its cache attributes overridden, restore cacheable mappings first so
 * the pages are safe to return to the general pool; on failure the
 * memory is deliberately leaked rather than released with dangerous
 * mappings.
 */
void
i_ddi_mem_free(caddr_t kaddr, ddi_acc_hdl_t *ap)
{
	if (ap != NULL) {
		/*
		 * if we modified the cache attributes on alloc, go back and
		 * fix them since this memory could be returned to the
		 * general pool.
		 */
		if (OVERRIDE_CACHE_ATTR(ap->ah_xfermodes)) {
			uint_t order = 0;
			int e;
			i_ddi_cacheattr_to_hatacc(IOMEM_DATA_CACHED, &order);
			e = kmem_override_cache_attrs(kaddr, ap->ah_len, order);
			if (e != 0) {
				cmn_err(CE_WARN, "i_ddi_mem_free() failed to "
				    "override cache attrs, memory leaked\n");
				return;
			}
		}
	}
	kfreea(kaddr);
}
1742 1741
1743 1742 /*
1744 1743 * Access Barriers
1745 1744 *
1746 1745 */
/*ARGSUSED*/
/*
 * On-trap protection for peek/poke is not supported on i86pc; always
 * report failure so callers fall back to other mechanisms.
 */
int
i_ddi_ontrap(ddi_acc_handle_t hp)
{
	return (DDI_FAILURE);
}
1753 1752
/*ARGSUSED*/
/*
 * Counterpart of i_ddi_ontrap(); nothing to undo on i86pc.
 */
void
i_ddi_notrap(ddi_acc_handle_t hp)
{
}
1759 1758
1760 1759
1761 1760 /*
1762 1761 * Misc Functions
1763 1762 */
1764 1763
1765 1764 /*
1766 1765 * Implementation instance override functions
1767 1766 *
1768 1767 * No override on i86pc
1769 1768 */
/*ARGSUSED*/
/*
 * No platform instance-number override on i86pc: returning (uint_t)-1
 * tells the framework to assign instances normally.
 */
uint_t
impl_assign_instance(dev_info_t *dip)
{
	return ((uint_t)-1);
}
1776 1775
/*ARGSUSED*/
/*
 * Decide whether a device's instance number should be kept (not
 * persisted).  DDI_FAILURE (the default) means the instance is
 * persisted as usual; under a Xen dom0, children of "xpvd" return
 * DDI_SUCCESS so their instance numbers are not written out.
 */
int
impl_keep_instance(dev_info_t *dip)
{

#if defined(__xpv)
	/*
	 * Do not persist instance numbers assigned to devices in dom0
	 */
	dev_info_t *pdip;
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		if (((pdip = ddi_get_parent(dip)) != NULL) &&
		    (strcmp(ddi_get_name(pdip), "xpvd") == 0))
			return (DDI_SUCCESS);
	}
#endif
	return (DDI_FAILURE);
}
1795 1794
/*ARGSUSED*/
/*
 * Instance numbers are never freed early on i86pc.
 */
int
impl_free_instance(dev_info_t *dip)
{
	return (DDI_FAILURE);
}
1802 1801
/*ARGSUSED*/
/*
 * CPU compatibility check for a device node; all devices are deemed
 * compatible on i86pc.
 */
int
impl_check_cpu(dev_info_t *devi)
{
	return (DDI_SUCCESS);
}
1809 1808
1810 1809 /*
1811 1810 * Referenced in common/cpr_driver.c: Power off machine.
1812 1811 * Don't know how to power off i86pc.
1813 1812 */
/* Intentionally empty: no platform power-off mechanism is implemented. */
void
arch_power_down()
{}
1817 1816
1818 1817 /*
1819 1818 * Copy name to property_name, since name
1820 1819 * is in the low address range below kernelbase.
1821 1820 */
1822 1821 static void
1823 1822 copy_boot_str(const char *boot_str, char *kern_str, int len)
1824 1823 {
1825 1824 int i = 0;
1826 1825
1827 1826 while (i < len - 1 && boot_str[i] != '\0') {
1828 1827 kern_str[i] = boot_str[i];
1829 1828 i++;
1830 1829 }
1831 1830
1832 1831 kern_str[i] = 0; /* null terminate */
1833 1832 if (boot_str[i] != '\0')
1834 1833 cmn_err(CE_WARN,
1835 1834 "boot property string is truncated to %s", kern_str);
1836 1835 }
1837 1836
1838 1837 static void
1839 1838 get_boot_properties(void)
1840 1839 {
1841 1840 extern char hw_provider[];
1842 1841 dev_info_t *devi;
1843 1842 char *name;
1844 1843 int length;
1845 1844 char property_name[50], property_val[50];
1846 1845 void *bop_staging_area;
1847 1846
1848 1847 bop_staging_area = kmem_zalloc(MMU_PAGESIZE, KM_NOSLEEP);
1849 1848
1850 1849 /*
1851 1850 * Import "root" properties from the boot.
1852 1851 *
1853 1852 * We do this by invoking BOP_NEXTPROP until the list
1854 1853 * is completely copied in.
1855 1854 */
1856 1855
1857 1856 devi = ddi_root_node();
1858 1857 for (name = BOP_NEXTPROP(bootops, ""); /* get first */
1859 1858 name; /* NULL => DONE */
1860 1859 name = BOP_NEXTPROP(bootops, name)) { /* get next */
1861 1860
1862 1861 /* copy string to memory above kernelbase */
1863 1862 copy_boot_str(name, property_name, 50);
1864 1863
1865 1864 /*
1866 1865 * Skip vga properties. They will be picked up later
1867 1866 * by get_vga_properties.
1868 1867 */
1869 1868 if (strcmp(property_name, "display-edif-block") == 0 ||
1870 1869 strcmp(property_name, "display-edif-id") == 0) {
1871 1870 continue;
1872 1871 }
1873 1872
1874 1873 length = BOP_GETPROPLEN(bootops, property_name);
1875 1874 if (length == 0)
1876 1875 continue;
1877 1876 if (length > MMU_PAGESIZE) {
1878 1877 cmn_err(CE_NOTE,
1879 1878 "boot property %s longer than 0x%x, ignored\n",
1880 1879 property_name, MMU_PAGESIZE);
1881 1880 continue;
1882 1881 }
1883 1882 BOP_GETPROP(bootops, property_name, bop_staging_area);
1884 1883
1885 1884 /*
1886 1885 * special properties:
1887 1886 * si-machine, si-hw-provider
1888 1887 * goes to kernel data structures.
1889 1888 * bios-boot-device and stdout
1890 1889 * goes to hardware property list so it may show up
1891 1890 * in the prtconf -vp output. This is needed by
1892 1891 * Install/Upgrade. Once we fix install upgrade,
1893 1892 * this can be taken out.
1894 1893 */
1895 1894 if (strcmp(name, "si-machine") == 0) {
1896 1895 (void) strncpy(utsname.machine, bop_staging_area,
1897 1896 SYS_NMLN);
1898 1897 utsname.machine[SYS_NMLN - 1] = (char)NULL;
1899 1898 } else if (strcmp(name, "si-hw-provider") == 0) {
1900 1899 (void) strncpy(hw_provider, bop_staging_area, SYS_NMLN);
1901 1900 hw_provider[SYS_NMLN - 1] = (char)NULL;
1902 1901 } else if (strcmp(name, "bios-boot-device") == 0) {
1903 1902 copy_boot_str(bop_staging_area, property_val, 50);
1904 1903 (void) ndi_prop_update_string(DDI_DEV_T_NONE, devi,
1905 1904 property_name, property_val);
1906 1905 } else if (strcmp(name, "stdout") == 0) {
1907 1906 (void) ndi_prop_update_int(DDI_DEV_T_NONE, devi,
1908 1907 property_name, *((int *)bop_staging_area));
1909 1908 } else {
1910 1909 /* Property type unknown, use old prop interface */
1911 1910 (void) e_ddi_prop_create(DDI_DEV_T_NONE, devi,
1912 1911 DDI_PROP_CANSLEEP, property_name, bop_staging_area,
1913 1912 length);
1914 1913 }
1915 1914 }
1916 1915
1917 1916 kmem_free(bop_staging_area, MMU_PAGESIZE);
1918 1917 }
1919 1918
/*
 * Attach the boot loader's VGA/EDID properties to the console
 * framebuffer driver's first devinfo node, along with fixed
 * display-type/video-adapter-type defaults expected by kdmconfig.
 * Silently does nothing if no known framebuffer driver is present.
 */
static void
get_vga_properties(void)
{
	dev_info_t *devi;
	major_t major;
	char *name;
	int length;
	char property_val[50];
	void *bop_staging_area;

	/*
	 * XXXX Hack Allert!
	 * There really needs to be a better way for identifying various
	 * console framebuffers and their related issues.  Till then,
	 * check for this one as a replacement to vgatext.
	 */
	major = ddi_name_to_major("ragexl");
	if (major == (major_t)-1) {
		major = ddi_name_to_major("vgatext");
		if (major == (major_t)-1)
			return;
	}
	devi = devnamesp[major].dn_head;
	if (devi == NULL)
		return;

	bop_staging_area = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Import "vga" properties from the boot.
	 */
	name = "display-edif-block";
	length = BOP_GETPROPLEN(bootops, name);
	if (length > 0 && length < MMU_PAGESIZE) {
		BOP_GETPROP(bootops, name, bop_staging_area);
		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
		    devi, name, bop_staging_area, length);
	}

	/*
	 * kdmconfig is also looking for display-type and
	 * video-adapter-type.  We default to color and svga.
	 *
	 * Could it be "monochrome", "vga"?
	 * Nah, you've got to come to the 21st century...
	 * And you can set monitor type manually in kdmconfig
	 * if you are really an old junky.
	 */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE,
	    devi, "display-type", "color");
	(void) ndi_prop_update_string(DDI_DEV_T_NONE,
	    devi, "video-adapter-type", "svga");

	name = "display-edif-id";
	length = BOP_GETPROPLEN(bootops, name);
	if (length > 0 && length < MMU_PAGESIZE) {
		BOP_GETPROP(bootops, name, bop_staging_area);
		copy_boot_str(bop_staging_area, property_val, length);
		(void) ndi_prop_update_string(DDI_DEV_T_NONE,
		    devi, name, property_val);
	}

	kmem_free(bop_staging_area, MMU_PAGESIZE);
}
1984 1983
1985 1984
1986 1985 /*
1987 1986 * This is temporary, but absolutely necessary. If we are being
1988 1987 * booted with a device tree created by the DevConf project's bootconf
1989 1988 * program, then we have device information nodes that reflect
1990 1989 * reality. At this point in time in the Solaris release schedule, the
1991 1990 * kernel drivers aren't prepared for reality. They still depend on their
1992 1991 * own ad-hoc interpretations of the properties created when their .conf
1993 1992 * files were interpreted. These drivers use an "ignore-hardware-nodes"
1994 1993 * property to prevent them from using the nodes passed up from the bootconf
1995 1994 * device tree.
1996 1995 *
1997 1996 * Trying to assemble root file system drivers as we are booting from
1998 1997 * devconf will fail if the kernel driver is basing its name_addr's on the
1999 1998 * pseudo-node device info while the bootpath passed up from bootconf is using
2000 1999 * reality-based name_addrs. We help the boot along in this case by
2001 2000 * looking at the pre-bootconf bootpath and determining if we would have
2002 2001 * successfully matched if that had been the bootpath we had chosen.
2003 2002 *
2004 2003 * Note that we only even perform this extra check if we've booted
2005 2004 * using bootconf's 1275 compliant bootpath, this is the boot device, and
2006 2005 * we're trying to match the name_addr specified in the 1275 bootpath.
2007 2006 */
2008 2007
2009 2008 #define MAXCOMPONENTLEN 32
2010 2009
2011 2010 int
2012 2011 x86_old_bootpath_name_addr_match(dev_info_t *cdip, char *caddr, char *naddr)
2013 2012 {
2014 2013 /*
2015 2014 * There are multiple criteria to be met before we can even
2016 2015 * consider allowing a name_addr match here.
2017 2016 *
2018 2017 * 1) We must have been booted such that the bootconf program
2019 2018 * created device tree nodes and properties. This can be
2020 2019 * determined by examining the 'bootpath' property. This
2021 2020 * property will be a non-null string iff bootconf was
2022 2021 * involved in the boot.
2023 2022 *
2024 2023 * 2) The module that we want to match must be the boot device.
2025 2024 *
2026 2025 * 3) The instance of the module we are thinking of letting be
2027 2026 * our match must be ignoring hardware nodes.
2028 2027 *
2029 2028 * 4) The name_addr we want to match must be the name_addr
2030 2029 * specified in the 1275 bootpath.
2031 2030 */
2032 2031 static char bootdev_module[MAXCOMPONENTLEN];
2033 2032 static char bootdev_oldmod[MAXCOMPONENTLEN];
2034 2033 static char bootdev_newaddr[MAXCOMPONENTLEN];
2035 2034 static char bootdev_oldaddr[MAXCOMPONENTLEN];
2036 2035 static int quickexit;
2037 2036
2038 2037 char *daddr;
2039 2038 int dlen;
2040 2039
2041 2040 char *lkupname;
2042 2041 int rv = DDI_FAILURE;
2043 2042
2044 2043 if ((ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
2045 2044 "devconf-addr", (caddr_t)&daddr, &dlen) == DDI_PROP_SUCCESS) &&
2046 2045 (ddi_getprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
2047 2046 "ignore-hardware-nodes", -1) != -1)) {
2048 2047 if (strcmp(daddr, caddr) == 0) {
2049 2048 return (DDI_SUCCESS);
2050 2049 }
2051 2050 }
2052 2051
2053 2052 if (quickexit)
2054 2053 return (rv);
2055 2054
2056 2055 if (bootdev_module[0] == '\0') {
2057 2056 char *addrp, *eoaddrp;
2058 2057 char *busp, *modp, *atp;
2059 2058 char *bp1275, *bp;
2060 2059 int bp1275len, bplen;
2061 2060
2062 2061 bp1275 = bp = addrp = eoaddrp = busp = modp = atp = NULL;
2063 2062
2064 2063 if (ddi_getlongprop(DDI_DEV_T_ANY,
2065 2064 ddi_root_node(), 0, "bootpath",
2066 2065 (caddr_t)&bp1275, &bp1275len) != DDI_PROP_SUCCESS ||
2067 2066 bp1275len <= 1) {
2068 2067 /*
2069 2068 * We didn't boot from bootconf so we never need to
2070 2069 * do any special matches.
2071 2070 */
2072 2071 quickexit = 1;
2073 2072 if (bp1275)
2074 2073 kmem_free(bp1275, bp1275len);
2075 2074 return (rv);
2076 2075 }
2077 2076
2078 2077 if (ddi_getlongprop(DDI_DEV_T_ANY,
2079 2078 ddi_root_node(), 0, "boot-path",
2080 2079 (caddr_t)&bp, &bplen) != DDI_PROP_SUCCESS || bplen <= 1) {
2081 2080 /*
2082 2081 * No fallback position for matching. This is
2083 2082 * certainly unexpected, but we'll handle it
2084 2083 * just in case.
2085 2084 */
2086 2085 quickexit = 1;
2087 2086 kmem_free(bp1275, bp1275len);
2088 2087 if (bp)
2089 2088 kmem_free(bp, bplen);
2090 2089 return (rv);
2091 2090 }
2092 2091
2093 2092 /*
2094 2093 * Determine boot device module and 1275 name_addr
2095 2094 *
2096 2095 * bootpath assumed to be of the form /bus/module@name_addr
2097 2096 */
2098 2097 if (busp = strchr(bp1275, '/')) {
2099 2098 if (modp = strchr(busp + 1, '/')) {
2100 2099 if (atp = strchr(modp + 1, '@')) {
2101 2100 *atp = '\0';
2102 2101 addrp = atp + 1;
2103 2102 if (eoaddrp = strchr(addrp, '/'))
2104 2103 *eoaddrp = '\0';
2105 2104 }
2106 2105 }
2107 2106 }
2108 2107
2109 2108 if (modp && addrp) {
2110 2109 (void) strncpy(bootdev_module, modp + 1,
2111 2110 MAXCOMPONENTLEN);
2112 2111 bootdev_module[MAXCOMPONENTLEN - 1] = '\0';
2113 2112
2114 2113 (void) strncpy(bootdev_newaddr, addrp, MAXCOMPONENTLEN);
2115 2114 bootdev_newaddr[MAXCOMPONENTLEN - 1] = '\0';
2116 2115 } else {
2117 2116 quickexit = 1;
2118 2117 kmem_free(bp1275, bp1275len);
2119 2118 kmem_free(bp, bplen);
2120 2119 return (rv);
2121 2120 }
2122 2121
2123 2122 /*
2124 2123 * Determine fallback name_addr
2125 2124 *
2126 2125 * 10/3/96 - Also save fallback module name because it
2127 2126 * might actually be different than the current module
2128 2127 * name. E.G., ISA pnp drivers have new names.
2129 2128 *
2130 2129 * bootpath assumed to be of the form /bus/module@name_addr
2131 2130 */
2132 2131 addrp = NULL;
2133 2132 if (busp = strchr(bp, '/')) {
2134 2133 if (modp = strchr(busp + 1, '/')) {
2135 2134 if (atp = strchr(modp + 1, '@')) {
2136 2135 *atp = '\0';
2137 2136 addrp = atp + 1;
2138 2137 if (eoaddrp = strchr(addrp, '/'))
2139 2138 *eoaddrp = '\0';
2140 2139 }
2141 2140 }
2142 2141 }
2143 2142
2144 2143 if (modp && addrp) {
2145 2144 (void) strncpy(bootdev_oldmod, modp + 1,
2146 2145 MAXCOMPONENTLEN);
2147 2146 bootdev_module[MAXCOMPONENTLEN - 1] = '\0';
2148 2147
2149 2148 (void) strncpy(bootdev_oldaddr, addrp, MAXCOMPONENTLEN);
2150 2149 bootdev_oldaddr[MAXCOMPONENTLEN - 1] = '\0';
2151 2150 }
2152 2151
2153 2152 /* Free up the bootpath storage now that we're done with it. */
2154 2153 kmem_free(bp1275, bp1275len);
2155 2154 kmem_free(bp, bplen);
2156 2155
2157 2156 if (bootdev_oldaddr[0] == '\0') {
2158 2157 quickexit = 1;
2159 2158 return (rv);
2160 2159 }
2161 2160 }
2162 2161
2163 2162 if (((lkupname = ddi_get_name(cdip)) != NULL) &&
2164 2163 (strcmp(bootdev_module, lkupname) == 0 ||
2165 2164 strcmp(bootdev_oldmod, lkupname) == 0) &&
2166 2165 ((ddi_getprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
2167 2166 "ignore-hardware-nodes", -1) != -1) ||
2168 2167 ignore_hardware_nodes) &&
2169 2168 strcmp(bootdev_newaddr, caddr) == 0 &&
2170 2169 strcmp(bootdev_oldaddr, naddr) == 0) {
2171 2170 rv = DDI_SUCCESS;
2172 2171 }
2173 2172
2174 2173 return (rv);
2175 2174 }
2176 2175
2177 2176 /*
2178 2177 * Perform a copy from a memory mapped device (whose devinfo pointer is devi)
2179 2178 * separately mapped at devaddr in the kernel to a kernel buffer at kaddr.
2180 2179 */
2181 2180 /*ARGSUSED*/
2182 2181 int
2183 2182 e_ddi_copyfromdev(dev_info_t *devi,
2184 2183 off_t off, const void *devaddr, void *kaddr, size_t len)
2185 2184 {
2186 2185 bcopy(devaddr, kaddr, len);
2187 2186 return (0);
2188 2187 }
2189 2188
2190 2189 /*
2191 2190 * Perform a copy to a memory mapped device (whose devinfo pointer is devi)
2192 2191 * separately mapped at devaddr in the kernel from a kernel buffer at kaddr.
2193 2192 */
2194 2193 /*ARGSUSED*/
2195 2194 int
2196 2195 e_ddi_copytodev(dev_info_t *devi,
2197 2196 off_t off, const void *kaddr, void *devaddr, size_t len)
2198 2197 {
2199 2198 bcopy(kaddr, devaddr, len);
2200 2199 return (0);
2201 2200 }
2202 2201
2203 2202
/*
 * Store a single 1/2/4/8-byte value from host_addr to dev_addr inside
 * an on_trap() protected region, so that a faulting access yields
 * DDI_FAILURE instead of a panic.  Any other size also fails.
 */
static int
poke_mem(peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		switch (in_args->size) {
		case sizeof (uint8_t):
			*(uint8_t *)(in_args->dev_addr) =
			    *(uint8_t *)in_args->host_addr;
			break;

		case sizeof (uint16_t):
			*(uint16_t *)(in_args->dev_addr) =
			    *(uint16_t *)in_args->host_addr;
			break;

		case sizeof (uint32_t):
			*(uint32_t *)(in_args->dev_addr) =
			    *(uint32_t *)in_args->host_addr;
			break;

		case sizeof (uint64_t):
			*(uint64_t *)(in_args->dev_addr) =
			    *(uint64_t *)in_args->host_addr;
			break;

		default:
			/* Unsupported access width. */
			err = DDI_FAILURE;
			break;
		}
	} else
		/* The store trapped; report failure to the caller. */
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	return (err);
}
2245 2244
2246 2245
/*
 * Load a single 1/2/4/8-byte value from dev_addr into host_addr inside
 * an on_trap() protected region; a faulting access yields DDI_FAILURE
 * instead of a panic.  Any other size also fails.
 */
static int
peek_mem(peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		switch (in_args->size) {
		case sizeof (uint8_t):
			*(uint8_t *)in_args->host_addr =
			    *(uint8_t *)in_args->dev_addr;
			break;

		case sizeof (uint16_t):
			*(uint16_t *)in_args->host_addr =
			    *(uint16_t *)in_args->dev_addr;
			break;

		case sizeof (uint32_t):
			*(uint32_t *)in_args->host_addr =
			    *(uint32_t *)in_args->dev_addr;
			break;

		case sizeof (uint64_t):
			*(uint64_t *)in_args->host_addr =
			    *(uint64_t *)in_args->dev_addr;
			break;

		default:
			/* Unsupported access width. */
			err = DDI_FAILURE;
			break;
		}
	} else
		/* The load trapped; report failure to the caller. */
		err = DDI_FAILURE;

	no_trap();
	return (err);
}
2285 2284
2286 2285
2287 2286 /*
2288 2287 * This is called only to process peek/poke when the DIP is NULL.
2289 2288 * Assume that this is for memory, as nexi take care of device safe accesses.
2290 2289 */
2291 2290 int
2292 2291 peekpoke_mem(ddi_ctl_enum_t cmd, peekpoke_ctlops_t *in_args)
2293 2292 {
2294 2293 return (cmd == DDI_CTLOPS_PEEK ? peek_mem(in_args) : poke_mem(in_args));
2295 2294 }
2296 2295
/*
 * we've just done a cautious put/get. Check if it was successful by
 * calling pci_ereport_post() on all puts and for any gets that return -1
 *
 * Returns DDI_SUCCESS if the access is deemed clean, DDI_FAILURE (with
 * the handle's ndi_err_t filled in) when the FMA scan reports an error
 * on a non-default access.
 */
static int
pci_peekpoke_check_fma(dev_info_t *dip, void *arg, ddi_ctl_enum_t ctlop,
    void (*scan)(dev_info_t *, ddi_fm_error_t *))
{
	int rval = DDI_SUCCESS;
	peekpoke_ctlops_t *in_args = (peekpoke_ctlops_t *)arg;
	ddi_fm_error_t de;
	/* Same handle viewed two ways: impl (for ahi_err) and hdl (attrs). */
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	int check_err = 0;
	int repcount = in_args->repcount;

	/* Non-cautious pokes can't be checked; assume success. */
	if (ctlop == DDI_CTLOPS_POKE &&
	    hdlp->ah_acc.devacc_attr_access != DDI_CAUTIOUS_ACC)
		return (DDI_SUCCESS);

	/*
	 * Non-cautious peeks only warrant a scan if any element read back
	 * as all-ones (the classic failed-access signature).
	 * NOTE(review): host_addr is not advanced across repcount
	 * iterations here — confirm the handler leaves the last element
	 * at host_addr or that this re-check is intentional.
	 */
	if (ctlop == DDI_CTLOPS_PEEK &&
	    hdlp->ah_acc.devacc_attr_access != DDI_CAUTIOUS_ACC) {
		for (; repcount; repcount--) {
			switch (in_args->size) {
			case sizeof (uint8_t):
				if (*(uint8_t *)in_args->host_addr == 0xff)
					check_err = 1;
				break;
			case sizeof (uint16_t):
				if (*(uint16_t *)in_args->host_addr == 0xffff)
					check_err = 1;
				break;
			case sizeof (uint32_t):
				if (*(uint32_t *)in_args->host_addr ==
				    0xffffffff)
					check_err = 1;
				break;
			case sizeof (uint64_t):
				if (*(uint64_t *)in_args->host_addr ==
				    0xffffffffffffffff)
					check_err = 1;
				break;
			}
		}
		if (check_err == 0)
			return (DDI_SUCCESS);
	}
	/*
	 * for a cautious put or get or a non-cautious get that returned -1 call
	 * io framework to see if there really was an error
	 */
	bzero(&de, sizeof (ddi_fm_error_t));
	de.fme_version = DDI_FME_VERSION;
	de.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (hdlp->ah_acc.devacc_attr_access == DDI_CAUTIOUS_ACC) {
		de.fme_flag = DDI_FM_ERR_EXPECTED;
		de.fme_acc_handle = in_args->handle;
	} else if (hdlp->ah_acc.devacc_attr_access == DDI_DEFAULT_ACC) {
		/*
		 * We only get here with DDI_DEFAULT_ACC for config space gets.
		 * Non-hardened drivers may be probing the hardware and
		 * expecting -1 returned. So need to treat errors on
		 * DDI_DEFAULT_ACC as DDI_FM_ERR_EXPECTED.
		 */
		de.fme_flag = DDI_FM_ERR_EXPECTED;
		de.fme_acc_handle = in_args->handle;
	} else {
		/*
		 * Hardened driver doing protected accesses shouldn't
		 * get errors unless there's a hardware problem. Treat
		 * as nonfatal if there's an error, but set UNEXPECTED
		 * so we raise ereports on any errors and potentially
		 * fault the device
		 */
		de.fme_flag = DDI_FM_ERR_UNEXPECTED;
	}
	(void) scan(dip, &de);
	/* Record the error for the driver, except on default accesses. */
	if (hdlp->ah_acc.devacc_attr_access != DDI_DEFAULT_ACC &&
	    de.fme_status != DDI_FM_OK) {
		ndi_err_t *errp = (ndi_err_t *)hp->ahi_err;
		rval = DDI_FAILURE;
		errp->err_ena = de.fme_ena;
		errp->err_expected = de.fme_flag;
		errp->err_status = DDI_FM_NONFATAL;
	}
	return (rval);
}
2384 2383
/*
 * pci_peekpoke_check_nofma() is for when an error occurs on a register access
 * during pci_ereport_post(). We can't call pci_ereport_post() again or we'd
 * recurse, so assume all puts are OK and gets have failed if they return -1
 */
static int
pci_peekpoke_check_nofma(void *arg, ddi_ctl_enum_t ctlop)
{
	int rval = DDI_SUCCESS;
	peekpoke_ctlops_t *in_args = (peekpoke_ctlops_t *)arg;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	int repcount = in_args->repcount;

	/* Pokes cannot be verified without the ereport path; assume OK. */
	if (ctlop == DDI_CTLOPS_POKE)
		return (rval);

	/*
	 * An all-ones read-back is taken as evidence of a failed access.
	 * NOTE(review): host_addr is not advanced across iterations —
	 * confirm whether repcount > 1 is expected here.
	 */
	for (; repcount; repcount--) {
		switch (in_args->size) {
		case sizeof (uint8_t):
			if (*(uint8_t *)in_args->host_addr == 0xff)
				rval = DDI_FAILURE;
			break;
		case sizeof (uint16_t):
			if (*(uint16_t *)in_args->host_addr == 0xffff)
				rval = DDI_FAILURE;
			break;
		case sizeof (uint32_t):
			if (*(uint32_t *)in_args->host_addr == 0xffffffff)
				rval = DDI_FAILURE;
			break;
		case sizeof (uint64_t):
			if (*(uint64_t *)in_args->host_addr ==
			    0xffffffffffffffff)
				rval = DDI_FAILURE;
			break;
		}
	}
	/* Record error details for the driver on non-default accesses. */
	if (hdlp->ah_acc.devacc_attr_access != DDI_DEFAULT_ACC &&
	    rval == DDI_FAILURE) {
		ndi_err_t *errp = (ndi_err_t *)hp->ahi_err;
		errp->err_ena = fm_ena_generate(0, FM_ENA_FMT1);
		errp->err_expected = DDI_FM_ERR_UNEXPECTED;
		errp->err_status = DDI_FM_NONFATAL;
	}
	return (rval);
}
2432 2431
/*
 * Perform a cautious access via 'handler', then verify it with the FMA
 * machinery.  err_mutexp/peek_poke_mutexp serialize against
 * pci_ereport_post(); 'scan' is the FMA error scan callback.  Returns
 * the handler's status, downgraded by the post-access error check.
 */
int
pci_peekpoke_check(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result,
    int (*handler)(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
    void *), kmutex_t *err_mutexp, kmutex_t *peek_poke_mutexp,
    void (*scan)(dev_info_t *, ddi_fm_error_t *))
{
	int rval;
	peekpoke_ctlops_t *in_args = (peekpoke_ctlops_t *)arg;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;

	/*
	 * this function only supports cautious accesses, not peeks/pokes
	 * which don't have a handle
	 */
	if (hp == NULL)
		return (DDI_FAILURE);

	if (hp->ahi_acc_attr & DDI_ACCATTR_CONFIG_SPACE) {
		if (!mutex_tryenter(err_mutexp)) {
			/*
			 * As this may be a recursive call from within
			 * pci_ereport_post() we can't wait for the mutexes.
			 * Fortunately we know someone is already calling
			 * pci_ereport_post() which will handle the error bits
			 * for us, and as this is a config space access we can
			 * just do the access and check return value for -1
			 * using pci_peekpoke_check_nofma().
			 */
			rval = handler(dip, rdip, ctlop, arg, result);
			if (rval == DDI_SUCCESS)
				rval = pci_peekpoke_check_nofma(arg, ctlop);
			return (rval);
		}
		/*
		 * This can't be a recursive call. Drop the err_mutex and get
		 * both mutexes in the right order. If an error hasn't already
		 * been detected by the ontrap code, use pci_peekpoke_check_fma
		 * which will call pci_ereport_post() to check error status.
		 */
		mutex_exit(err_mutexp);
	}
	/* Lock order: peek_poke_mutexp first, then err_mutexp. */
	mutex_enter(peek_poke_mutexp);
	rval = handler(dip, rdip, ctlop, arg, result);
	if (rval == DDI_SUCCESS) {
		mutex_enter(err_mutexp);
		rval = pci_peekpoke_check_fma(dip, arg, ctlop, scan);
		mutex_exit(err_mutexp);
	}
	mutex_exit(peek_poke_mutexp);
	return (rval);
}
2485 2484
/*
 * Platform DDI setup: create the ramdisk (and optional pseudo-isa)
 * nodes, import boot properties, and kick off the bus probes.
 */
void
impl_setup_ddi(void)
{
#if !defined(__xpv)
	extern void startup_bios_disk(void);
	extern int post_fastreboot;
#endif
	dev_info_t *xdip, *isa_dip;
	rd_existing_t rd_mem_prop;
	int err;

	/* Create the ramdisk node and attach the boot archive extent. */
	ndi_devi_alloc_sleep(ddi_root_node(), "ramdisk",
	    (pnode_t)DEVI_SID_NODEID, &xdip);

	(void) BOP_GETPROP(bootops,
	    "ramdisk_start", (void *)&ramdisk_start);
	(void) BOP_GETPROP(bootops,
	    "ramdisk_end", (void *)&ramdisk_end);

#ifdef __xpv
	/* NOTE(review): presumably adjusts for the xen dom0 load offset. */
	ramdisk_start -= ONE_GIG;
	ramdisk_end -= ONE_GIG;
#endif
	rd_mem_prop.phys = ramdisk_start;
	rd_mem_prop.size = ramdisk_end - ramdisk_start + 1;

	(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, xdip,
	    RD_EXISTING_PROP_NAME, (uchar_t *)&rd_mem_prop,
	    sizeof (rd_mem_prop));
	err = ndi_devi_bind_driver(xdip, 0);
	ASSERT(err == 0);

	/* isa node */
	if (pseudo_isa) {
		ndi_devi_alloc_sleep(ddi_root_node(), "isa",
		    (pnode_t)DEVI_SID_NODEID, &isa_dip);
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, isa_dip,
		    "device_type", "isa");
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, isa_dip,
		    "bus-type", "isa");
		(void) ndi_devi_bind_driver(isa_dip, 0);
	}

	/*
	 * Read in the properties from the boot.
	 */
	get_boot_properties();

	/* note: the framebuffer should be enumerated here, if present */
	get_vga_properties();

	/*
	 * Check for administratively disabled drivers.
	 */
	check_driver_disable();

#if !defined(__xpv)
	if (!post_fastreboot)
		startup_bios_disk();
#endif
	/* do bus dependent probes. */
	impl_bus_initialprobe();
}
2549 2548
2550 2549 dev_t
2551 2550 getrootdev(void)
2552 2551 {
2553 2552 /*
2554 2553 * Precedence given to rootdev if set in /etc/system
2555 2554 */
2556 2555 if (root_is_svm == B_TRUE) {
2557 2556 return (ddi_pathname_to_dev_t(svm_bootpath));
2558 2557 }
2559 2558
2560 2559 /*
2561 2560 * Usually rootfs.bo_name is initialized by the
2562 2561 * the bootpath property from bootenv.rc, but
2563 2562 * defaults to "/ramdisk:a" otherwise.
2564 2563 */
2565 2564 return (ddi_pathname_to_dev_t(rootfs.bo_name));
2566 2565 }
2567 2566
/*
 * Singly-linked list of bus probe callbacks registered through
 * impl_bus_add_probe().  Each callback is invoked with 0 by
 * impl_bus_initialprobe() and with 1 by impl_bus_reprobe().
 */
static struct bus_probe {
	struct bus_probe *next;	/* next registered probe, NULL at tail */
	void (*probe)(int);	/* probe callback; arg 0=initial, 1=reprobe */
} *bus_probes;
2572 2571
2573 2572 void
2574 2573 impl_bus_add_probe(void (*func)(int))
2575 2574 {
2576 2575 struct bus_probe *probe;
2577 2576 struct bus_probe *lastprobe = NULL;
2578 2577
2579 2578 probe = kmem_alloc(sizeof (*probe), KM_SLEEP);
2580 2579 probe->probe = func;
2581 2580 probe->next = NULL;
2582 2581
2583 2582 if (!bus_probes) {
2584 2583 bus_probes = probe;
2585 2584 return;
2586 2585 }
2587 2586
2588 2587 lastprobe = bus_probes;
2589 2588 while (lastprobe->next)
2590 2589 lastprobe = lastprobe->next;
2591 2590 lastprobe->next = probe;
2592 2591 }
2593 2592
2594 2593 /*ARGSUSED*/
2595 2594 void
2596 2595 impl_bus_delete_probe(void (*func)(int))
2597 2596 {
2598 2597 struct bus_probe *prev = NULL;
2599 2598 struct bus_probe *probe = bus_probes;
2600 2599
2601 2600 while (probe) {
2602 2601 if (probe->probe == func)
2603 2602 break;
2604 2603 prev = probe;
2605 2604 probe = probe->next;
2606 2605 }
2607 2606
2608 2607 if (probe == NULL)
2609 2608 return;
2610 2609
2611 2610 if (prev)
2612 2611 prev->next = probe->next;
2613 2612 else
2614 2613 bus_probes = probe->next;
2615 2614
2616 2615 kmem_free(probe, sizeof (struct bus_probe));
2617 2616 }
2618 2617
2619 2618 /*
2620 2619 * impl_bus_initialprobe
2621 2620 * Modload the prom simulator, then let it probe to verify existence
2622 2621 * and type of PCI support.
2623 2622 */
2624 2623 static void
2625 2624 impl_bus_initialprobe(void)
2626 2625 {
2627 2626 struct bus_probe *probe;
2628 2627
2629 2628 /* load modules to install bus probes */
2630 2629 #if defined(__xpv)
2631 2630 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
2632 2631 if (modload("misc", "pci_autoconfig") < 0) {
2633 2632 panic("failed to load misc/pci_autoconfig");
2634 2633 }
2635 2634
2636 2635 if (modload("drv", "isa") < 0)
2637 2636 panic("failed to load drv/isa");
2638 2637 }
2639 2638
2640 2639 (void) modload("misc", "xpv_autoconfig");
2641 2640 #else
2642 2641 if (modload("misc", "pci_autoconfig") < 0) {
2643 2642 panic("failed to load misc/pci_autoconfig");
2644 2643 }
2645 2644
2646 2645 (void) modload("misc", "acpidev");
2647 2646
2648 2647 if (modload("drv", "isa") < 0)
2649 2648 panic("failed to load drv/isa");
2650 2649 #endif
2651 2650
2652 2651 probe = bus_probes;
2653 2652 while (probe) {
2654 2653 /* run the probe functions */
2655 2654 (*probe->probe)(0);
2656 2655 probe = probe->next;
2657 2656 }
2658 2657 }
2659 2658
2660 2659 /*
2661 2660 * impl_bus_reprobe
2662 2661 * Reprogram devices not set up by firmware.
2663 2662 */
2664 2663 static void
2665 2664 impl_bus_reprobe(void)
2666 2665 {
2667 2666 struct bus_probe *probe;
2668 2667
2669 2668 probe = bus_probes;
2670 2669 while (probe) {
2671 2670 /* run the probe function */
2672 2671 (*probe->probe)(1);
2673 2672 probe = probe->next;
2674 2673 }
2675 2674 }
2676 2675
2677 2676
2678 2677 /*
2679 2678 * The following functions ready a cautious request to go up to the nexus
2680 2679 * driver. It is up to the nexus driver to decide how to process the request.
2681 2680 * It may choose to call i_ddi_do_caut_get/put in this file, or do it
2682 2681 * differently.
2683 2682 */
2684 2683
2685 2684 static void
2686 2685 i_ddi_caut_getput_ctlops(ddi_acc_impl_t *hp, uint64_t host_addr,
2687 2686 uint64_t dev_addr, size_t size, size_t repcount, uint_t flags,
2688 2687 ddi_ctl_enum_t cmd)
2689 2688 {
2690 2689 peekpoke_ctlops_t cautacc_ctlops_arg;
2691 2690
2692 2691 cautacc_ctlops_arg.size = size;
2693 2692 cautacc_ctlops_arg.dev_addr = dev_addr;
2694 2693 cautacc_ctlops_arg.host_addr = host_addr;
2695 2694 cautacc_ctlops_arg.handle = (ddi_acc_handle_t)hp;
2696 2695 cautacc_ctlops_arg.repcount = repcount;
2697 2696 cautacc_ctlops_arg.flags = flags;
2698 2697
2699 2698 (void) ddi_ctlops(hp->ahi_common.ah_dip, hp->ahi_common.ah_dip, cmd,
2700 2699 &cautacc_ctlops_arg, NULL);
2701 2700 }
2702 2701
2703 2702 uint8_t
2704 2703 i_ddi_caut_get8(ddi_acc_impl_t *hp, uint8_t *addr)
2705 2704 {
2706 2705 uint8_t value;
2707 2706 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2708 2707 sizeof (uint8_t), 1, 0, DDI_CTLOPS_PEEK);
2709 2708
2710 2709 return (value);
2711 2710 }
2712 2711
2713 2712 uint16_t
2714 2713 i_ddi_caut_get16(ddi_acc_impl_t *hp, uint16_t *addr)
2715 2714 {
2716 2715 uint16_t value;
2717 2716 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2718 2717 sizeof (uint16_t), 1, 0, DDI_CTLOPS_PEEK);
2719 2718
2720 2719 return (value);
2721 2720 }
2722 2721
2723 2722 uint32_t
2724 2723 i_ddi_caut_get32(ddi_acc_impl_t *hp, uint32_t *addr)
2725 2724 {
2726 2725 uint32_t value;
2727 2726 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2728 2727 sizeof (uint32_t), 1, 0, DDI_CTLOPS_PEEK);
2729 2728
2730 2729 return (value);
2731 2730 }
2732 2731
2733 2732 uint64_t
2734 2733 i_ddi_caut_get64(ddi_acc_impl_t *hp, uint64_t *addr)
2735 2734 {
2736 2735 uint64_t value;
2737 2736 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2738 2737 sizeof (uint64_t), 1, 0, DDI_CTLOPS_PEEK);
2739 2738
2740 2739 return (value);
2741 2740 }
2742 2741
2743 2742 void
2744 2743 i_ddi_caut_put8(ddi_acc_impl_t *hp, uint8_t *addr, uint8_t value)
2745 2744 {
2746 2745 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2747 2746 sizeof (uint8_t), 1, 0, DDI_CTLOPS_POKE);
2748 2747 }
2749 2748
2750 2749 void
2751 2750 i_ddi_caut_put16(ddi_acc_impl_t *hp, uint16_t *addr, uint16_t value)
2752 2751 {
2753 2752 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2754 2753 sizeof (uint16_t), 1, 0, DDI_CTLOPS_POKE);
2755 2754 }
2756 2755
2757 2756 void
2758 2757 i_ddi_caut_put32(ddi_acc_impl_t *hp, uint32_t *addr, uint32_t value)
2759 2758 {
2760 2759 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2761 2760 sizeof (uint32_t), 1, 0, DDI_CTLOPS_POKE);
2762 2761 }
2763 2762
2764 2763 void
2765 2764 i_ddi_caut_put64(ddi_acc_impl_t *hp, uint64_t *addr, uint64_t value)
2766 2765 {
2767 2766 i_ddi_caut_getput_ctlops(hp, (uintptr_t)&value, (uintptr_t)addr,
2768 2767 sizeof (uint64_t), 1, 0, DDI_CTLOPS_POKE);
2769 2768 }
2770 2769
2771 2770 void
2772 2771 i_ddi_caut_rep_get8(ddi_acc_impl_t *hp, uint8_t *host_addr, uint8_t *dev_addr,
2773 2772 size_t repcount, uint_t flags)
2774 2773 {
2775 2774 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2776 2775 sizeof (uint8_t), repcount, flags, DDI_CTLOPS_PEEK);
2777 2776 }
2778 2777
2779 2778 void
2780 2779 i_ddi_caut_rep_get16(ddi_acc_impl_t *hp, uint16_t *host_addr,
2781 2780 uint16_t *dev_addr, size_t repcount, uint_t flags)
2782 2781 {
2783 2782 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2784 2783 sizeof (uint16_t), repcount, flags, DDI_CTLOPS_PEEK);
2785 2784 }
2786 2785
2787 2786 void
2788 2787 i_ddi_caut_rep_get32(ddi_acc_impl_t *hp, uint32_t *host_addr,
2789 2788 uint32_t *dev_addr, size_t repcount, uint_t flags)
2790 2789 {
2791 2790 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2792 2791 sizeof (uint32_t), repcount, flags, DDI_CTLOPS_PEEK);
2793 2792 }
2794 2793
2795 2794 void
2796 2795 i_ddi_caut_rep_get64(ddi_acc_impl_t *hp, uint64_t *host_addr,
2797 2796 uint64_t *dev_addr, size_t repcount, uint_t flags)
2798 2797 {
2799 2798 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2800 2799 sizeof (uint64_t), repcount, flags, DDI_CTLOPS_PEEK);
2801 2800 }
2802 2801
2803 2802 void
2804 2803 i_ddi_caut_rep_put8(ddi_acc_impl_t *hp, uint8_t *host_addr, uint8_t *dev_addr,
2805 2804 size_t repcount, uint_t flags)
2806 2805 {
2807 2806 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2808 2807 sizeof (uint8_t), repcount, flags, DDI_CTLOPS_POKE);
2809 2808 }
2810 2809
2811 2810 void
2812 2811 i_ddi_caut_rep_put16(ddi_acc_impl_t *hp, uint16_t *host_addr,
2813 2812 uint16_t *dev_addr, size_t repcount, uint_t flags)
2814 2813 {
2815 2814 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2816 2815 sizeof (uint16_t), repcount, flags, DDI_CTLOPS_POKE);
2817 2816 }
2818 2817
2819 2818 void
2820 2819 i_ddi_caut_rep_put32(ddi_acc_impl_t *hp, uint32_t *host_addr,
2821 2820 uint32_t *dev_addr, size_t repcount, uint_t flags)
2822 2821 {
2823 2822 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2824 2823 sizeof (uint32_t), repcount, flags, DDI_CTLOPS_POKE);
2825 2824 }
2826 2825
2827 2826 void
2828 2827 i_ddi_caut_rep_put64(ddi_acc_impl_t *hp, uint64_t *host_addr,
2829 2828 uint64_t *dev_addr, size_t repcount, uint_t flags)
2830 2829 {
2831 2830 i_ddi_caut_getput_ctlops(hp, (uintptr_t)host_addr, (uintptr_t)dev_addr,
2832 2831 sizeof (uint64_t), repcount, flags, DDI_CTLOPS_POKE);
2833 2832 }
2834 2833
2835 2834 boolean_t
2836 2835 i_ddi_copybuf_required(ddi_dma_attr_t *attrp)
2837 2836 {
2838 2837 uint64_t hi_pa;
2839 2838
2840 2839 hi_pa = ((uint64_t)physmax + 1ull) << PAGESHIFT;
2841 2840 if (attrp->dma_attr_addr_hi < hi_pa) {
2842 2841 return (B_TRUE);
2843 2842 }
2844 2843
2845 2844 return (B_FALSE);
2846 2845 }
2847 2846
2848 2847 size_t
2849 2848 i_ddi_copybuf_size()
2850 2849 {
2851 2850 return (dma_max_copybuf_size);
2852 2851 }
2853 2852
/*
 * i_ddi_dma_max()
 *    returns the maximum DMA size which can be performed in a single DMA
 *    window taking into account the devices DMA constraints (attrp), the
 *    maximum copy buffer size (if applicable), and the worst case buffer
 *    fragmentation.
 */
/*ARGSUSED*/
uint32_t
i_ddi_dma_max(dev_info_t *dip, ddi_dma_attr_t *attrp)
{
	uint64_t maxxfer;


	/*
	 * take the min of maxxfer and the worst case fragmentation
	 * (e.g. every cookie <= 1 page)
	 */
	maxxfer = MIN(attrp->dma_attr_maxxfer,
	    ((uint64_t)(attrp->dma_attr_sgllen - 1) << PAGESHIFT));

	/*
	 * If the DMA engine can't reach all of memory, we also need to take
	 * the max size of the copybuf into consideration.
	 */
	if (i_ddi_copybuf_required(attrp)) {
		maxxfer = MIN(i_ddi_copybuf_size(), maxxfer);
	}

	/*
	 * we only return a 32-bit value. Make sure it's not -1. Round to a
	 * page so it won't be mistaken for an error value during debug.
	 */
	if (maxxfer >= 0xFFFFFFFF) {
		maxxfer = 0xFFFFF000;
	}

	/*
	 * make sure the value we return is a whole multiple of the
	 * granularity.
	 */
	if (attrp->dma_attr_granular > 1) {
		maxxfer = maxxfer - (maxxfer % attrp->dma_attr_granular);
	}

	return ((uint32_t)maxxfer);
}
2901 2900
/*
 * Device-ID translation hook; intentionally a no-op on this platform.
 */
/*ARGSUSED*/
void
translate_devid(dev_info_t *dip)
{
}
2907 2906
/*
 * Convert a physical address to a page frame number.  Normally a simple
 * page shift (mmu_btop); under a xen initial domain the frame is
 * obtained via xen_assign_pfn() instead (presumably translating a
 * machine frame to a pseudo-physical one — confirm against the xen
 * support code).
 */
pfn_t
i_ddi_paddr_to_pfn(paddr_t paddr)
{
	pfn_t pfn;

#ifdef __xpv
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		pfn = xen_assign_pfn(mmu_btop(paddr));
	} else {
		pfn = mmu_btop(paddr);
	}
#else
	pfn = mmu_btop(paddr);
#endif

	return (pfn);
}
↓ open down ↓ |
1282 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX