Print this page
Patch for illumos issue 6345: remove xhat support.
The hunks below are in context-diff format; line breaks within each hunk were lost when the page was extracted and have been restored.

*** 57,67 ****
  #include <sys/debug.h>
  #include <sys/tnf_probe.h>
  #include <sys/vtrace.h>
  
  #include <vm/hat.h>
- #include <vm/xhat.h>
  #include <vm/as.h>
  #include <vm/seg.h>
  #include <vm/seg_vn.h>
  #include <vm/seg_dev.h>
  #include <vm/seg_kmem.h>
--- 57,66 ----
*** 669,680 ****
  
  	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
  	as->a_hat = hat_alloc(as);	/* create hat for default system mmu */
  	AS_LOCK_EXIT(as, &as->a_lock);
  
- 	as->a_xhat = NULL;
- 
  	return (as);
  }
  
  /*
   * Free an address space data structure.
--- 668,677 ----
*** 685,695 ****
  void
  as_free(struct as *as)
  {
  	struct hat *hat = as->a_hat;
  	struct seg *seg, *next;
! 	int called = 0;
  
  top:
  	/*
  	 * Invoke ALL callbacks.  as_do_callbacks will do one callback
  	 * per call, and not return (-1) until the callback has completed.
--- 682,692 ----
  void
  as_free(struct as *as)
  {
  	struct hat *hat = as->a_hat;
  	struct seg *seg, *next;
! 	boolean_t free_started = B_FALSE;
  
  top:
  	/*
  	 * Invoke ALL callbacks.  as_do_callbacks will do one callback
  	 * per call, and not return (-1) until the callback has completed.
*** 697,717 ****
  	 */
  	mutex_enter(&as->a_contents);
  	while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
  		;
  
- 	/* This will prevent new XHATs from attaching to as */
- 	if (!called)
- 		AS_SETBUSY(as);
  	mutex_exit(&as->a_contents);
  	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
! 	if (!called) {
! 		called = 1;
  		hat_free_start(hat);
- 		if (as->a_xhat != NULL)
- 			xhat_free_start_all(as);
  	}
  
  	for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
  		int err;
  
  		next = AS_SEGNEXT(as, seg);
--- 694,709 ----
  	 */
  	mutex_enter(&as->a_contents);
  	while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
  		;
  
  	mutex_exit(&as->a_contents);
  	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
! 	if (!free_started) {
! 		free_started = B_TRUE;
  		hat_free_start(hat);
  	}
  
  	for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
  		int err;
  
  		next = AS_SEGNEXT(as, seg);
*** 757,768 ****
  			 */
  			ASSERT(err == 0);
  		}
  	}
  	hat_free_end(hat);
- 	if (as->a_xhat != NULL)
- 		xhat_free_end_all(as);
  	AS_LOCK_EXIT(as, &as->a_lock);
  
  	/* /proc stuff */
  	ASSERT(avl_numnodes(&as->a_wpage) == 0);
  	if (as->a_objectdir) {
--- 749,758 ----
*** 792,809 ****
  
  	newas->a_userlimit = as->a_userlimit;
  	newas->a_proc = forkedproc;
  
  	AS_LOCK_ENTER(newas, &newas->a_lock, RW_WRITER);
  
- 	/* This will prevent new XHATs from attaching */
- 	mutex_enter(&as->a_contents);
- 	AS_SETBUSY(as);
- 	mutex_exit(&as->a_contents);
- 	mutex_enter(&newas->a_contents);
- 	AS_SETBUSY(newas);
- 	mutex_exit(&newas->a_contents);
- 
  	(void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
  
  	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
  		if (seg->s_flags & S_PURGE) {
--- 782,791 ----
*** 813,825 ****
  
  		newseg = seg_alloc(newas, seg->s_base, seg->s_size);
  		if (newseg == NULL) {
  			AS_LOCK_EXIT(newas, &newas->a_lock);
  			as_setwatch(as);
- 			mutex_enter(&as->a_contents);
- 			AS_CLRBUSY(as);
- 			mutex_exit(&as->a_contents);
  			AS_LOCK_EXIT(as, &as->a_lock);
  			as_free(newas);
  			return (-1);
  		}
  		if ((error = SEGOP_DUP(seg, newseg)) != 0) {
--- 795,804 ----
*** 827,839 ****
  			 * We call seg_free() on the new seg
  			 * because the segment is not set up
  			 * completely; i.e. it has no ops.
  			 */
  			as_setwatch(as);
- 			mutex_enter(&as->a_contents);
- 			AS_CLRBUSY(as);
- 			mutex_exit(&as->a_contents);
  			AS_LOCK_EXIT(as, &as->a_lock);
  			seg_free(newseg);
  			AS_LOCK_EXIT(newas, &newas->a_lock);
  			as_free(newas);
  			return (error);
--- 806,815 ----
*** 841,862 ****
  		newas->a_size += seg->s_size;
  	}
  	newas->a_resvsize = as->a_resvsize - purgesize;
  
  	error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
- 	if (as->a_xhat != NULL)
- 		error |= xhat_dup_all(as, newas, NULL, 0, HAT_DUP_ALL);
  
- 	mutex_enter(&newas->a_contents);
- 	AS_CLRBUSY(newas);
- 	mutex_exit(&newas->a_contents);
  	AS_LOCK_EXIT(newas, &newas->a_lock);
  	as_setwatch(as);
- 	mutex_enter(&as->a_contents);
- 	AS_CLRBUSY(as);
- 	mutex_exit(&as->a_contents);
  	AS_LOCK_EXIT(as, &as->a_lock);
  
  	if (error != 0) {
  		as_free(newas);
  		return (error);
  	}
--- 817,830 ----
*** 878,916 ****
  	faultcode_t res = 0;
  	caddr_t addrsav;
  	struct seg *segsav;
  	int as_lock_held;
  	klwp_t *lwp = ttolwp(curthread);
- 	int is_xhat = 0;
- 	int holding_wpage = 0;
- 	extern struct seg_ops   segdev_ops;
- 
- 
- 	if (as->a_hat != hat) {
- 		/* This must be an XHAT then */
- 		is_xhat = 1;
- 		if ((type != F_INVAL) || (as == &kas))
- 			return (FC_NOSUPPORT);
- 	}
  
  retry:
- 	if (!is_xhat) {
  		/*
! 		 * Indicate that the lwp is not to be stopped while waiting
! 		 * for a pagefault.  This is to avoid deadlock while debugging
! 		 * a process via /proc over NFS (in particular).
  		 */
  		if (lwp != NULL)
  			lwp->lwp_nostop++;
  
  		/*
! 		 * same length must be used when we softlock and softunlock.
! 		 * We don't support softunlocking lengths less than
! 		 * the original length when there is largepage support.
! 		 * See seg_dev.c for more comments.
  		 */
  		switch (type) {
  		case F_SOFTLOCK:
  			CPU_STATS_ADD_K(vm, softlock, 1);
--- 846,872 ----
  	faultcode_t res = 0;
  	caddr_t addrsav;
  	struct seg *segsav;
  	int as_lock_held;
  	klwp_t *lwp = ttolwp(curthread);
  
  retry:
  	/*
! 	 * Indicate that the lwp is not to be stopped while waiting for a
! 	 * pagefault.  This is to avoid deadlock while debugging a process
! 	 * via /proc over NFS (in particular).
  	 */
  	if (lwp != NULL)
  		lwp->lwp_nostop++;
  
  	/*
! 	 * same length must be used when we softlock and softunlock.  We
! 	 * don't support softunlocking lengths less than the original length
! 	 * when there is largepage support.  See seg_dev.c for more
! 	 * comments.
  	 */
  	switch (type) {
  	case F_SOFTLOCK:
  		CPU_STATS_ADD_K(vm, softlock, 1);
*** 929,939 ****
  		if (as == &kas)
  			CPU_STATS_ADDQ(CPU, vm, kernel_asflt, 1);
  		CPU_STATS_EXIT_K();
  		break;
  	}
- 	}
  
  	/* Kernel probe */
  	TNF_PROBE_3(address_fault, "vm pagefault", /* CSTYLED */,
  	    tnf_opaque, address, addr,
  	    tnf_fault_type, fault_type, type,
--- 885,894 ----
*** 950,988 ****
  	 * filesystem, and then no-one will be able to exec new commands,
  	 * as exec'ing requires the write lock on the as.
  	 */
  	if (as == &kas && segkmap && segkmap->s_base <= raddr &&
  	    raddr + size < segkmap->s_base + segkmap->s_size) {
- 		/*
- 		 * if (as==&kas), this can't be XHAT: we've already returned
- 		 * FC_NOSUPPORT.
- 		 */
  		seg = segkmap;
  		as_lock_held = 0;
  	} else {
  		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
  
- 		if (is_xhat && avl_numnodes(&as->a_wpage) != 0) {
- 			/*
- 			 * Grab and hold the writers' lock on the as
- 			 * if the fault is to a watched page.
- 			 * This will keep CPUs from "peeking" at the
- 			 * address range while we're temporarily boosting
- 			 * the permissions for the XHAT device to
- 			 * resolve the fault in the segment layer.
- 			 *
- 			 * We could check whether faulted address
- 			 * is within a watched page and only then grab
- 			 * the writer lock, but this is simpler.
- 			 */
- 			AS_LOCK_EXIT(as, &as->a_lock);
- 			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
- 		}
  		seg = as_segat(as, raddr);
  		if (seg == NULL) {
  			AS_LOCK_EXIT(as, &as->a_lock);
! 			if ((lwp != NULL) && (!is_xhat))
  				lwp->lwp_nostop--;
  			return (FC_NOMAP);
  		}
  
  		as_lock_held = 1;
--- 905,923 ----
  	 * filesystem, and then no-one will be able to exec new commands,
  	 * as exec'ing requires the write lock on the as.
  	 */
  	if (as == &kas && segkmap && segkmap->s_base <= raddr &&
  	    raddr + size < segkmap->s_base + segkmap->s_size) {
  		seg = segkmap;
  		as_lock_held = 0;
  	} else {
  		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
  
  		seg = as_segat(as, raddr);
  		if (seg == NULL) {
  			AS_LOCK_EXIT(as, &as->a_lock);
! 			if (lwp != NULL)
  				lwp->lwp_nostop--;
  			return (FC_NOMAP);
  		}
  
  		as_lock_held = 1;
*** 1002,1040 ****
  
  		if (raddr + rsize > seg->s_base + seg->s_size)
  			ssize = seg->s_base + seg->s_size - raddr;
  		else
  			ssize = rsize;
- 		if (!is_xhat || (seg->s_ops != &segdev_ops)) {
- 
- 			if (is_xhat && avl_numnodes(&as->a_wpage) != 0 &&
- 			    pr_is_watchpage_as(raddr, rw, as)) {
- 				/*
- 				 * Handle watch pages.  If we're faulting on a
- 				 * watched page from an X-hat, we have to
- 				 * restore the original permissions while we
- 				 * handle the fault.
- 				 */
- 				as_clearwatch(as);
- 				holding_wpage = 1;
- 			}
- 
  			res = SEGOP_FAULT(hat, seg, raddr, ssize, type, rw);
- 
- 			/* Restore watchpoints */
- 			if (holding_wpage) {
- 				as_setwatch(as);
- 				holding_wpage = 0;
- 			}
- 
  			if (res != 0)
  				break;
- 		} else {
- 			/* XHAT does not support seg_dev */
- 			res = FC_NOSUPPORT;
- 			break;
- 		}
  	}
  
  	/*
  	 * If we were SOFTLOCKing and encountered a failure,
  	 * we must SOFTUNLOCK the range we already did. (Maybe we
--- 937,949 ----
*** 1059,1069 ****
  			    F_SOFTUNLOCK, S_OTHER);
  		}
  	}
  	if (as_lock_held)
  		AS_LOCK_EXIT(as, &as->a_lock);
! 	if ((lwp != NULL) && (!is_xhat))
  		lwp->lwp_nostop--;
  
  	/*
  	 * If the lower levels returned EDEADLK for a fault,
  	 * It means that we should retry the fault.  Let's wait
--- 968,978 ----
  			    F_SOFTUNLOCK, S_OTHER);
  		}
  	}
  	if (as_lock_held)
  		AS_LOCK_EXIT(as, &as->a_lock);
! 	if (lwp != NULL)
  		lwp->lwp_nostop--;
  
  	/*
  	 * If the lower levels returned EDEADLK for a fault,
  	 * It means that we should retry the fault.  Let's wait
*** 2164,2192 ****
  	if (as == NULL)
  		return (0);
  
  	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
  
- 	/* Prevent XHATs from attaching */
- 	mutex_enter(&as->a_contents);
- 	AS_SETBUSY(as);
- 	mutex_exit(&as->a_contents);
- 
- 
  	/*
  	 * Free all mapping resources associated with the address
  	 * space.  The segment-level swapout routines capitalize
  	 * on this unmapping by scavanging pages that have become
  	 * unmapped here.
  	 */
  	hat_swapout(as->a_hat);
- 	if (as->a_xhat != NULL)
- 		xhat_swapout_all(as);
- 
- 	mutex_enter(&as->a_contents);
- 	AS_CLRBUSY(as);
- 	mutex_exit(&as->a_contents);
  
  	/*
  	 * Call the swapout routines of all segments in the address
  	 * space to do the actual work, accumulating the amount of
  	 * space reclaimed.
--- 2073,2089 ----