945 sigktou(&t->t_hold, &un.holdmask);
946 prunlock(pnp);
947 if (copyout(&un.holdmask, cmaddr, sizeof (un.holdmask)))
948 error = EFAULT;
949 break;
950
951 case PIOCSHOLD: /* set signal-hold mask */
952 pr_sethold(pnp, &un.holdmask);
953 prunlock(pnp);
954 break;
955
956 case PIOCNMAP: /* get number of memory mappings */
957 {
958 int n;
959 struct as *as = p->p_as;
960
961 if ((p->p_flag & SSYS) || as == &kas)
962 n = 0;
963 else {
964 mutex_exit(&p->p_lock);
965 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
966 n = prnsegs(as, 0);
967 AS_LOCK_EXIT(as, &as->a_lock);
968 mutex_enter(&p->p_lock);
969 }
970 prunlock(pnp);
971 if (copyout(&n, cmaddr, sizeof (int)))
972 error = EFAULT;
973 break;
974 }
975
976 case PIOCMAP: /* get memory map information */
977 {
978 list_t iolhead;
979 struct as *as = p->p_as;
980
981 if ((p->p_flag & SSYS) || as == &kas) {
982 error = 0;
983 prunlock(pnp);
984 } else {
985 mutex_exit(&p->p_lock);
986 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
987 error = oprgetmap(p, &iolhead);
988 AS_LOCK_EXIT(as, &as->a_lock);
989 mutex_enter(&p->p_lock);
990 prunlock(pnp);
991
992 error = pr_iol_copyout_and_free(&iolhead,
993 &cmaddr, error);
994 }
995 /*
996 * The procfs PIOCMAP ioctl returns an all-zero buffer
997 * to indicate the end of the prmap[] array.
998 * Append it to whatever has already been copied out.
999 */
1000 bzero(&un.prmap, sizeof (un.prmap));
1001 if (!error && copyout(&un.prmap, cmaddr, sizeof (un.prmap)))
1002 error = EFAULT;
1003
1004 break;
1005 }
1006
1007 case PIOCGFAULT: /* get mask of traced faults */
1008 prassignset(&un.fltmask, &p->p_fltmask);
1640 if ((t = tp) == NULL)
1641 t = p->p_tlist;
1642 cur_time = gethrtime_unscaled();
1643 do {
1644 pct += cpu_update_pct(t, cur_time);
1645 if (tp != NULL) /* just do the one lwp */
1646 break;
1647 } while ((t = t->t_forw) != p->p_tlist);
1648
1649 psp->pr_pctcpu = prgetpctcpu(pct);
1650 psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15; /* [0..99] */
1651 if (psp->pr_cpu > 99)
1652 psp->pr_cpu = 99;
1653
1654 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
1655 psp->pr_size = 0;
1656 psp->pr_rssize = 0;
1657 psp->pr_pctmem = 0;
1658 } else {
1659 mutex_exit(&p->p_lock);
1660 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1661 psp->pr_size = (size32_t)btopr(as->a_resvsize);
1662 psp->pr_rssize = (size32_t)rm_asrss(as);
1663 psp->pr_pctmem = rm_pctmemory(as);
1664 AS_LOCK_EXIT(as, &as->a_lock);
1665 mutex_enter(&p->p_lock);
1666 }
1667 }
1668 psp->pr_bysize = (size32_t)ptob(psp->pr_size);
1669 psp->pr_byrssize = (size32_t)ptob(psp->pr_rssize);
1670
1671 /*
1672 * If we are looking at an LP64 process, zero out
1673 * the fields that cannot be represented in ILP32.
1674 */
1675 if (p->p_model != DATAMODEL_ILP32) {
1676 psp->pr_size = 0;
1677 psp->pr_rssize = 0;
1678 psp->pr_bysize = 0;
1679 psp->pr_byrssize = 0;
1680 psp->pr_argv = 0;
1681 psp->pr_envp = 0;
1682 }
1683 }
1684
2572 sigktou(&t->t_hold, &un32.holdmask);
2573 prunlock(pnp);
2574 if (copyout(&un32.holdmask, cmaddr, sizeof (un32.holdmask)))
2575 error = EFAULT;
2576 break;
2577
2578 case PIOCSHOLD: /* set signal-hold mask */
2579 pr_sethold(pnp, &un32.holdmask);
2580 prunlock(pnp);
2581 break;
2582
2583 case PIOCNMAP: /* get number of memory mappings */
2584 {
2585 int n;
2586 struct as *as = p->p_as;
2587
2588 if ((p->p_flag & SSYS) || as == &kas)
2589 n = 0;
2590 else {
2591 mutex_exit(&p->p_lock);
2592 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
2593 n = prnsegs(as, 0);
2594 AS_LOCK_EXIT(as, &as->a_lock);
2595 mutex_enter(&p->p_lock);
2596 }
2597 prunlock(pnp);
2598 if (copyout(&n, cmaddr, sizeof (int)))
2599 error = EFAULT;
2600 break;
2601 }
2602
2603 case PIOCMAP: /* get memory map information */
2604 {
2605 list_t iolhead;
2606 struct as *as = p->p_as;
2607
2608 if ((p->p_flag & SSYS) || as == &kas) {
2609 error = 0;
2610 prunlock(pnp);
2611 } else if (PROCESS_NOT_32BIT(p)) {
2612 error = EOVERFLOW;
2613 prunlock(pnp);
2614 } else {
2615 mutex_exit(&p->p_lock);
2616 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
2617 error = oprgetmap32(p, &iolhead);
2618 AS_LOCK_EXIT(as, &as->a_lock);
2619 mutex_enter(&p->p_lock);
2620 prunlock(pnp);
2621
2622 error = pr_iol_copyout_and_free(&iolhead,
2623 &cmaddr, error);
2624 }
2625 /*
2626 * The procfs PIOCMAP ioctl returns an all-zero buffer
2627 * to indicate the end of the prmap[] array.
2628 * Append it to whatever has already been copied out.
2629 */
2630 bzero(&un32.prmap, sizeof (un32.prmap));
2631 if (!error &&
2632 copyout(&un32.prmap, cmaddr, sizeof (un32.prmap)))
2633 error = EFAULT;
2634 break;
2635 }
2636
2637 case PIOCGFAULT: /* get mask of traced faults */
2638 prassignset(&un32.fltmask, &p->p_fltmask);
3125 struct as *as = p->p_as;
3126 int error = 0;
3127 struct seg *seg;
3128 struct vnode *xvp;
3129 int n;
3130
3131 /*
3132 * By fiat, a system process has no address space.
3133 */
3134 if ((p->p_flag & SSYS) || as == &kas) {
3135 error = EINVAL;
3136 } else if (cmaddr) {
3137 /*
3138 * We drop p_lock before grabbing the address
3139 * space lock in order to avoid a deadlock with
3140 * the clock thread. The process will not
3141 * disappear and its address space will not
3142 * change because it is marked P_PR_LOCK.
3143 */
3144 mutex_exit(&p->p_lock);
3145 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
3146 seg = as_segat(as, va);
3147 if (seg != NULL &&
3148 seg->s_ops == &segvn_ops &&
3149 SEGOP_GETVP(seg, va, &xvp) == 0 &&
3150 xvp != NULL &&
3151 xvp->v_type == VREG) {
3152 VN_HOLD(xvp);
3153 } else {
3154 error = EINVAL;
3155 }
3156 AS_LOCK_EXIT(as, &as->a_lock);
3157 mutex_enter(&p->p_lock);
3158 } else if ((xvp = p->p_exec) == NULL) {
3159 error = EINVAL;
3160 } else {
3161 VN_HOLD(xvp);
3162 }
3163
3164 prunlock(pnp);
3165
3166 if (error == 0) {
3167 if ((error = VOP_ACCESS(xvp, VREAD, 0, cr, NULL)) == 0)
3168 error = fassign(&xvp, FREAD, &n);
3169 if (error) {
3170 VN_RELE(xvp);
3171 } else {
3172 *rvalp = n;
3173 }
3174 }
3175
3176 return (error);
3479 if ((t = tp) == NULL)
3480 t = p->p_tlist;
3481 cur_time = gethrtime_unscaled();
3482 do {
3483 pct += cpu_update_pct(t, cur_time);
3484 if (tp != NULL) /* just do the one lwp */
3485 break;
3486 } while ((t = t->t_forw) != p->p_tlist);
3487
3488 psp->pr_pctcpu = prgetpctcpu(pct);
3489 psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15; /* [0..99] */
3490 if (psp->pr_cpu > 99)
3491 psp->pr_cpu = 99;
3492
3493 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
3494 psp->pr_size = 0;
3495 psp->pr_rssize = 0;
3496 psp->pr_pctmem = 0;
3497 } else {
3498 mutex_exit(&p->p_lock);
3499 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
3500 psp->pr_size = btopr(as->a_resvsize);
3501 psp->pr_rssize = rm_asrss(as);
3502 psp->pr_pctmem = rm_pctmemory(as);
3503 AS_LOCK_EXIT(as, &as->a_lock);
3504 mutex_enter(&p->p_lock);
3505 }
3506 }
3507 psp->pr_bysize = ptob(psp->pr_size);
3508 psp->pr_byrssize = ptob(psp->pr_rssize);
3509 }
3510
3511 /*
3512 * Return an array of structures with memory map information.
3513 * We allocate here; the caller must deallocate.
3514 * The caller is also responsible to append the zero-filled entry
3515 * that terminates the PIOCMAP output buffer.
3516 */
static int
oprgetmap(proc_t *p, list_t *iolhead)
{
	struct as *as = p->p_as;
	prmap_t *mp;			/* current output map entry */
	struct seg *seg;
	struct seg *brkseg, *stkseg;	/* heap and stack segments, if any */
	uint_t prot;

	/* Caller must hold the target address space write-locked. */
	ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		/* Walk the ranges of uniform protection within the segment. */
		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			/*
			 * NOTE(review): this excerpt elides the lines that
			 * allocate mp on iolhead and fill in its address,
			 * size, and protection flags (including the brkseg
			 * case) -- confirm against the full source.
			 */
			else if (seg == stkseg)
				mp->pr_mflags |= MA_STACK;
			mp->pr_pagesize = PAGESIZE;
		}
		ASSERT(tmp == NULL);	/* pr_getprot() state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
3575
3576 #ifdef _SYSCALL32_IMPL
/*
 * ILP32 counterpart of oprgetmap(): same walk, but emits
 * ioc_prmap32_t entries for 32-bit consumers of PIOCMAP.
 */
static int
oprgetmap32(proc_t *p, list_t *iolhead)
{
	struct as *as = p->p_as;
	ioc_prmap32_t *mp;		/* current output map entry */
	struct seg *seg;
	struct seg *brkseg, *stkseg;	/* heap and stack segments, if any */
	uint_t prot;

	/* Caller must hold the target address space write-locked. */
	ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		/* Walk the ranges of uniform protection within the segment. */
		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			/*
			 * NOTE(review): this excerpt elides the lines that
			 * allocate mp on iolhead and fill in its address,
			 * size, and protection flags (including the brkseg
			 * case) -- confirm against the full source.
			 */
			else if (seg == stkseg)
				mp->pr_mflags |= MA_STACK;
			mp->pr_pagesize = PAGESIZE;
		}
		ASSERT(tmp == NULL);	/* pr_getprot() state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
3635 #endif /* _SYSCALL32_IMPL */
3636
3637 /*
3638 * Return the size of the old /proc page data file.
3639 */
3640 size_t
3641 oprpdsize(struct as *as)
3642 {
3643 struct seg *seg;
3644 size_t size;
3645
3646 ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
3647
3648 if ((seg = AS_SEGFIRST(as)) == NULL)
3649 return (0);
3650
3651 size = sizeof (prpageheader_t);
3652 do {
3653 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
3654 caddr_t saddr, naddr;
3655 void *tmp = NULL;
3656 size_t npage;
3657
3658 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
3659 (void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
3660 if ((npage = (naddr - saddr) / PAGESIZE) != 0)
3661 size += sizeof (prasmap_t) + roundlong(npage);
3662 }
3663 ASSERT(tmp == NULL);
3664 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
3665
3666 return (size);
3667 }
3668
3669 #ifdef _SYSCALL32_IMPL
3670 size_t
3671 oprpdsize32(struct as *as)
3672 {
3673 struct seg *seg;
3674 size_t size;
3675
3676 ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
3677
3678 if ((seg = AS_SEGFIRST(as)) == NULL)
3679 return (0);
3680
3681 size = sizeof (ioc_prpageheader32_t);
3682 do {
3683 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
3684 caddr_t saddr, naddr;
3685 void *tmp = NULL;
3686 size_t npage;
3687
3688 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
3689 (void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
3690 if ((npage = (naddr - saddr) / PAGESIZE) != 0)
3691 size += sizeof (ioc_prmap32_t) + round4(npage);
3692 }
3693 ASSERT(tmp == NULL);
3694 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
3695
3696 return (size);
3697 }
3698 #endif /* _SYSCALL32_IMPL */
3699
3700 /*
3701 * Read old /proc page data information.
3702 */
/*
 * Build the page data image (prpageheader_t followed by one
 * prasmap_t plus per-page data for each mapping) in a kernel
 * buffer and copy it out through uiop.
 */
int
oprpdread(struct as *as, uint_t hatid, struct uio *uiop)
{
	caddr_t buf;		/* kernel staging buffer */
	size_t size;		/* computed size of the whole image */
	prpageheader_t *php;	/* header at the front of buf */
	prasmap_t *pmp;		/* current per-mapping record */
	struct seg *seg;
	int error;

again:
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	if ((seg = AS_SEGFIRST(as)) == NULL) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (0);
	}
	size = oprpdsize(as);
	if (uiop->uio_resid < size) {
		/* Caller's buffer cannot hold the whole image. */
		AS_LOCK_EXIT(as, &as->a_lock);
		return (E2BIG);
	}

	buf = kmem_zalloc(size, KM_SLEEP);
	php = (prpageheader_t *)buf;
	pmp = (prasmap_t *)(buf + sizeof (prpageheader_t));

	hrt2ts(gethrtime(), &php->pr_tstamp);
	php->pr_nmap = 0;
	php->pr_npage = 0;
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			size_t len;
			size_t npage;
			uint_t prot;
			uintptr_t next;

			/*
			 * NOTE(review): this excerpt elides the lines that
			 * call pr_getprot() and derive len/npage/prot for
			 * the [saddr, naddr) range -- confirm against the
			 * full source.
			 */
			next = (uintptr_t)(pmp + 1) + roundlong(npage);
			/*
			 * It's possible that the address space can change
			 * subtlely even though we're holding as->a_lock
			 * due to the nondeterminism of page_exists() in
			 * the presence of asychronously flushed pages or
			 * mapped files whose sizes are changing.
			 * page_exists() may be called indirectly from
			 * pr_getprot() by a SEGOP_INCORE() routine.
			 * If this happens we need to make sure we don't
			 * overrun the buffer whose size we computed based
			 * on the initial iteration through the segments.
			 * Once we've detected an overflow, we need to clean
			 * up the temporary memory allocated in pr_getprot()
			 * and retry. If there's a pending signal, we return
			 * EINTR so that this thread can be dislodged if
			 * a latent bug causes us to spin indefinitely.
			 */
			if (next > (uintptr_t)buf + size) {
				pr_getprot_done(&tmp);
				AS_LOCK_EXIT(as, &as->a_lock);

				kmem_free(buf, size);

				if (ISSIG(curthread, JUSTLOOKING))
					return (EINTR);

				goto again;
			}

			php->pr_nmap++;
			php->pr_npage += npage;
			pmp->pr_vaddr = saddr;
			pmp->pr_npage = npage;
			pmp->pr_off = SEGOP_GETOFFSET(seg, saddr);
			pmp->pr_mflags = 0;
			if (prot & PROT_READ)
				pmp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				pmp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				pmp->pr_mflags |= MA_EXEC;
			if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
				pmp->pr_mflags |= MA_SHARED;
			pmp->pr_pagesize = PAGESIZE;
			/* Per-page statistics land right after the record. */
			hat_getstat(as, saddr, len, hatid,
			    (char *)(pmp + 1), HAT_SYNC_ZERORM);
			pmp = (prasmap_t *)next;
		}
		ASSERT(tmp == NULL);	/* iteration state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	AS_LOCK_EXIT(as, &as->a_lock);

	/* The image may have shrunk if the address space changed. */
	ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
	error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
	kmem_free(buf, size);

	return (error);
}
3808
3809 #ifdef _SYSCALL32_IMPL
/*
 * ILP32 counterpart of oprpdread(): same page data walk, but
 * builds the image with the 32-bit header/record layout.
 */
int
oprpdread32(struct as *as, uint_t hatid, struct uio *uiop)
{
	caddr_t buf;		/* kernel staging buffer */
	size_t size;		/* computed size of the whole image */
	ioc_prpageheader32_t *php;	/* header at the front of buf */
	ioc_prasmap32_t *pmp;	/* current per-mapping record */
	struct seg *seg;
	int error;

again:
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	if ((seg = AS_SEGFIRST(as)) == NULL) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (0);
	}
	size = oprpdsize32(as);
	if (uiop->uio_resid < size) {
		/* Caller's buffer cannot hold the whole image. */
		AS_LOCK_EXIT(as, &as->a_lock);
		return (E2BIG);
	}

	buf = kmem_zalloc(size, KM_SLEEP);
	php = (ioc_prpageheader32_t *)buf;
	pmp = (ioc_prasmap32_t *)(buf + sizeof (ioc_prpageheader32_t));

	hrt2ts32(gethrtime(), &php->pr_tstamp);
	php->pr_nmap = 0;
	php->pr_npage = 0;
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			size_t len;
			size_t npage;
			uint_t prot;
			uintptr_t next;

			/*
			 * NOTE(review): this excerpt elides the lines that
			 * call pr_getprot() and derive len/npage/prot for
			 * the [saddr, naddr) range -- confirm against the
			 * full source.
			 */
			next = (uintptr_t)(pmp + 1) + round4(npage);
			/*
			 * It's possible that the address space can change
			 * subtlely even though we're holding as->a_lock
			 * due to the nondeterminism of page_exists() in
			 * the presence of asychronously flushed pages or
			 * mapped files whose sizes are changing.
			 * page_exists() may be called indirectly from
			 * pr_getprot() by a SEGOP_INCORE() routine.
			 * If this happens we need to make sure we don't
			 * overrun the buffer whose size we computed based
			 * on the initial iteration through the segments.
			 * Once we've detected an overflow, we need to clean
			 * up the temporary memory allocated in pr_getprot()
			 * and retry. If there's a pending signal, we return
			 * EINTR so that this thread can be dislodged if
			 * a latent bug causes us to spin indefinitely.
			 */
			if (next > (uintptr_t)buf + size) {
				pr_getprot_done(&tmp);
				AS_LOCK_EXIT(as, &as->a_lock);

				kmem_free(buf, size);

				if (ISSIG(curthread, JUSTLOOKING))
					return (EINTR);

				goto again;
			}

			php->pr_nmap++;
			php->pr_npage += npage;
			pmp->pr_vaddr = (uint32_t)(uintptr_t)saddr;
			pmp->pr_npage = (uint32_t)npage;
			pmp->pr_off = (int32_t)SEGOP_GETOFFSET(seg, saddr);
			pmp->pr_mflags = 0;
			if (prot & PROT_READ)
				pmp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				pmp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				pmp->pr_mflags |= MA_EXEC;
			if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
				pmp->pr_mflags |= MA_SHARED;
			pmp->pr_pagesize = PAGESIZE;
			/* Per-page statistics land right after the record. */
			hat_getstat(as, saddr, len, hatid,
			    (char *)(pmp + 1), HAT_SYNC_ZERORM);
			pmp = (ioc_prasmap32_t *)next;
		}
		ASSERT(tmp == NULL);	/* iteration state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	AS_LOCK_EXIT(as, &as->a_lock);

	/*
	 * NOTE(review): oprpdread() asserts '<=' here; if the address
	 * space shrank during the walk, pmp may end short of buf + size
	 * and this stricter '==' assertion would fire -- confirm the
	 * difference is intentional.
	 */
	ASSERT((uintptr_t)pmp == (uintptr_t)buf + size);
	error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
	kmem_free(buf, size);

	return (error);
}
3915 #endif /* _SYSCALL32_IMPL */
3916
3917 /*ARGSUSED*/
3918 #ifdef _SYSCALL32_IMPL
3919 int
3920 prioctl(
3921 struct vnode *vp,
3922 int cmd,
3923 intptr_t arg,
3924 int flag,
3925 cred_t *cr,
3926 int *rvalp,
3927 caller_context_t *ct)
|
945 sigktou(&t->t_hold, &un.holdmask);
946 prunlock(pnp);
947 if (copyout(&un.holdmask, cmaddr, sizeof (un.holdmask)))
948 error = EFAULT;
949 break;
950
951 case PIOCSHOLD: /* set signal-hold mask */
952 pr_sethold(pnp, &un.holdmask);
953 prunlock(pnp);
954 break;
955
956 case PIOCNMAP: /* get number of memory mappings */
957 {
958 int n;
959 struct as *as = p->p_as;
960
961 if ((p->p_flag & SSYS) || as == &kas)
962 n = 0;
963 else {
964 mutex_exit(&p->p_lock);
965 AS_LOCK_ENTER(as, RW_WRITER);
966 n = prnsegs(as, 0);
967 AS_LOCK_EXIT(as);
968 mutex_enter(&p->p_lock);
969 }
970 prunlock(pnp);
971 if (copyout(&n, cmaddr, sizeof (int)))
972 error = EFAULT;
973 break;
974 }
975
976 case PIOCMAP: /* get memory map information */
977 {
978 list_t iolhead;
979 struct as *as = p->p_as;
980
981 if ((p->p_flag & SSYS) || as == &kas) {
982 error = 0;
983 prunlock(pnp);
984 } else {
985 mutex_exit(&p->p_lock);
986 AS_LOCK_ENTER(as, RW_WRITER);
987 error = oprgetmap(p, &iolhead);
988 AS_LOCK_EXIT(as);
989 mutex_enter(&p->p_lock);
990 prunlock(pnp);
991
992 error = pr_iol_copyout_and_free(&iolhead,
993 &cmaddr, error);
994 }
995 /*
996 * The procfs PIOCMAP ioctl returns an all-zero buffer
997 * to indicate the end of the prmap[] array.
998 * Append it to whatever has already been copied out.
999 */
1000 bzero(&un.prmap, sizeof (un.prmap));
1001 if (!error && copyout(&un.prmap, cmaddr, sizeof (un.prmap)))
1002 error = EFAULT;
1003
1004 break;
1005 }
1006
1007 case PIOCGFAULT: /* get mask of traced faults */
1008 prassignset(&un.fltmask, &p->p_fltmask);
1640 if ((t = tp) == NULL)
1641 t = p->p_tlist;
1642 cur_time = gethrtime_unscaled();
1643 do {
1644 pct += cpu_update_pct(t, cur_time);
1645 if (tp != NULL) /* just do the one lwp */
1646 break;
1647 } while ((t = t->t_forw) != p->p_tlist);
1648
1649 psp->pr_pctcpu = prgetpctcpu(pct);
1650 psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15; /* [0..99] */
1651 if (psp->pr_cpu > 99)
1652 psp->pr_cpu = 99;
1653
1654 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
1655 psp->pr_size = 0;
1656 psp->pr_rssize = 0;
1657 psp->pr_pctmem = 0;
1658 } else {
1659 mutex_exit(&p->p_lock);
1660 AS_LOCK_ENTER(as, RW_READER);
1661 psp->pr_size = (size32_t)btopr(as->a_resvsize);
1662 psp->pr_rssize = (size32_t)rm_asrss(as);
1663 psp->pr_pctmem = rm_pctmemory(as);
1664 AS_LOCK_EXIT(as);
1665 mutex_enter(&p->p_lock);
1666 }
1667 }
1668 psp->pr_bysize = (size32_t)ptob(psp->pr_size);
1669 psp->pr_byrssize = (size32_t)ptob(psp->pr_rssize);
1670
1671 /*
1672 * If we are looking at an LP64 process, zero out
1673 * the fields that cannot be represented in ILP32.
1674 */
1675 if (p->p_model != DATAMODEL_ILP32) {
1676 psp->pr_size = 0;
1677 psp->pr_rssize = 0;
1678 psp->pr_bysize = 0;
1679 psp->pr_byrssize = 0;
1680 psp->pr_argv = 0;
1681 psp->pr_envp = 0;
1682 }
1683 }
1684
2572 sigktou(&t->t_hold, &un32.holdmask);
2573 prunlock(pnp);
2574 if (copyout(&un32.holdmask, cmaddr, sizeof (un32.holdmask)))
2575 error = EFAULT;
2576 break;
2577
2578 case PIOCSHOLD: /* set signal-hold mask */
2579 pr_sethold(pnp, &un32.holdmask);
2580 prunlock(pnp);
2581 break;
2582
2583 case PIOCNMAP: /* get number of memory mappings */
2584 {
2585 int n;
2586 struct as *as = p->p_as;
2587
2588 if ((p->p_flag & SSYS) || as == &kas)
2589 n = 0;
2590 else {
2591 mutex_exit(&p->p_lock);
2592 AS_LOCK_ENTER(as, RW_WRITER);
2593 n = prnsegs(as, 0);
2594 AS_LOCK_EXIT(as);
2595 mutex_enter(&p->p_lock);
2596 }
2597 prunlock(pnp);
2598 if (copyout(&n, cmaddr, sizeof (int)))
2599 error = EFAULT;
2600 break;
2601 }
2602
2603 case PIOCMAP: /* get memory map information */
2604 {
2605 list_t iolhead;
2606 struct as *as = p->p_as;
2607
2608 if ((p->p_flag & SSYS) || as == &kas) {
2609 error = 0;
2610 prunlock(pnp);
2611 } else if (PROCESS_NOT_32BIT(p)) {
2612 error = EOVERFLOW;
2613 prunlock(pnp);
2614 } else {
2615 mutex_exit(&p->p_lock);
2616 AS_LOCK_ENTER(as, RW_WRITER);
2617 error = oprgetmap32(p, &iolhead);
2618 AS_LOCK_EXIT(as);
2619 mutex_enter(&p->p_lock);
2620 prunlock(pnp);
2621
2622 error = pr_iol_copyout_and_free(&iolhead,
2623 &cmaddr, error);
2624 }
2625 /*
2626 * The procfs PIOCMAP ioctl returns an all-zero buffer
2627 * to indicate the end of the prmap[] array.
2628 * Append it to whatever has already been copied out.
2629 */
2630 bzero(&un32.prmap, sizeof (un32.prmap));
2631 if (!error &&
2632 copyout(&un32.prmap, cmaddr, sizeof (un32.prmap)))
2633 error = EFAULT;
2634 break;
2635 }
2636
2637 case PIOCGFAULT: /* get mask of traced faults */
2638 prassignset(&un32.fltmask, &p->p_fltmask);
3125 struct as *as = p->p_as;
3126 int error = 0;
3127 struct seg *seg;
3128 struct vnode *xvp;
3129 int n;
3130
3131 /*
3132 * By fiat, a system process has no address space.
3133 */
3134 if ((p->p_flag & SSYS) || as == &kas) {
3135 error = EINVAL;
3136 } else if (cmaddr) {
3137 /*
3138 * We drop p_lock before grabbing the address
3139 * space lock in order to avoid a deadlock with
3140 * the clock thread. The process will not
3141 * disappear and its address space will not
3142 * change because it is marked P_PR_LOCK.
3143 */
3144 mutex_exit(&p->p_lock);
3145 AS_LOCK_ENTER(as, RW_READER);
3146 seg = as_segat(as, va);
3147 if (seg != NULL &&
3148 seg->s_ops == &segvn_ops &&
3149 SEGOP_GETVP(seg, va, &xvp) == 0 &&
3150 xvp != NULL &&
3151 xvp->v_type == VREG) {
3152 VN_HOLD(xvp);
3153 } else {
3154 error = EINVAL;
3155 }
3156 AS_LOCK_EXIT(as);
3157 mutex_enter(&p->p_lock);
3158 } else if ((xvp = p->p_exec) == NULL) {
3159 error = EINVAL;
3160 } else {
3161 VN_HOLD(xvp);
3162 }
3163
3164 prunlock(pnp);
3165
3166 if (error == 0) {
3167 if ((error = VOP_ACCESS(xvp, VREAD, 0, cr, NULL)) == 0)
3168 error = fassign(&xvp, FREAD, &n);
3169 if (error) {
3170 VN_RELE(xvp);
3171 } else {
3172 *rvalp = n;
3173 }
3174 }
3175
3176 return (error);
3479 if ((t = tp) == NULL)
3480 t = p->p_tlist;
3481 cur_time = gethrtime_unscaled();
3482 do {
3483 pct += cpu_update_pct(t, cur_time);
3484 if (tp != NULL) /* just do the one lwp */
3485 break;
3486 } while ((t = t->t_forw) != p->p_tlist);
3487
3488 psp->pr_pctcpu = prgetpctcpu(pct);
3489 psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15; /* [0..99] */
3490 if (psp->pr_cpu > 99)
3491 psp->pr_cpu = 99;
3492
3493 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
3494 psp->pr_size = 0;
3495 psp->pr_rssize = 0;
3496 psp->pr_pctmem = 0;
3497 } else {
3498 mutex_exit(&p->p_lock);
3499 AS_LOCK_ENTER(as, RW_READER);
3500 psp->pr_size = btopr(as->a_resvsize);
3501 psp->pr_rssize = rm_asrss(as);
3502 psp->pr_pctmem = rm_pctmemory(as);
3503 AS_LOCK_EXIT(as);
3504 mutex_enter(&p->p_lock);
3505 }
3506 }
3507 psp->pr_bysize = ptob(psp->pr_size);
3508 psp->pr_byrssize = ptob(psp->pr_rssize);
3509 }
3510
3511 /*
3512 * Return an array of structures with memory map information.
3513 * We allocate here; the caller must deallocate.
3514 * The caller is also responsible to append the zero-filled entry
3515 * that terminates the PIOCMAP output buffer.
3516 */
static int
oprgetmap(proc_t *p, list_t *iolhead)
{
	struct as *as = p->p_as;
	prmap_t *mp;			/* current output map entry */
	struct seg *seg;
	struct seg *brkseg, *stkseg;	/* heap and stack segments, if any */
	uint_t prot;

	/* Caller must hold the target address space write-locked. */
	ASSERT(as != &kas && AS_WRITE_HELD(as));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		/* Walk the ranges of uniform protection within the segment. */
		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			/*
			 * NOTE(review): this excerpt elides the lines that
			 * allocate mp on iolhead and fill in its address,
			 * size, and protection flags (including the brkseg
			 * case) -- confirm against the full source.
			 */
			else if (seg == stkseg)
				mp->pr_mflags |= MA_STACK;
			mp->pr_pagesize = PAGESIZE;
		}
		ASSERT(tmp == NULL);	/* pr_getprot() state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
3575
3576 #ifdef _SYSCALL32_IMPL
/*
 * ILP32 counterpart of oprgetmap(): same walk, but emits
 * ioc_prmap32_t entries for 32-bit consumers of PIOCMAP.
 */
static int
oprgetmap32(proc_t *p, list_t *iolhead)
{
	struct as *as = p->p_as;
	ioc_prmap32_t *mp;		/* current output map entry */
	struct seg *seg;
	struct seg *brkseg, *stkseg;	/* heap and stack segments, if any */
	uint_t prot;

	/* Caller must hold the target address space write-locked. */
	ASSERT(as != &kas && AS_WRITE_HELD(as));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		/* Walk the ranges of uniform protection within the segment. */
		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			/*
			 * NOTE(review): this excerpt elides the lines that
			 * allocate mp on iolhead and fill in its address,
			 * size, and protection flags (including the brkseg
			 * case) -- confirm against the full source.
			 */
			else if (seg == stkseg)
				mp->pr_mflags |= MA_STACK;
			mp->pr_pagesize = PAGESIZE;
		}
		ASSERT(tmp == NULL);	/* pr_getprot() state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
3635 #endif /* _SYSCALL32_IMPL */
3636
3637 /*
3638 * Return the size of the old /proc page data file.
3639 */
3640 size_t
3641 oprpdsize(struct as *as)
3642 {
3643 struct seg *seg;
3644 size_t size;
3645
3646 ASSERT(as != &kas && AS_WRITE_HELD(as));
3647
3648 if ((seg = AS_SEGFIRST(as)) == NULL)
3649 return (0);
3650
3651 size = sizeof (prpageheader_t);
3652 do {
3653 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
3654 caddr_t saddr, naddr;
3655 void *tmp = NULL;
3656 size_t npage;
3657
3658 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
3659 (void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
3660 if ((npage = (naddr - saddr) / PAGESIZE) != 0)
3661 size += sizeof (prasmap_t) + roundlong(npage);
3662 }
3663 ASSERT(tmp == NULL);
3664 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
3665
3666 return (size);
3667 }
3668
3669 #ifdef _SYSCALL32_IMPL
3670 size_t
3671 oprpdsize32(struct as *as)
3672 {
3673 struct seg *seg;
3674 size_t size;
3675
3676 ASSERT(as != &kas && AS_WRITE_HELD(as));
3677
3678 if ((seg = AS_SEGFIRST(as)) == NULL)
3679 return (0);
3680
3681 size = sizeof (ioc_prpageheader32_t);
3682 do {
3683 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
3684 caddr_t saddr, naddr;
3685 void *tmp = NULL;
3686 size_t npage;
3687
3688 for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
3689 (void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
3690 if ((npage = (naddr - saddr) / PAGESIZE) != 0)
3691 size += sizeof (ioc_prmap32_t) + round4(npage);
3692 }
3693 ASSERT(tmp == NULL);
3694 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
3695
3696 return (size);
3697 }
3698 #endif /* _SYSCALL32_IMPL */
3699
3700 /*
3701 * Read old /proc page data information.
3702 */
/*
 * Build the page data image (prpageheader_t followed by one
 * prasmap_t plus per-page data for each mapping) in a kernel
 * buffer and copy it out through uiop.
 */
int
oprpdread(struct as *as, uint_t hatid, struct uio *uiop)
{
	caddr_t buf;		/* kernel staging buffer */
	size_t size;		/* computed size of the whole image */
	prpageheader_t *php;	/* header at the front of buf */
	prasmap_t *pmp;		/* current per-mapping record */
	struct seg *seg;
	int error;

again:
	AS_LOCK_ENTER(as, RW_WRITER);

	if ((seg = AS_SEGFIRST(as)) == NULL) {
		AS_LOCK_EXIT(as);
		return (0);
	}
	size = oprpdsize(as);
	if (uiop->uio_resid < size) {
		/* Caller's buffer cannot hold the whole image. */
		AS_LOCK_EXIT(as);
		return (E2BIG);
	}

	buf = kmem_zalloc(size, KM_SLEEP);
	php = (prpageheader_t *)buf;
	pmp = (prasmap_t *)(buf + sizeof (prpageheader_t));

	hrt2ts(gethrtime(), &php->pr_tstamp);
	php->pr_nmap = 0;
	php->pr_npage = 0;
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			size_t len;
			size_t npage;
			uint_t prot;
			uintptr_t next;

			/*
			 * NOTE(review): this excerpt elides the lines that
			 * call pr_getprot() and derive len/npage/prot for
			 * the [saddr, naddr) range -- confirm against the
			 * full source.
			 */
			next = (uintptr_t)(pmp + 1) + roundlong(npage);
			/*
			 * It's possible that the address space can change
			 * subtlely even though we're holding as->a_lock
			 * due to the nondeterminism of page_exists() in
			 * the presence of asychronously flushed pages or
			 * mapped files whose sizes are changing.
			 * page_exists() may be called indirectly from
			 * pr_getprot() by a SEGOP_INCORE() routine.
			 * If this happens we need to make sure we don't
			 * overrun the buffer whose size we computed based
			 * on the initial iteration through the segments.
			 * Once we've detected an overflow, we need to clean
			 * up the temporary memory allocated in pr_getprot()
			 * and retry. If there's a pending signal, we return
			 * EINTR so that this thread can be dislodged if
			 * a latent bug causes us to spin indefinitely.
			 */
			if (next > (uintptr_t)buf + size) {
				pr_getprot_done(&tmp);
				AS_LOCK_EXIT(as);

				kmem_free(buf, size);

				if (ISSIG(curthread, JUSTLOOKING))
					return (EINTR);

				goto again;
			}

			php->pr_nmap++;
			php->pr_npage += npage;
			pmp->pr_vaddr = saddr;
			pmp->pr_npage = npage;
			pmp->pr_off = SEGOP_GETOFFSET(seg, saddr);
			pmp->pr_mflags = 0;
			if (prot & PROT_READ)
				pmp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				pmp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				pmp->pr_mflags |= MA_EXEC;
			if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
				pmp->pr_mflags |= MA_SHARED;
			pmp->pr_pagesize = PAGESIZE;
			/* Per-page statistics land right after the record. */
			hat_getstat(as, saddr, len, hatid,
			    (char *)(pmp + 1), HAT_SYNC_ZERORM);
			pmp = (prasmap_t *)next;
		}
		ASSERT(tmp == NULL);	/* iteration state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	AS_LOCK_EXIT(as);

	/* The image may have shrunk if the address space changed. */
	ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
	error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
	kmem_free(buf, size);

	return (error);
}
3808
3809 #ifdef _SYSCALL32_IMPL
/*
 * ILP32 counterpart of oprpdread(): same page data walk, but
 * builds the image with the 32-bit header/record layout.
 */
int
oprpdread32(struct as *as, uint_t hatid, struct uio *uiop)
{
	caddr_t buf;		/* kernel staging buffer */
	size_t size;		/* computed size of the whole image */
	ioc_prpageheader32_t *php;	/* header at the front of buf */
	ioc_prasmap32_t *pmp;	/* current per-mapping record */
	struct seg *seg;
	int error;

again:
	AS_LOCK_ENTER(as, RW_WRITER);

	if ((seg = AS_SEGFIRST(as)) == NULL) {
		AS_LOCK_EXIT(as);
		return (0);
	}
	size = oprpdsize32(as);
	if (uiop->uio_resid < size) {
		/* Caller's buffer cannot hold the whole image. */
		AS_LOCK_EXIT(as);
		return (E2BIG);
	}

	buf = kmem_zalloc(size, KM_SLEEP);
	php = (ioc_prpageheader32_t *)buf;
	pmp = (ioc_prasmap32_t *)(buf + sizeof (ioc_prpageheader32_t));

	hrt2ts32(gethrtime(), &php->pr_tstamp);
	php->pr_nmap = 0;
	php->pr_npage = 0;
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;	/* pr_getprot() iteration state */

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			size_t len;
			size_t npage;
			uint_t prot;
			uintptr_t next;

			/*
			 * NOTE(review): this excerpt elides the lines that
			 * call pr_getprot() and derive len/npage/prot for
			 * the [saddr, naddr) range -- confirm against the
			 * full source.
			 */
			next = (uintptr_t)(pmp + 1) + round4(npage);
			/*
			 * It's possible that the address space can change
			 * subtlely even though we're holding as->a_lock
			 * due to the nondeterminism of page_exists() in
			 * the presence of asychronously flushed pages or
			 * mapped files whose sizes are changing.
			 * page_exists() may be called indirectly from
			 * pr_getprot() by a SEGOP_INCORE() routine.
			 * If this happens we need to make sure we don't
			 * overrun the buffer whose size we computed based
			 * on the initial iteration through the segments.
			 * Once we've detected an overflow, we need to clean
			 * up the temporary memory allocated in pr_getprot()
			 * and retry. If there's a pending signal, we return
			 * EINTR so that this thread can be dislodged if
			 * a latent bug causes us to spin indefinitely.
			 */
			if (next > (uintptr_t)buf + size) {
				pr_getprot_done(&tmp);
				AS_LOCK_EXIT(as);

				kmem_free(buf, size);

				if (ISSIG(curthread, JUSTLOOKING))
					return (EINTR);

				goto again;
			}

			php->pr_nmap++;
			php->pr_npage += npage;
			pmp->pr_vaddr = (uint32_t)(uintptr_t)saddr;
			pmp->pr_npage = (uint32_t)npage;
			pmp->pr_off = (int32_t)SEGOP_GETOFFSET(seg, saddr);
			pmp->pr_mflags = 0;
			if (prot & PROT_READ)
				pmp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				pmp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				pmp->pr_mflags |= MA_EXEC;
			if (SEGOP_GETTYPE(seg, saddr) & MAP_SHARED)
				pmp->pr_mflags |= MA_SHARED;
			pmp->pr_pagesize = PAGESIZE;
			/* Per-page statistics land right after the record. */
			hat_getstat(as, saddr, len, hatid,
			    (char *)(pmp + 1), HAT_SYNC_ZERORM);
			pmp = (ioc_prasmap32_t *)next;
		}
		ASSERT(tmp == NULL);	/* iteration state fully consumed */
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	AS_LOCK_EXIT(as);

	/*
	 * NOTE(review): oprpdread() asserts '<=' here; if the address
	 * space shrank during the walk, pmp may end short of buf + size
	 * and this stricter '==' assertion would fire -- confirm the
	 * difference is intentional.
	 */
	ASSERT((uintptr_t)pmp == (uintptr_t)buf + size);
	error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
	kmem_free(buf, size);

	return (error);
}
3915 #endif /* _SYSCALL32_IMPL */
3916
3917 /*ARGSUSED*/
3918 #ifdef _SYSCALL32_IMPL
3919 int
3920 prioctl(
3921 struct vnode *vp,
3922 int cmd,
3923 intptr_t arg,
3924 int flag,
3925 cred_t *cr,
3926 int *rvalp,
3927 caller_context_t *ct)
|