			/*
			 * We're supposed to both update and age the load.
			 * This happens 10 times/sec. per cpu.  We do a
			 * little hoop-jumping to avoid integer overflow.
			 */
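			/*
			 * What the q/r split below computes: old << 7 is
			 * decomposed as (q << 16) + r, with q and r built
			 * from the high and low 16-bit halves of old, so
			 * that, up to integer truncation, the update is
			 *
			 *	new = old - ((old * f) >> 16)
			 *	    + ((nrcpus * f) >> 7)
			 *
			 * i.e. exponential decay of the old load with weight
			 * f / 2^16 plus a term proportional to the number of
			 * running CPUs, without ever forming the wide
			 * product old * f in one step.
			 */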
			int64_t q, r;

			do {
				old = new = lpl->lpl_loadavg;
				q = (old >> 16) << 7;
				r = (old & 0xffff) << 7;
				new += ((long long)(nrcpus - q) * f -
				    ((r * f) >> 16)) >> 7;

				/*
				 * Check for overflow
				 */
				if (new > LGRP_LOADAVG_MAX)
					new = LGRP_LOADAVG_MAX;
				else if (new < 0)
					new = 0;
			} while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
			    old, new) != old);
		} else {
			/*
			 * We're supposed to update the load, but not age it.
			 * This option is used to update the load (which either
			 * has already been aged in this 1/10 sec. interval or
			 * soon will be) to account for a remotely executing
			 * thread.
			 */
			do {
				old = new = lpl->lpl_loadavg;
				new += f;
				/*
				 * Check for overflow
				 * Underflow not possible here
				 */
				if (new < old)
					new = LGRP_LOADAVG_MAX;
			} while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
			    old, new) != old);
		}

		/*
		 * Do the same for this lpl's parent
		 */
		if ((lpl = lpl->lpl_parent) == NULL)
			break;
		ncpu = lpl->lpl_ncpu;
	}
}
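
/*
 * Minimal userland model of the aging path above (a sketch, not kernel
 * source; all model_* names are hypothetical).  It assumes only what the
 * code itself shows: a 32-bit load value and a decay weight f applied as
 * f / 2^16.  UINT32_MAX stands in for LGRP_LOADAVG_MAX, and the no-age
 * path would simply be a saturating add of f.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
model_age_load(uint32_t old, uint32_t nrcpus, int64_t f)
{
	int64_t q = ((int64_t)old >> 16) << 7;		/* high half, scaled */
	int64_t r = ((int64_t)old & 0xffff) << 7;	/* low half, scaled */
	int64_t new = old + (((nrcpus - q) * f - ((r * f) >> 16)) >> 7);

	if (new > UINT32_MAX)		/* clamp as the kernel code does */
		new = UINT32_MAX;
	else if (new < 0)
		new = 0;
	return ((uint32_t)new);
}

int
main(void)
{
	uint32_t load = 0;
	int i;

	/* Repeated aging converges toward nrcpus << 9 (here 1 << 25). */
	for (i = 0; i < 50; i++)
		load = model_age_load(load, 1 << 16, 0x8000);
	(void) printf("steady-state load: %u\n", load);
	return (0);
}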

/*
 * Initialize lpl topology in the target based on topology currently present in
 * lpl_bootstrap.
 *
 * lpl_topo_bootstrap is called only once from cpupart_initialize_default() to
 * initialize the cp_default list of lpls. Up to this point all topology
 * operations were performed using lpl_bootstrap. Now cp_default has its own
 * list of lpls and all subsequent lpl operations should use it instead of
 * lpl_bootstrap. The `target' points to the list of lpls in cp_default and
 * `size' is the size of this list.
 */

/* ... */

		/*
		 * If the thread was in its old lgroup for such a
		 * short amount of time that the anticipatory load that was
		 * added on its behalf has aged very little, remove that
		 * anticipatory load.
		 */
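		/*
		 * LGRP_LOADAVG_MAX_EFFECT(ncpu) below backs out the same
		 * per-level quantity that is added on the thread's behalf
		 * in the newlpl path further down; its definition is not
		 * shown in this excerpt.
		 */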
		if ((t->t_anttime + lgrp_min_nsec > gethrtime()) &&
		    ((ncpu = oldlpl->lpl_ncpu) > 0)) {
			lpl = oldlpl;
			for (;;) {
				do {
					old = new = lpl->lpl_loadavg;
					new -= LGRP_LOADAVG_MAX_EFFECT(ncpu);
					if (new > old) {
						/*
						 * this can happen if the load
						 * average was aged since we
						 * added in the anticipatory
						 * load
						 */
						new = 0;
					}
				} while (atomic_cas_32(
				    (lgrp_load_t *)&lpl->lpl_loadavg, old,
				    new) != old);

				lpl = lpl->lpl_parent;
				if (lpl == NULL)
					break;

				ncpu = lpl->lpl_ncpu;
				ASSERT(ncpu > 0);
			}
		}
	}
	/*
	 * If the thread has a new lgroup (i.e. it's not exiting), update its
	 * t_lpl and its process' p_lgrpset, and apply an anticipatory load
	 * to its new lgroup to account for the move.
	 */
	if (newlpl != NULL) {
		/*
		 * This thread is moving to a new lgroup
		 */

		/* ... */

		/*
		 * Anticipate the load that this thread
		 * will generate on its new lgroup. The goal is to
		 * make the lgroup's load appear as though the thread
		 * had been there all along. We're very conservative
		 * in calculating this anticipatory load; we assume
		 * the worst case (a 100% CPU-bound thread). This
		 * may be modified in the future to be more accurate.
		 */
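		/*
		 * Mirror image of the removal loop above: walk from the
		 * leaf lpl to the root, adding the effect computed from
		 * each level's own lpl_ncpu.
		 */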
		lpl = newlpl;
		for (;;) {
			ncpu = lpl->lpl_ncpu;
			ASSERT(ncpu > 0);
			do {
				old = new = lpl->lpl_loadavg;
				new += LGRP_LOADAVG_MAX_EFFECT(ncpu);
				/*
				 * Check for overflow
				 * Underflow not possible here
				 */
				if (new < old)
					new = UINT32_MAX;
			} while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
			    old, new) != old);

			lpl = lpl->lpl_parent;
			if (lpl == NULL)
				break;
		}
		t->t_anttime = gethrtime();
	}
}
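
/*
 * Companion sketch (hypothetical names, not kernel source) of the
 * anticipatory-load round trip in the thread-move code above: a
 * saturating add when the thread arrives, and a clamped subtract if it
 * leaves again before the load has aged.
 */
#include <stdint.h>

static uint32_t
model_anticipate(uint32_t load, uint32_t effect)
{
	uint32_t new = load + effect;

	return (new < load ? UINT32_MAX : new);		/* saturate on wrap */
}

static uint32_t
model_unanticipate(uint32_t load, uint32_t effect)
{
	uint32_t new = load - effect;

	/* Aging may have taken the load below `effect' in the meantime. */
	return (new > load ? 0 : new);
}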

/*
 * Return lgroup memory allocation policy given advice from madvise(3C)
 */
lgrp_mem_policy_t
lgrp_madv_to_policy(uchar_t advice, size_t size, int type)
{
	switch (advice) {
	case MADV_ACCESS_LWP:
		return (LGRP_MEM_POLICY_NEXT);
	case MADV_ACCESS_MANY:
		return (LGRP_MEM_POLICY_RANDOM);