patch fix-bad-code
patch as-lock-macro-simplification

          --- old/usr/src/uts/common/os/zone.c
          +++ new/usr/src/uts/common/os/zone.c
... 5601 lines elided ...
5602 5602  static int
5603 5603  as_can_change_zones(void)
5604 5604  {
5605 5605          proc_t *pp = curproc;
5606 5606          struct seg *seg;
5607 5607          struct as *as = pp->p_as;
5608 5608          vnode_t *vp;
5609 5609          int allow = 1;
5610 5610  
5611 5611          ASSERT(pp->p_as != &kas);
5612      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     5612 +        AS_LOCK_ENTER(as, RW_READER);
5613 5613          for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
5614 5614  
5615 5615                  /*
5616 5616                   * Cannot enter zone with shared anon memory which
5617 5617                   * reserves swap.  See comment above.
5618 5618                   */
5619 5619                  if (seg_can_change_zones(seg) == B_FALSE) {
5620 5620                          allow = 0;
5621 5621                          break;
5622 5622                  }
... 2 lines elided ...
5625 5625                   * it.
5626 5626                   */
5627 5627                  vp = NULL;
5628 5628                  if (SEGOP_GETVP(seg, seg->s_base, &vp) != 0 || vp == NULL)
5629 5629                          continue;
5630 5630                  if (!vn_can_change_zones(vp)) { /* bail on first match */
5631 5631                          allow = 0;
5632 5632                          break;
5633 5633                  }
5634 5634          }
5635      -        AS_LOCK_EXIT(as, &as->a_lock);
     5635 +        AS_LOCK_EXIT(as);
5636 5636          return (allow);
5637 5637  }
5638 5638  
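For context on the as-lock-macro-simplification patch: the AS_LOCK_* macros always operate on the address space's own a_lock, so the separate lock argument was redundant. Below is a minimal sketch of the before/after macro shapes, assuming definitions along these lines in usr/src/uts/common/vm/as.h (the authoritative forms live in that header, not in this hunk; the two groups are alternatives, not meant to coexist):

        #if 0   /* pre-simplification: callers name both the as and its lock */
        #define AS_LOCK_ENTER(as, lock, type)   rw_enter((lock), (type))
        #define AS_LOCK_EXIT(as, lock)          rw_exit((lock))
        #define AS_WRITE_HELD(as, lock)         RW_WRITE_HELD((lock))
        #else   /* post-simplification: the macro derives the lock from the as */
        #define AS_LOCK_ENTER(as, type)         rw_enter(&(as)->a_lock, (type))
        #define AS_LOCK_EXIT(as)                rw_exit(&(as)->a_lock)
        #define AS_WRITE_HELD(as)               RW_WRITE_HELD(&(as)->a_lock)
        #endif

Every call site then simply drops the &...->a_lock argument, which is the mechanical change visible in the hunks above and below.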
5639 5639  /*
5640 5640   * Count swap reserved by curproc's address space
5641 5641   */
5642 5642  static size_t
5643 5643  as_swresv(void)
5644 5644  {
5645 5645          proc_t *pp = curproc;
5646 5646          struct seg *seg;
5647 5647          struct as *as = pp->p_as;
5648 5648          size_t swap = 0;
5649 5649  
5650 5650          ASSERT(pp->p_as != &kas);
5651      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     5651 +        ASSERT(AS_WRITE_HELD(as));
5652 5652          for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg))
5653 5653                  swap += seg_swresv(seg);
5654 5654  
5655 5655          return (swap);
5656 5656  }
5657 5657  
5658 5658  /*
5659 5659   * System call entry point for zone_enter().
5660 5660   *
5661 5661   * The current process is injected into said zone.  In the process
... 184 lines elided ...
5846 5846                  goto out;
5847 5847          }
5848 5848  
5849 5849          /*
5850 5850           * a_lock must be held while transferring locked memory and swap
5851 5851           * reservation from the global zone to the non-global zone because
5852 5852           * asynchronous faults on the process's address space can lock
5853 5853           * memory and reserve swap via MCL_FUTURE and MAP_NORESERVE
5854 5854           * segments respectively.
5855 5855           */
5856      -        AS_LOCK_ENTER(pp->as, &pp->p_as->a_lock, RW_WRITER);
     5856 +        AS_LOCK_ENTER(pp->p_as, RW_WRITER);
5857 5857          swap = as_swresv();
5858 5858          mutex_enter(&pp->p_lock);
5859 5859          zone_proj0 = zone->zone_zsched->p_task->tk_proj;
5860 5860          /* verify that we do not exceed any task or lwp limits */
5861 5861          mutex_enter(&zone->zone_nlwps_lock);
5862 5862          /* add new lwps to zone and zone's proj0 */
5863 5863          zone_proj0->kpj_nlwps += pp->p_lwpcnt;
5864 5864          zone->zone_nlwps += pp->p_lwpcnt;
5865 5865          /* add 1 task to zone's proj0 */
5866 5866          zone_proj0->kpj_ntasks += 1;
... 26 lines elided ...
5893 5893          pp->p_zone->zone_max_swap -= swap;
5894 5894          mutex_exit(&pp->p_zone->zone_mem_lock);
5895 5895  
5896 5896          mutex_enter(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
5897 5897          pp->p_task->tk_proj->kpj_data.kpd_crypto_mem -= pp->p_crypto_mem;
5898 5898          mutex_exit(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
5899 5899  
5900 5900          pp->p_flag |= SZONETOP;
5901 5901          pp->p_zone = zone;
5902 5902          mutex_exit(&pp->p_lock);
5903      -        AS_LOCK_EXIT(pp->p_as, &pp->p_as->a_lock);
     5903 +        AS_LOCK_EXIT(pp->p_as);
5904 5904  
5905 5905          /*
5906 5906           * Joining the zone cannot fail from now on.
5907 5907           *
5908 5908           * This means that a lot of the following code can be commonized and
5909 5909           * shared with zsched().
5910 5910           */
5911 5911  
5912 5912          /*
5913 5913           * If the process contract fmri was inherited, we need to
... 1261 lines elided ...
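One note on the fix-bad-code patch: the removed line at old line 5856 above passed pp->as, which is not a proc_t member, yet it compiled because the three-argument macro never expanded its address-space argument. A hypothetical reduction of that effect, assuming the macro shapes sketched earlier (the two calls below belong to the old and new macro respectively and would not compile together):

        /* Old macro: the unused "as" argument vanishes during expansion, */
        /* so the bogus pp->as is never seen by the compiler proper.      */
        AS_LOCK_ENTER(pp->as, &pp->p_as->a_lock, RW_WRITER);
                /* expands to: rw_enter((&pp->p_as->a_lock), (RW_WRITER)); */

        /* New macro: the first argument is expanded, forcing the correct */
        /* expression pp->p_as at every call site.                        */
        AS_LOCK_ENTER(pp->p_as, RW_WRITER);
                /* expands to: rw_enter(&(pp->p_as)->a_lock, (RW_WRITER)); */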