6138 don't abuse atomic_cas_*
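The change below replaces open-coded atomic_cas_64() retry loops that set or clear a single bit in memnodes_mask with atomic_or_64() and atomic_and_64(), which perform the same unconditional bit update in a single call. A minimal standalone sketch of the two patterns follows; it is not part of the webrev, memnodes_mask is simplified to a plain uint64_t here, and the function names are illustrative only.

    #include <sys/atomic.h>

    static volatile uint64_t memnodes_mask;    /* simplified stand-in */

    /* Old pattern: open-coded CAS loop, retried until the swap succeeds. */
    static void
    set_mnode_bit_cas(int mnode)
    {
            uint64_t oldmask, newmask;

            do {
                    oldmask = memnodes_mask;
                    newmask = oldmask | (1ull << mnode);
            } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
    }

    /* New pattern: the bit set is unconditional, so one atomic OR suffices. */
    static void
    set_mnode_bit_or(int mnode)
    {
            atomic_or_64(&memnodes_mask, 1ull << mnode);
            /* Clearing a bit has the same shape:
             * atomic_and_64(&memnodes_mask, ~(1ull << mnode)); */
    }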

          --- old/usr/src/uts/i86pc/os/memnode.c
          +++ new/usr/src/uts/i86pc/os/memnode.c
↓ 63 lines elided ↑
  64   64   * It is rather tricky to do these updates since we can't
  65   65   * protect the memnode structures with locks, so we must
  66   66   * be mindful of the order in which updates and reads to
  67   67   * these values can occur.
  68   68   */
  69   69  
  70   70  void
  71   71  mem_node_add_slice(pfn_t start, pfn_t end)
  72   72  {
  73   73          int mnode;
  74      -        mnodeset_t newmask, oldmask;
  75   74  
  76   75          /*
  77   76           * DR will pass us the first pfn that is allocatable.
  78   77           * We need to round down to get the real start of
  79   78           * the slice.
  80   79           */
  81   80          if (mem_node_physalign) {
  82   81                  start &= ~(btop(mem_node_physalign) - 1);
  83   82                  end = roundup(end, btop(mem_node_physalign)) - 1;
  84   83          }
↓ 6 lines elided ↑
  91   90                   * Add slice to existing node.
  92   91                   */
  93   92                  if (start < mem_node_config[mnode].physbase)
  94   93                          mem_node_config[mnode].physbase = start;
  95   94                  if (end > mem_node_config[mnode].physmax)
  96   95                          mem_node_config[mnode].physmax = end;
  97   96          } else {
  98   97                  mem_node_config[mnode].physbase = start;
  99   98                  mem_node_config[mnode].physmax = end;
 100   99                  atomic_inc_16(&num_memnodes);
 101      -                do {
 102      -                        oldmask = memnodes_mask;
 103      -                        newmask = memnodes_mask | (1ull << mnode);
 104      -                } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
 105      -                    oldmask);
      100 +                atomic_or_64(&memnodes_mask, 1ull << mnode);
 106  101          }
 107  102  
 108  103          /*
 109  104           * Inform the common lgrp framework about the new memory
 110  105           */
 111  106          lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
 112  107  }
 113  108  
 114  109  /*
 115  110   * Remove a PFN range from a memnode.  On some platforms,
 116  111   * the memnode will be created with physbase at the first
 117  112   * allocatable PFN, but later deleted with the MC slice
 118  113   * base address converted to a PFN, in which case we need
 119  114   * to assume physbase and up.
 120  115   */
 121  116  void
 122  117  mem_node_del_slice(pfn_t start, pfn_t end)
 123  118  {
 124  119          int mnode;
 125  120          pgcnt_t delta_pgcnt, node_size;
 126      -        mnodeset_t omask, nmask;
 127  121  
 128  122          if (mem_node_physalign) {
 129  123                  start &= ~(btop(mem_node_physalign) - 1);
 130  124                  end = roundup(end, btop(mem_node_physalign)) - 1;
 131  125          }
 132  126          mnode = PFN_2_MEM_NODE(start);
 133  127  
 134  128          ASSERT(mnode >= 0 && mnode < max_mem_nodes);
 135  129          ASSERT(mem_node_config[mnode].exists == 1);
 136  130  
↓ 15 lines elided ↑
 152  146                   * Let the common lgrp framework know this mnode is
 153  147                   * leaving
 154  148                   */
 155  149                  lgrp_config(LGRP_CONFIG_MEM_DEL,
 156  150                      mnode, MEM_NODE_2_LGRPHAND(mnode));
 157  151  
 158  152                  /*
 159  153                   * Delete the whole node.
 160  154                   */
 161  155                  ASSERT(MNODE_PGCNT(mnode) == 0);
 162      -                do {
 163      -                        omask = memnodes_mask;
 164      -                        nmask = omask & ~(1ull << mnode);
 165      -                } while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
      156 +                atomic_and_64(&memnodes_mask, ~(1ull << mnode));
 166  157                  atomic_dec_16(&num_memnodes);
 167  158                  mem_node_config[mnode].exists = 0;
 168  159          }
 169  160  }
 170  161  
 171  162  void
 172  163  mem_node_add_range(pfn_t start, pfn_t end)
 173  164  {
 174  165          if (&plat_slice_add)
 175  166                  plat_slice_add(start, end);
↓ 40 lines elided ↑
 216  207          }
 217  208  }
 218  209  
 219  210  /*
 220  211   * Allocate an unassigned memnode.
 221  212   */
 222  213  int
 223  214  mem_node_alloc()
 224  215  {
 225  216          int mnode;
 226      -        mnodeset_t newmask, oldmask;
 227  217  
 228  218          /*
 229  219           * Find an unused memnode.  Update it atomically to prevent
 230  220           * a first time memnode creation race.
 231  221           */
 232  222          for (mnode = 0; mnode < max_mem_nodes; mnode++)
 233  223                  if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
 234  224                      0, 1) == 0)
 235  225                          break;
 236  226  
 237  227          if (mnode >= max_mem_nodes)
 238  228                  panic("Out of free memnodes\n");
 239  229  
 240  230          mem_node_config[mnode].physbase = (pfn_t)-1l;
 241  231          mem_node_config[mnode].physmax = 0;
 242  232          atomic_inc_16(&num_memnodes);
 243      -        do {
 244      -                oldmask = memnodes_mask;
 245      -                newmask = memnodes_mask | (1ull << mnode);
 246      -        } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
      233 +        atomic_or_64(&memnodes_mask, 1ull << mnode);
 247  234  
 248  235          return (mnode);
 249  236  }
 250  237  
 251  238  /*
 252  239   * Find the intersection between a memnode and a memlist
  253  240   * and return the number of pages that overlap.
 254  241   *
 255  242   * Assumes the list is protected from DR operations by
 256  243   * the memlist lock.
↓ 28 lines elided ↑
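Note that the atomic_cas_32() in mem_node_alloc() is kept: there the update must be conditional, claiming a slot only if its exists flag is still 0, which a plain atomic OR or AND cannot express. A minimal standalone sketch of that claim pattern, assuming a simplified exists[] array and illustrative names (not part of the webrev):

    #include <sys/atomic.h>

    #define MAX_MEM_NODES   64

    static volatile uint32_t exists[MAX_MEM_NODES];

    /* Claim the first free slot; CAS makes the test-and-set atomic. */
    static int
    claim_free_slot(void)
    {
            int i;

            for (i = 0; i < MAX_MEM_NODES; i++) {
                    /* Succeeds only for the caller that sees 0 and swaps in 1. */
                    if (atomic_cas_32(&exists[i], 0, 1) == 0)
                            return (i);
            }
            return (-1);    /* no free slot */
    }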