Print this page
6138 don't abuse atomic_cas_*


 871                 alloc_base += (sizeof (kmutex_t) * max_mem_nodes);
 872                 cpc_mutex[i] = (kmutex_t *)alloc_base;
 873                 alloc_base += (sizeof (kmutex_t) * max_mem_nodes);
 874         }
 875         return (0);
 876 }
 877 
 878 /*
 879  * To select our starting bin, we stride through the bins with a stride
 880  * of 337.  Why 337?  It's prime, it's largeish, and it performs well both
 881  * in simulation and practice for different workloads on varying cache sizes.
 882  */
 883 uint32_t color_start_current = 0;	/* shared cursor; advanced atomically in get_color_start() */
 884 uint32_t color_start_stride = 337;	/* prime stride between successive starting bins */
 885 int color_start_random = 0;		/* nonzero: derive start from gettick() instead of the cursor */
 886 
 887 /* ARGSUSED */
 888 uint_t
 889 get_color_start(struct as *as)
 890 {
 891         uint32_t old, new;
 892 
 893         if (consistent_coloring == 2 || color_start_random) {
 894                 return ((uint_t)(((gettick()) << (vac_shift - MMU_PAGESHIFT)) &
 895                     (hw_page_array[0].hp_colors - 1)));
 896         }
 897 
 898         do {
 899                 old = color_start_current;
 900                 new = old + (color_start_stride << (vac_shift - MMU_PAGESHIFT));
 901         } while (atomic_cas_32(&color_start_current, old, new) != old);
 902 
 903         return ((uint_t)(new));
 904 }
 905 
 906 /*
 907  * Called once at startup from kphysm_init() -- before memialloc()
 908  * is invoked to do the 1st page_free()/page_freelist_add().
 909  *
 910  * initializes page_colors and page_colors_mask based on ecache_setsize.
 911  *
 912  * Also initializes the counter locks.
 913  */
 914 void
 915 page_coloring_init()
 916 {
 917         int     a, i;
 918         uint_t colors;
 919 
 920         if (do_pg_coloring == 0) {
 921                 page_colors = 1;
 922                 for (i = 0; i < mmu_page_sizes; i++) {
 923                         colorequivszc[i] = 0;




 871                 alloc_base += (sizeof (kmutex_t) * max_mem_nodes);
 872                 cpc_mutex[i] = (kmutex_t *)alloc_base;
 873                 alloc_base += (sizeof (kmutex_t) * max_mem_nodes);
 874         }
 875         return (0);
 876 }
 877 
 878 /*
 879  * To select our starting bin, we stride through the bins with a stride
 880  * of 337.  Why 337?  It's prime, it's largeish, and it performs well both
 881  * in simulation and practice for different workloads on varying cache sizes.
 882  */
 883 uint32_t color_start_current = 0;	/* shared cursor; advanced atomically in get_color_start() */
 884 uint32_t color_start_stride = 337;	/* prime stride between successive starting bins */
 885 int color_start_random = 0;		/* nonzero: derive start from gettick() instead of the cursor */
 886 
 887 /* ARGSUSED */
 888 uint_t
 889 get_color_start(struct as *as)
 890 {


 891         if (consistent_coloring == 2 || color_start_random) {
 892                 return ((uint_t)(((gettick()) << (vac_shift - MMU_PAGESHIFT)) &
 893                     (hw_page_array[0].hp_colors - 1)));
 894         }
 895 
 896         return ((uint_t)atomic_add_32_nv(&color_start_current,
 897             color_start_stride << (vac_shift - MMU_PAGESHIFT)));




 898 }
 899 
 900 /*
 901  * Called once at startup from kphysm_init() -- before memialloc()
 902  * is invoked to do the 1st page_free()/page_freelist_add().
 903  *
 904  * initializes page_colors and page_colors_mask based on ecache_setsize.
 905  *
 906  * Also initializes the counter locks.
 907  */
 908 void
 909 page_coloring_init()
 910 {
 911         int     a, i;
 912         uint_t colors;
 913 
 914         if (do_pg_coloring == 0) {
 915                 page_colors = 1;
 916                 for (i = 0; i < mmu_page_sizes; i++) {
 917                         colorequivszc[i] = 0;