Print this page
5255 uts shouldn't open-code ISP2


1271                 ulong_t lpthrt = *lpthrtp;
1272                 int     dowakeup = 0;
1273                 int     doalloc = 1;
1274 
1275                 ASSERT(kmem_lp_arena != NULL);
1276                 ASSERT(asize >= size);
1277 
1278                 if (lpthrt != 0) {
1279                         /* try to update the throttle value */
1280                         lpthrt = atomic_inc_ulong_nv(lpthrtp);
1281                         if (lpthrt >= segkmem_lpthrottle_max) {
1282                                 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1283                                     segkmem_lpthrottle_max / 4);
1284                         }
1285 
1286                         /*
1287                          * when we get above throttle start do an exponential
1288                          * backoff at trying large pages and reaping
1289                          */
1290                         if (lpthrt > segkmem_lpthrottle_start &&
1291                             (lpthrt & (lpthrt - 1))) {
1292                                 lpcb->allocs_throttled++;
1293                                 lpthrt--;
1294                                 if ((lpthrt & (lpthrt - 1)) == 0)
1295                                         kmem_reap();
1296                                 return (segkmem_alloc(vmp, size, vmflag));
1297                         }
1298                 }
1299 
1300                 if (!(vmflag & VM_NOSLEEP) &&
1301                     segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1302                     vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1303                     asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1304 
1305                         /*
1306                          * we are low on free memory in kmem_lp_arena
1307                          * we let only one guy to allocate heap_lp
1308                          * quantum size chunk that everybody is going to
1309                          * share
1310                          */
1311                         mutex_enter(&lpcb->lp_lock);
1312 
1313                         if (lpcb->lp_wait) {
1314 


1458                 return (0);
1459         }
1460 
1461         /* get a platform dependent value of large page size for kernel heap */
1462         segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1463 
1464         if (segkmem_lpsize <= PAGESIZE) {
1465                 /*
1466                  * put virtual space reserved for the large page kernel
1467                  * back to the regular heap
1468                  */
1469                 vmem_xfree(heap_arena, heap_lp_base,
1470                     heap_lp_end - heap_lp_base);
1471                 heap_lp_base = NULL;
1472                 heap_lp_end = NULL;
1473                 segkmem_lpsize = PAGESIZE;
1474                 return (0);
1475         }
1476 
1477         /* set heap_lp quantum if necessary */
1478         if (segkmem_heaplp_quantum == 0 ||
1479             (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
1480             P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1481                 segkmem_heaplp_quantum = segkmem_lpsize;
1482         }
1483 
1484         /* set kmem_lp quantum if necessary */
1485         if (segkmem_kmemlp_quantum == 0 ||
1486             (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
1487             segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1488                 segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1489         }
1490 
1491         /* set total amount of memory allowed for large page kernel heap */
1492         if (segkmem_kmemlp_max == 0) {
1493                 if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1494                         segkmem_kmemlp_pcnt = 12;
1495                 segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1496         }
1497         segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1498             segkmem_heaplp_quantum);
1499 
1500         /* fix lp kmem preallocation request if necessary */
1501         if (segkmem_kmemlp_min) {
1502                 segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1503                     segkmem_heaplp_quantum);
1504                 if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1505                         segkmem_kmemlp_min = segkmem_kmemlp_max;
1506         }




1271                 ulong_t lpthrt = *lpthrtp;
1272                 int     dowakeup = 0;
1273                 int     doalloc = 1;
1274 
1275                 ASSERT(kmem_lp_arena != NULL);
1276                 ASSERT(asize >= size);
1277 
1278                 if (lpthrt != 0) {
1279                         /* try to update the throttle value */
1280                         lpthrt = atomic_inc_ulong_nv(lpthrtp);
1281                         if (lpthrt >= segkmem_lpthrottle_max) {
1282                                 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1283                                     segkmem_lpthrottle_max / 4);
1284                         }
1285 
1286                         /*
1287                          * when we get above throttle start do an exponential
1288                          * backoff at trying large pages and reaping
1289                          */
1290                         if (lpthrt > segkmem_lpthrottle_start &&
1291                             !ISP2(lpthrt)) {
1292                                 lpcb->allocs_throttled++;
1293                                 lpthrt--;
1294                                 if (ISP2(lpthrt))
1295                                         kmem_reap();
1296                                 return (segkmem_alloc(vmp, size, vmflag));
1297                         }
1298                 }
1299 
1300                 if (!(vmflag & VM_NOSLEEP) &&
1301                     segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1302                     vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1303                     asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1304 
1305                         /*
1306                          * we are low on free memory in kmem_lp_arena
1307                          * we let only one guy to allocate heap_lp
1308                          * quantum size chunk that everybody is going to
1309                          * share
1310                          */
1311                         mutex_enter(&lpcb->lp_lock);
1312 
1313                         if (lpcb->lp_wait) {
1314 


1458                 return (0);
1459         }
1460 
1461         /* get a platform dependent value of large page size for kernel heap */
1462         segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1463 
1464         if (segkmem_lpsize <= PAGESIZE) {
1465                 /*
1466                  * put virtual space reserved for the large page kernel
1467                  * back to the regular heap
1468                  */
1469                 vmem_xfree(heap_arena, heap_lp_base,
1470                     heap_lp_end - heap_lp_base);
1471                 heap_lp_base = NULL;
1472                 heap_lp_end = NULL;
1473                 segkmem_lpsize = PAGESIZE;
1474                 return (0);
1475         }
1476 
1477         /* set heap_lp quantum if necessary */
1478         if (segkmem_heaplp_quantum == 0 || !ISP2(segkmem_heaplp_quantum) ||
1479             P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1480                 segkmem_heaplp_quantum = segkmem_lpsize;
1481         }
1482 
1483         /* set kmem_lp quantum if necessary */
1484         if (segkmem_kmemlp_quantum == 0 || !ISP2(segkmem_kmemlp_quantum) ||
1485             segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1486                 segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1487         }
1488 
1489         /* set total amount of memory allowed for large page kernel heap */
1490         if (segkmem_kmemlp_max == 0) {
1491                 if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1492                         segkmem_kmemlp_pcnt = 12;
1493                 segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1494         }
1495         segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1496             segkmem_heaplp_quantum);
1497 
1498         /* fix lp kmem preallocation request if necessary */
1499         if (segkmem_kmemlp_min) {
1500                 segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1501                     segkmem_heaplp_quantum);
1502                 if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1503                         segkmem_kmemlp_min = segkmem_kmemlp_max;
1504         }