62 #include <sys/kmem.h>
63 #include <sys/atomic.h>
64 #include <sys/strlog.h>
65 #include <sys/mman.h>
66 #include <sys/ontrap.h>
67 #include <sys/lgrp.h>
68 #include <sys/vfs.h>
69
70 #include <vm/hat.h>
71 #include <vm/anon.h>
72 #include <vm/page.h>
73 #include <vm/seg.h>
74 #include <vm/pvn.h>
75 #include <vm/seg_kmem.h>
76 #include <vm/vm_dep.h>
77 #include <sys/vm_usage.h>
78 #include <fs/fs_subr.h>
79 #include <sys/ddi.h>
80 #include <sys/modctl.h>
81
/*
 * Tuning knob: when nonzero, pages freed with the "dontneed" hint are
 * still placed at the tail of the cache list rather than the head
 * (see the page-free path below), effectively disabling the aging
 * shortcut for such pages.
 */
static int nopageage = 0;

static pgcnt_t max_page_get; /* max page_get request size in pages */
pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem. Also this lock protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these
 * globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem; /* protected by freemem_lock */
pgcnt_t availrmem_initial; /* presumably availrmem at boot -- confirm */
97
98 /*
99 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size. Historically pp_kernel is used for
101 * kernel size and is based on availrmem. But availrmem is adjusted for
102 * locked pages in the system not just for kernel locked pages.
103 * These new counters will track the pages locked through segvn and
2674
2675 /*
2676 * Now we add the page to the head of the free list.
2677 * But if this page is associated with a paged vnode
2678 * then we adjust the head forward so that the page is
2679 * effectively at the end of the list.
2680 */
2681 if (pp->p_vnode == NULL) {
2682 /*
2683 * Page has no identity, put it on the free list.
2684 */
2685 PP_SETAGED(pp);
2686 pp->p_offset = (u_offset_t)-1;
2687 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2688 VM_STAT_ADD(pagecnt.pc_free_free);
2689 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2690 "page_free_free:pp %p", pp);
2691 } else {
2692 PP_CLRAGED(pp);
2693
2694 if (!dontneed || nopageage) {
2695 /* move it to the tail of the list */
2696 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
2697
2698 VM_STAT_ADD(pagecnt.pc_free_cache);
2699 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL,
2700 "page_free_cache_tail:pp %p", pp);
2701 } else {
2702 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
2703
2704 VM_STAT_ADD(pagecnt.pc_free_dontneed);
2705 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD,
2706 "page_free_cache_head:pp %p", pp);
2707 }
2708 }
2709 page_unlock(pp);
2710
2711 /*
2712 * Now do the `freemem' accounting.
2713 */
2714 pcf_index = PCF_INDEX();
|
62 #include <sys/kmem.h>
63 #include <sys/atomic.h>
64 #include <sys/strlog.h>
65 #include <sys/mman.h>
66 #include <sys/ontrap.h>
67 #include <sys/lgrp.h>
68 #include <sys/vfs.h>
69
70 #include <vm/hat.h>
71 #include <vm/anon.h>
72 #include <vm/page.h>
73 #include <vm/seg.h>
74 #include <vm/pvn.h>
75 #include <vm/seg_kmem.h>
76 #include <vm/vm_dep.h>
77 #include <sys/vm_usage.h>
78 #include <fs/fs_subr.h>
79 #include <sys/ddi.h>
80 #include <sys/modctl.h>
81
static pgcnt_t max_page_get; /* max page_get request size in pages */
pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */

/*
 * freemem_lock protects all freemem variables:
 * availrmem. Also this lock protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these
 * globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem; /* protected by freemem_lock */
pgcnt_t availrmem_initial; /* presumably availrmem at boot -- confirm */
95
96 /*
97 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size. Historically pp_kernel is used for
99 * kernel size and is based on availrmem. But availrmem is adjusted for
100 * locked pages in the system not just for kernel locked pages.
101 * These new counters will track the pages locked through segvn and
2672
2673 /*
2674 * Now we add the page to the head of the free list.
2675 * But if this page is associated with a paged vnode
2676 * then we adjust the head forward so that the page is
2677 * effectively at the end of the list.
2678 */
2679 if (pp->p_vnode == NULL) {
2680 /*
2681 * Page has no identity, put it on the free list.
2682 */
2683 PP_SETAGED(pp);
2684 pp->p_offset = (u_offset_t)-1;
2685 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2686 VM_STAT_ADD(pagecnt.pc_free_free);
2687 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2688 "page_free_free:pp %p", pp);
2689 } else {
2690 PP_CLRAGED(pp);
2691
2692 if (!dontneed) {
2693 /* move it to the tail of the list */
2694 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
2695
2696 VM_STAT_ADD(pagecnt.pc_free_cache);
2697 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL,
2698 "page_free_cache_tail:pp %p", pp);
2699 } else {
2700 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
2701
2702 VM_STAT_ADD(pagecnt.pc_free_dontneed);
2703 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD,
2704 "page_free_cache_head:pp %p", pp);
2705 }
2706 }
2707 page_unlock(pp);
2708
2709 /*
2710 * Now do the `freemem' accounting.
2711 */
2712 pcf_index = PCF_INDEX();
|