Print this page
patch remove-load-flag
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/sys/disp.h
+++ new/usr/src/uts/common/sys/disp.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29
30 30 #ifndef _SYS_DISP_H
31 31 #define _SYS_DISP_H
32 32
33 33 #pragma ident "%Z%%M% %I% %E% SMI" /* SVr4.0 1.11 */
34 34
35 35 #include <sys/priocntl.h>
36 36 #include <sys/thread.h>
37 37 #include <sys/class.h>
38 38
39 39 #ifdef __cplusplus
40 40 extern "C" {
41 41 #endif
42 42
43 43 /*
44 44 * The following is the format of a dispatcher queue entry.
45 45 */
46 46 typedef struct dispq {
47 47 kthread_t *dq_first; /* first thread on queue or NULL */
48 48 kthread_t *dq_last; /* last thread on queue or NULL */
49 49 int dq_sruncnt; /* number of loaded, runnable */
50 50 /* threads on queue */
51 51 } dispq_t;
52 52
53 53 /*
54 54 * Dispatch queue structure.
55 55 */
56 56 typedef struct _disp {
57 57 disp_lock_t disp_lock; /* protects dispatching fields */
58 58 pri_t disp_npri; /* # of priority levels in queue */
59 59 dispq_t *disp_q; /* the dispatch queue */
60 60 dispq_t *disp_q_limit; /* ptr past end of dispatch queue */
61 61 ulong_t *disp_qactmap; /* bitmap of active dispatch queues */
62 62
63 63 /*
64 64 * Priorities:
65 65 * disp_maxrunpri is the maximum run priority of runnable threads
66 66 * on this queue. It is -1 if nothing is runnable.
67 67 *
68 68 * disp_max_unbound_pri is the maximum run priority of threads on
69 69 * this dispatch queue that are runnable by any CPU. This may be left
70 70 * artificially high, then corrected when some CPU tries to take
71 71 * an unbound thread. It is -1 if nothing is runnable.
72 72 */
73 73 pri_t disp_maxrunpri; /* maximum run priority */
74 74 pri_t disp_max_unbound_pri; /* max pri of unbound threads */
75 75
76 76 volatile int disp_nrunnable; /* runnable threads in cpu dispq */
77 77
78 78 struct cpu *disp_cpu; /* cpu owning this queue or NULL */
79 79 hrtime_t disp_steal; /* time when threads become stealable */
80 80 } disp_t;
81 81
82 82 #if defined(_KERNEL)
83 83
84 84 #define MAXCLSYSPRI 99
85 85 #define MINCLSYSPRI 60
86 86
87 87
88 88 /*
89 89 * Global scheduling variables.
90 90 * - See sys/cpuvar.h for CPU-local variables.
91 91 */
92 92 extern int nswapped; /* number of swapped threads */
93 93 /* nswapped protected by swap_lock */
94 94
95 95 extern pri_t minclsyspri; /* minimum level of any system class */
96 96 extern pri_t maxclsyspri; /* maximum level of any system class */
97 97 extern pri_t intr_pri; /* interrupt thread priority base level */
98 98
99 99 /*
100 100 * Minimum amount of time that a thread can remain runnable before it can
101 101 * be stolen by another CPU (in nanoseconds).
102 102 */
103 103 extern hrtime_t nosteal_nsec;
104 104
105 105 /*
106 106 * Kernel preemption occurs if a higher-priority thread is runnable with
107 107 * a priority at or above kpreemptpri.
108 108 *
109 109 * So that other processors can watch for such threads, a separate
110 110 * dispatch queue with unbound work above kpreemptpri is maintained.
111 111 * This is part of the CPU partition structure (cpupart_t).
112 112 */
113 113 extern pri_t kpreemptpri; /* level above which preemption takes place */
114 114
115 115 extern void disp_kp_alloc(disp_t *, pri_t); /* allocate kp queue */
116 116 extern void disp_kp_free(disp_t *); /* free kp queue */
117 117
118 118 /*
119 119 * Macro for use by scheduling classes to decide whether the thread is about
120 120 * to be scheduled or not. This returns the maximum run priority.
121 121 */
122 122 #define DISP_MAXRUNPRI(t) ((t)->t_disp_queue->disp_maxrunpri)
123 123
124 124 /*
125 125 * Platform callbacks for various dispatcher operations
126 126 *
127 127 * idle_cpu() is invoked when a CPU goes idle and has nothing to do.
128 128 * disp_enq_thread() is invoked when a thread is placed on a run queue.
129 129 */
130 130 extern void (*idle_cpu)();
131 131 extern void (*disp_enq_thread)(struct cpu *, int);
132 132
133 133
134 134 extern int dispdeq(kthread_t *);
135 135 extern void dispinit(void);
↓ open down ↓ |
135 lines elided |
↑ open up ↑ |
136 136 extern void disp_add(sclass_t *);
137 137 extern int intr_active(struct cpu *, int);
138 138 extern int servicing_interrupt(void);
139 139 extern void preempt(void);
140 140 extern void setbackdq(kthread_t *);
141 141 extern void setfrontdq(kthread_t *);
142 142 extern void swtch(void);
143 143 extern void swtch_to(kthread_t *);
144 144 extern void swtch_from_zombie(void)
145 145 __NORETURN;
146 -extern void dq_sruninc(kthread_t *);
147 -extern void dq_srundec(kthread_t *);
148 146 extern void cpu_rechoose(kthread_t *);
149 147 extern void cpu_surrender(kthread_t *);
150 148 extern void kpreempt(int);
151 149 extern struct cpu *disp_lowpri_cpu(struct cpu *, struct lgrp_ld *, pri_t,
152 150 struct cpu *);
153 151 extern int disp_bound_threads(struct cpu *, int);
154 152 extern int disp_bound_anythreads(struct cpu *, int);
155 153 extern int disp_bound_partition(struct cpu *, int);
156 154 extern void disp_cpu_init(struct cpu *);
157 155 extern void disp_cpu_fini(struct cpu *);
158 156 extern void disp_cpu_inactive(struct cpu *);
159 157 extern void disp_adjust_unbound_pri(kthread_t *);
160 158 extern void resume(kthread_t *);
161 159 extern void resume_from_intr(kthread_t *);
162 160 extern void resume_from_zombie(kthread_t *)
163 161 __NORETURN;
164 162 extern void disp_swapped_enq(kthread_t *);
165 163 extern int disp_anywork(void);
166 164
167 165 #define KPREEMPT_SYNC (-1)
168 166 #define kpreempt_disable() \
169 167 { \
170 168 curthread->t_preempt++; \
171 169 ASSERT(curthread->t_preempt >= 1); \
172 170 }
173 171 #define kpreempt_enable() \
174 172 { \
175 173 ASSERT(curthread->t_preempt >= 1); \
176 174 if (--curthread->t_preempt == 0 && \
177 175 CPU->cpu_kprunrun) \
178 176 kpreempt(KPREEMPT_SYNC); \
179 177 }
180 178
181 179 #endif /* _KERNEL */
182 180
183 181 #ifdef __cplusplus
184 182 }
185 183 #endif
186 184
187 185 #endif /* _SYS_DISP_H */
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX