6583 remove whole-process swapping
--- old/usr/src/uts/common/os/clock.c
+++ new/usr/src/uts/common/os/clock.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 22 /* All Rights Reserved */
23 23
24 24 /*
25 25 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
26 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 27 */
28 28
29 29 #include <sys/param.h>
30 30 #include <sys/t_lock.h>
31 31 #include <sys/types.h>
32 32 #include <sys/tuneable.h>
33 33 #include <sys/sysmacros.h>
34 34 #include <sys/systm.h>
35 35 #include <sys/cpuvar.h>
36 36 #include <sys/lgrp.h>
37 37 #include <sys/user.h>
38 38 #include <sys/proc.h>
39 39 #include <sys/callo.h>
40 40 #include <sys/kmem.h>
41 41 #include <sys/var.h>
42 42 #include <sys/cmn_err.h>
43 43 #include <sys/swap.h>
44 44 #include <sys/vmsystm.h>
45 45 #include <sys/class.h>
46 46 #include <sys/time.h>
47 47 #include <sys/debug.h>
48 48 #include <sys/vtrace.h>
49 49 #include <sys/spl.h>
50 50 #include <sys/atomic.h>
51 51 #include <sys/dumphdr.h>
52 52 #include <sys/archsystm.h>
53 53 #include <sys/fs/swapnode.h>
54 54 #include <sys/panic.h>
55 55 #include <sys/disp.h>
56 56 #include <sys/msacct.h>
57 57 #include <sys/mem_cage.h>
58 58
59 59 #include <vm/page.h>
60 60 #include <vm/anon.h>
61 61 #include <vm/rm.h>
62 62 #include <sys/cyclic.h>
63 63 #include <sys/cpupart.h>
64 64 #include <sys/rctl.h>
65 65 #include <sys/task.h>
66 66 #include <sys/sdt.h>
67 67 #include <sys/ddi_periodic.h>
68 68 #include <sys/random.h>
69 69 #include <sys/modctl.h>
70 70 #include <sys/zone.h>
71 71
72 72 /*
73 73 * for NTP support
74 74 */
75 75 #include <sys/timex.h>
76 76 #include <sys/inttypes.h>
77 77
78 78 #include <sys/sunddi.h>
79 79 #include <sys/clock_impl.h>
80 80
81 81 /*
82 82 * clock() is called straight from the clock cyclic; see clock_init().
83 83 *
84 84 * Functions:
85 85 * reprime clock
86 86 * maintain date
87 87 * jab the scheduler
88 88 */
89 89
90 90 extern kcondvar_t fsflush_cv;
91 91 extern sysinfo_t sysinfo;
92 92 extern vminfo_t vminfo;
93 93 extern int idleswtch; /* flag set while idle in pswtch() */
94 94 extern hrtime_t volatile devinfo_freeze;
95 95
96 96 /*
97 97 * high-precision avenrun values. These are needed to make the
98 98 * regular avenrun values accurate.
99 99 */
100 100 static uint64_t hp_avenrun[3];
101 101 int avenrun[3]; /* FSCALED average run queue lengths */
102 102 time_t time; /* time in seconds since 1970 - for compatibility only */
103 103
104 104 static struct loadavg_s loadavg;
105 105 /*
106 106 * Phase/frequency-lock loop (PLL/FLL) definitions
107 107 *
108 108 * The following variables are read and set by the ntp_adjtime() system
109 109 * call.
110 110 *
111 111 * time_state shows the state of the system clock, with values defined
112 112 * in the timex.h header file.
113 113 *
114 114 * time_status shows the status of the system clock, with bits defined
115 115 * in the timex.h header file.
116 116 *
117 117 * time_offset is used by the PLL/FLL to adjust the system time in small
118 118 * increments.
119 119 *
120 120 * time_constant determines the bandwidth or "stiffness" of the PLL.
121 121 *
122 122 * time_tolerance determines maximum frequency error or tolerance of the
123 123 * CPU clock oscillator and is a property of the architecture; however,
124 124 * in principle it could change as a result of the presence of external
125 125 * discipline signals, for instance.
126 126 *
127 127 * time_precision is usually equal to the kernel tick variable; however,
128 128 * in cases where a precision clock counter or external clock is
129 129 * available, the resolution can be much less than this and depend on
130 130 * whether the external clock is working or not.
131 131 *
132 132 * time_maxerror is initialized by a ntp_adjtime() call and increased by
133 133 * the kernel once each second to reflect the maximum error bound
134 134 * growth.
135 135 *
136 136 * time_esterror is set and read by the ntp_adjtime() call, but
137 137 * otherwise not used by the kernel.
138 138 */
139 139 int32_t time_state = TIME_OK; /* clock state */
140 140 int32_t time_status = STA_UNSYNC; /* clock status bits */
141 141 int32_t time_offset = 0; /* time offset (us) */
142 142 int32_t time_constant = 0; /* pll time constant */
143 143 int32_t time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */
144 144 int32_t time_precision = 1; /* clock precision (us) */
145 145 int32_t time_maxerror = MAXPHASE; /* maximum error (us) */
146 146 int32_t time_esterror = MAXPHASE; /* estimated error (us) */
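
These variables are exported to and from userland through the ntp_adjtime(2) call mentioned above. A minimal sketch of a query, assuming the standard sys/timex.h interface (with modes set to zero the call adjusts nothing and simply reports the clock state):

#include <sys/timex.h>
#include <stdio.h>

int
main(void)
{
	struct timex tx = { 0 };	/* modes == 0: read-only query */
	int state = ntp_adjtime(&tx);	/* returns TIME_OK, TIME_INS, ... */

	(void) printf("state=%d status=0x%x offset=%ld freq=%ld\n",
	    state, (unsigned int)tx.status, (long)tx.offset, (long)tx.freq);
	return (0);
}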
147 147
148 148 /*
149 149 * The following variables establish the state of the PLL/FLL and the
150 150 * residual time and frequency offset of the local clock. The scale
151 151 * factors are defined in the timex.h header file.
152 152 *
153 153 * time_phase and time_freq are the phase increment and the frequency
154 154 * increment, respectively, of the kernel time variable.
155 155 *
156 156 * time_freq is set via ntp_adjtime() from a value stored in a file when
157 157 * the synchronization daemon is first started. Its value is retrieved
158 158 * via ntp_adjtime() and written to the file about once per hour by the
159 159 * daemon.
160 160 *
161 161 * time_adj is the adjustment added to the value of tick at each timer
162 162 * interrupt and is recomputed from time_phase and time_freq at each
163 163 * seconds rollover.
164 164 *
165 165 * time_reftime is the second's portion of the system time at the last
166 166 * call to ntp_adjtime(). It is used to adjust the time_freq variable
167 167 * and to increase the time_maxerror as the time since last update
168 168 * increases.
169 169 */
170 170 int32_t time_phase = 0; /* phase offset (scaled us) */
171 171 int32_t time_freq = 0; /* frequency offset (scaled ppm) */
172 172 int32_t time_adj = 0; /* tick adjust (scaled 1 / hz) */
173 173 int32_t time_reftime = 0; /* time at last adjustment (s) */
174 174
175 175 /*
176 176 * The scale factors of the following variables are defined in the
177 177 * timex.h header file.
178 178 *
179 179 * pps_time contains the time at each calibration interval, as read by
180 180 * microtime(). pps_count counts the seconds of the calibration
181 181 * interval, the duration of which is nominally pps_shift in powers of
182 182 * two.
183 183 *
184 184 * pps_offset is the time offset produced by the time median filter
185 185 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
186 186 * this filter.
187 187 *
188 188 * pps_freq is the frequency offset produced by the frequency median
189 189 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
190 190 * by this filter.
191 191 *
192 192 * pps_usec is latched from a high resolution counter or external clock
193 193 * at pps_time. Here we want the hardware counter contents only, not the
194 194 * contents plus the time_tv.usec as usual.
195 195 *
196 196 * pps_valid counts the number of seconds since the last PPS update. It
197 197 * is used as a watchdog timer to disable the PPS discipline should the
198 198 * PPS signal be lost.
199 199 *
200 200 * pps_glitch counts the number of seconds since the beginning of an
201 201 * offset burst more than tick/2 from current nominal offset. It is used
202 202 * mainly to suppress error bursts due to priority conflicts between the
203 203 * PPS interrupt and timer interrupt.
204 204 *
205 205 * pps_intcnt counts the calibration intervals for use in the interval-
206 206 * adaptation algorithm. It's just too complicated for words.
207 207 */
208 208 struct timeval pps_time; /* kernel time at last interval */
209 209 int32_t pps_tf[] = {0, 0, 0}; /* pps time offset median filter (us) */
210 210 int32_t pps_offset = 0; /* pps time offset (us) */
211 211 int32_t pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */
212 212 int32_t pps_ff[] = {0, 0, 0}; /* pps frequency offset median filter */
213 213 int32_t pps_freq = 0; /* frequency offset (scaled ppm) */
214 214 int32_t pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
215 215 int32_t pps_usec = 0; /* microsec counter at last interval */
216 216 int32_t pps_valid = PPS_VALID; /* pps signal watchdog counter */
217 217 int32_t pps_glitch = 0; /* pps signal glitch counter */
218 218 int32_t pps_count = 0; /* calibration interval counter (s) */
219 219 int32_t pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */
220 220 int32_t pps_intcnt = 0; /* intervals at current duration */
221 221
222 222 /*
223 223 * PPS signal quality monitors
224 224 *
225 225 * pps_jitcnt counts the seconds that have been discarded because the
226 226 * jitter measured by the time median filter exceeds the limit MAXTIME
227 227 * (100 us).
228 228 *
229 229 * pps_calcnt counts the frequency calibration intervals, which are
230 230 * variable from 4 s to 256 s.
231 231 *
232 232 * pps_errcnt counts the calibration intervals which have been discarded
233 233 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
234 234 * calibration interval jitter exceeds two ticks.
235 235 *
236 236 * pps_stbcnt counts the calibration intervals that have been discarded
237 237 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
238 238 */
239 239 int32_t pps_jitcnt = 0; /* jitter limit exceeded */
240 240 int32_t pps_calcnt = 0; /* calibration intervals */
241 241 int32_t pps_errcnt = 0; /* calibration errors */
242 242 int32_t pps_stbcnt = 0; /* stability limit exceeded */
243 243
244 244 kcondvar_t lbolt_cv;
245 245
246 246 /*
247 247 * Hybrid lbolt implementation:
248 248 *
249 249 * The service historically provided by the lbolt and lbolt64 variables has
250 250 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
251 251 * original symbols removed from the system. The once clock driven variables are
252 252 * now implemented in an event driven fashion, backed by gethrtime() coarsened to
253 253 * the appropriate clock resolution. The default event driven implementation is
254 254 * complemented by a cyclic driven one, active only during periods of intense
255 255 * activity around the DDI lbolt routines, when a lbolt specific cyclic is
256 256 * reprogrammed to fire at a clock tick interval to serve consumers of lbolt who
257 257 * rely on the original low cost of consulting a memory position.
258 258 *
259 259 * The implementation uses the number of calls to these routines and the
260 260 * frequency of these to determine when to transition from event to cyclic
261 261 * driven and vice-versa. These values are kept on a per CPU basis for
262 262 * scalability reasons and to prevent CPUs from constantly invalidating a single
263 263 * cache line when modifying a global variable. The transition from event to
264 264 * cyclic mode happens once the thresholds are crossed, and activity on any CPU
265 265 * can cause such transition.
266 266 *
267 267 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
268 268 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
269 269 * lbolt_cyclic_driven() according to the current mode. When the thresholds
270 270 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
271 271 * fire at a nsec_per_tick interval and increment an internal variable at
272 272 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
273 273 * will simply return the value of such variable. lbolt_cyclic() will attempt
274 274 * to shut itself off at each threshold interval (sampling period for calls
275 275 * to the DDI lbolt routines), and return to the event driven mode, but will
276 276 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
277 277 *
278 278 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
279 279 * for the cyclic subsystem to be initialized.
280 280 *
281 281 */
282 282 int64_t lbolt_bootstrap(void);
283 283 int64_t lbolt_event_driven(void);
284 284 int64_t lbolt_cyclic_driven(void);
285 285 int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
286 286 uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);
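
For reviewers unfamiliar with the scheme, a stripped-down userland sketch of the dispatch pattern described above; the names, the 100 Hz rate, and the single global counter are illustrative stand-ins (the kernel keeps per-CPU counters and performs the switch from a soft interrupt):

#include <stdint.h>
#include <time.h>

static int64_t cached_ticks;	/* advanced by a periodic timer in cyclic mode */
static int64_t call_count;	/* the kernel keeps this per CPU */

static int64_t ticks_event_driven(void);
static int64_t ticks_cyclic_driven(void);

/* consumers always call through this pointer, as ddi_get_lbolt() does */
static int64_t (*ticks_hybrid)(void) = ticks_event_driven;

static int64_t
ticks_now(void)
{
	/* derive a tick count from the high resolution clock */
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((int64_t)ts.tv_sec * 100 + ts.tv_nsec / 10000000);
}

static int64_t
ticks_event_driven(void)
{
	if (++call_count > 1000) {	/* heavy use: switch to cyclic mode */
		cached_ticks = ticks_now();
		ticks_hybrid = ticks_cyclic_driven;
	}
	return (ticks_now());
}

static int64_t
ticks_cyclic_driven(void)
{
	return (cached_ticks);		/* the original cheap memory read */
}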
287 287
288 288 /*
289 289 * lbolt's cyclic, installed by clock_init().
290 290 */
291 291 static void lbolt_cyclic(void);
292 292
293 293 /*
294 294 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
295 295 * from switching back to event driven, once it reaches cyclic mode.
296 296 */
297 297 static boolean_t lbolt_cyc_only = B_FALSE;
298 298
299 299 /*
300 300 * Cache aligned, per CPU structure with lbolt usage statistics.
301 301 */
302 302 static lbolt_cpu_t *lb_cpu;
303 303
304 304 /*
305 305 * Single, cache aligned, structure with all the information required by
306 306 * the lbolt implementation.
307 307 */
308 308 lbolt_info_t *lb_info;
309 309
310 310
311 311 int one_sec = 1; /* turned on once every second */
312 312 static int fsflushcnt; /* counter for t_fsflushr */
313 313 int dosynctodr = 1; /* patchable; enable/disable sync to TOD chip */
314 314 int tod_needsync = 0; /* need to sync tod chip with software time */
315 315 static int tod_broken = 0; /* clock chip doesn't work */
316 316 time_t boot_time = 0; /* Boot time in seconds since 1970 */
317 317 cyclic_id_t clock_cyclic; /* clock()'s cyclic_id */
318 318 cyclic_id_t deadman_cyclic; /* deadman()'s cyclic_id */
319 319
320 320 extern void clock_tick_schedule(int);
321 321
322 322 static int lgrp_ticks; /* counter to schedule lgrp load calcs */
323 323
324 324 /*
325 325 * for tod fault detection
326 326 */
327 327 #define TOD_REF_FREQ ((longlong_t)(NANOSEC))
328 328 #define TOD_STALL_THRESHOLD (TOD_REF_FREQ * 3 / 2)
329 329 #define TOD_JUMP_THRESHOLD (TOD_REF_FREQ / 2)
330 330 #define TOD_FILTER_N 4
331 331 #define TOD_FILTER_SETTLE (4 * TOD_FILTER_N)
332 332 static enum tod_fault_type tod_faulted = TOD_NOFAULT;
333 333
334 334 static int tod_status_flag = 0; /* used by tod_validate() */
335 335
336 336 static hrtime_t prev_set_tick = 0; /* gethrtime() prior to tod_set() */
337 337 static time_t prev_set_tod = 0; /* tv_sec value passed to tod_set() */
338 338
339 339 /* patchable via /etc/system */
340 340 int tod_validate_enable = 1;
341 341
342 342 /* Diagnose/Limit messages about delay(9F) called from interrupt context */
343 343 int delay_from_interrupt_diagnose = 0;
344 344 volatile uint32_t delay_from_interrupt_msg = 20;
345 345
346 346 /*
347 347 * On non-SPARC systems, TOD validation must be deferred until gethrtime
348 348 * returns non-zero values (after mach_clkinit's execution).
349 349 * On SPARC systems, it must be deferred until after hrtime_base
350 350 * and hres_last_tick are set (in the first invocation of hres_tick).
351 351 * Since in both cases the prerequisites occur before the invocation of
352 352 * tod_get() in clock(), the deferment is lifted there.
353 353 */
354 354 static boolean_t tod_validate_deferred = B_TRUE;
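
tod_validate() itself is not in this hunk, but the thresholds above drive checks of roughly this shape: compare how far the TOD chip moved against how far the high-resolution clock moved since the last read. A simplified, illustrative sketch of the stall check only:

#include <stdint.h>
#include <time.h>

#define NANOSEC_SK	1000000000LL
#define STALL_NS	(NANOSEC_SK * 3 / 2)	/* cf. TOD_STALL_THRESHOLD */

/*
 * Nonzero if the TOD chip looks stalled: the high-resolution clock
 * advanced well past a second while the TOD seconds count did not move.
 */
static int
tod_looks_stalled(time_t tod_prev, time_t tod_now,
    int64_t hrt_prev, int64_t hrt_now)
{
	return (tod_now == tod_prev && (hrt_now - hrt_prev) > STALL_NS);
}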
355 355
356 356 /*
357 357 * tod_fault_table[] must be aligned with
358 358 * enum tod_fault_type in systm.h
359 359 */
360 360 static char *tod_fault_table[] = {
361 361 "Reversed", /* TOD_REVERSED */
362 362 "Stalled", /* TOD_STALLED */
363 363 "Jumped", /* TOD_JUMPED */
364 364 "Changed in Clock Rate", /* TOD_RATECHANGED */
365 365 "Is Read-Only" /* TOD_RDONLY */
366 366 /*
367 367 * no strings needed for TOD_NOFAULT
368 368 */
369 369 };
370 370
371 371 /*
372 372 * test hook for tod broken detection in tod_validate
373 373 */
374 374 int tod_unit_test = 0;
375 375 time_t tod_test_injector;
376 376
377 377 #define CLOCK_ADJ_HIST_SIZE 4
378 378
379 379 static int adj_hist_entry;
380 380
381 381 int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];
382 382
383 383 static void calcloadavg(int, uint64_t *);
384 384 static int genloadavg(struct loadavg_s *);
385 385 static void loadavg_update();
386 386
387 387 void (*cmm_clock_callout)() = NULL;
388 388 void (*cpucaps_clock_callout)() = NULL;
389 389
390 390 extern clock_t clock_tick_proc_max;
391 391
392 392 static int64_t deadman_counter = 0;
393 393
394 394 static void
395 395 clock(void)
396 396 {
397 397 kthread_t *t;
398 398 uint_t nrunnable;
399 399 uint_t w_io;
400 400 cpu_t *cp;
401 401 cpupart_t *cpupart;
402 402 extern void set_freemem();
403 403 void (*funcp)();
404 404 int32_t ltemp;
405 405 int64_t lltemp;
406 406 int s;
407 407 int do_lgrp_load;
408 408 int i;
409 409 clock_t now = LBOLT_NO_ACCOUNT; /* current tick */
410 410
411 411 if (panicstr)
412 412 return;
413 413
414 414 /*
415 415 * Make sure that 'freemem' does not drift too far from the truth
416 416 */
417 417 set_freemem();
418 418
419 419
420 420 /*
421 421 * Before the section which is repeated is executed, we do
422 422 * the time delta processing which occurs every clock tick
423 423 *
424 424 * There is additional processing which happens every time
425 425 * the nanosecond counter rolls over which is described
426 426 * below - see the section which begins with : if (one_sec)
427 427 *
428 428 * This section marks the beginning of the precision-kernel
429 429 * code fragment.
430 430 *
431 431 * First, compute the phase adjustment. If the low-order bits
432 432 * (time_phase) of the update overflow, bump the higher order
433 433 * bits (time_update).
434 434 */
435 435 time_phase += time_adj;
436 436 if (time_phase <= -FINEUSEC) {
437 437 ltemp = -time_phase / SCALE_PHASE;
438 438 time_phase += ltemp * SCALE_PHASE;
439 439 s = hr_clock_lock();
440 440 timedelta -= ltemp * (NANOSEC/MICROSEC);
441 441 hr_clock_unlock(s);
442 442 } else if (time_phase >= FINEUSEC) {
443 443 ltemp = time_phase / SCALE_PHASE;
444 444 time_phase -= ltemp * SCALE_PHASE;
445 445 s = hr_clock_lock();
446 446 timedelta += ltemp * (NANOSEC/MICROSEC);
447 447 hr_clock_unlock(s);
448 448 }
449 449
450 450 /*
451 451 * End of precision-kernel code fragment which is processed
452 452 * every timer interrupt.
453 453 *
454 454 * Continue with the interrupt processing as scheduled.
455 455 */
456 456 /*
457 457 * Count the number of runnable threads and the number waiting
458 458 * for some form of I/O to complete -- gets added to
459 459 * sysinfo.waiting. To know the state of the system, must add
460 460 * wait counts from all CPUs. Also add up the per-partition
461 461 * statistics.
462 462 */
463 463 w_io = 0;
464 464 nrunnable = 0;
465 465
466 466 /*
467 467 * keep track of when to update lgrp/part loads
468 468 */
469 469
470 470 do_lgrp_load = 0;
471 471 if (lgrp_ticks++ >= hz / 10) {
472 472 lgrp_ticks = 0;
473 473 do_lgrp_load = 1;
474 474 }
475 475
476 476 if (one_sec) {
477 477 loadavg_update();
478 478 deadman_counter++;
479 479 }
480 480
481 481 /*
482 482 * First count the threads waiting on kpreempt queues in each
483 483 * CPU partition.
484 484 */
485 485
486 486 cpupart = cp_list_head;
487 487 do {
488 488 uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;
489 489
490 490 cpupart->cp_updates++;
491 491 nrunnable += cpupart_nrunnable;
492 492 cpupart->cp_nrunnable_cum += cpupart_nrunnable;
493 493 if (one_sec) {
494 494 cpupart->cp_nrunning = 0;
495 495 cpupart->cp_nrunnable = cpupart_nrunnable;
496 496 }
497 497 } while ((cpupart = cpupart->cp_next) != cp_list_head);
498 498
499 499
500 500 /* Now count the per-CPU statistics. */
501 501 cp = cpu_list;
502 502 do {
503 503 uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;
504 504
505 505 nrunnable += cpu_nrunnable;
506 506 cpupart = cp->cpu_part;
507 507 cpupart->cp_nrunnable_cum += cpu_nrunnable;
508 508 if (one_sec) {
509 509 cpupart->cp_nrunnable += cpu_nrunnable;
510 510 /*
511 511 * Update user, system, and idle cpu times.
512 512 */
513 513 cpupart->cp_nrunning++;
514 514 /*
515 515 * w_io is used to update sysinfo.waiting during
516 516 * one_second processing below. Only gather w_io
517 517 * information when we walk the list of cpus if we're
518 518 * going to perform one_second processing.
519 519 */
520 520 w_io += CPU_STATS(cp, sys.iowait);
521 521 }
522 522
523 523 if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
524 524 int i, load, change;
525 525 hrtime_t intracct, intrused;
526 526 const hrtime_t maxnsec = 1000000000;
527 527 const int precision = 100;
528 528
529 529 /*
530 530 * Estimate interrupt load on this cpu each second.
531 531 * Computes cpu_intrload as %utilization (0-99).
532 532 */
533 533
534 534 /* add up interrupt time from all micro states */
535 535 for (intracct = 0, i = 0; i < NCMSTATES; i++)
536 536 intracct += cp->cpu_intracct[i];
537 537 scalehrtime(&intracct);
538 538
539 539 /* compute nsec used in the past second */
540 540 intrused = intracct - cp->cpu_intrlast;
541 541 cp->cpu_intrlast = intracct;
542 542
543 543 /* limit the value for safety (and the first pass) */
544 544 if (intrused >= maxnsec)
545 545 intrused = maxnsec - 1;
546 546
547 547 /* calculate %time in interrupt */
548 548 load = (precision * intrused) / maxnsec;
549 549 ASSERT(load >= 0 && load < precision);
550 550 change = cp->cpu_intrload - load;
551 551
552 552 /* jump to new max, or decay the old max */
553 553 if (change < 0)
554 554 cp->cpu_intrload = load;
555 555 else if (change > 0)
556 556 cp->cpu_intrload -= (change + 3) / 4;
557 557
558 558 DTRACE_PROBE3(cpu_intrload,
559 559 cpu_t *, cp,
560 560 hrtime_t, intracct,
561 561 hrtime_t, intrused);
562 562 }
563 563
564 564 if (do_lgrp_load &&
565 565 (cp->cpu_flags & CPU_EXISTS)) {
566 566 /*
567 567 * When updating the lgroup's load average,
568 568 * account for the thread running on the CPU.
569 569 * If the CPU is the current one, then we need
570 570 * to account for the underlying thread which
571 571 * got the clock interrupt, not the thread that is
572 572 * handling the interrupt and calculating the load
573 573 * average
574 574 */
575 575 t = cp->cpu_thread;
576 576 if (CPU == cp)
577 577 t = t->t_intr;
578 578
579 579 /*
580 580 * Account for the load average for this thread if
581 581 * it isn't the idle thread or it is on the interrupt
582 582 * stack and not the current CPU handling the clock
583 583 * interrupt
584 584 */
585 585 if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
586 586 CPU_ON_INTR(cp))) {
587 587 if (t->t_lpl == cp->cpu_lpl) {
588 588 /* local thread */
589 589 cpu_nrunnable++;
590 590 } else {
591 591 /*
592 592 * This is a remote thread, charge it
593 593 * against its home lgroup. Note that
594 594 * we notice that a thread is remote
595 595 * only if it's currently executing.
596 596 * This is a reasonable approximation,
597 597 * since queued remote threads are rare.
598 598 * Note also that if we didn't charge
599 599 * it to its home lgroup, remote
600 600 * execution would often make a system
601 601 * appear balanced even though it was
602 602 * not, and thread placement/migration
603 603 * would often not be done correctly.
604 604 */
605 605 lgrp_loadavg(t->t_lpl,
606 606 LGRP_LOADAVG_IN_THREAD_MAX, 0);
607 607 }
608 608 }
609 609 lgrp_loadavg(cp->cpu_lpl,
610 610 cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
611 611 }
612 612 } while ((cp = cp->cpu_next) != cpu_list);
613 613
614 614 clock_tick_schedule(one_sec);
615 615
616 616 /*
617 617 * Check for a callout that needs be called from the clock
618 618 * thread to support the membership protocol in a clustered
619 619 * system. Copy the function pointer so that we can reset
620 620 * this to NULL if needed.
621 621 */
622 622 if ((funcp = cmm_clock_callout) != NULL)
623 623 (*funcp)();
624 624
625 625 if ((funcp = cpucaps_clock_callout) != NULL)
626 626 (*funcp)();
627 627
628 628 /*
629 629 * Wakeup the cageout thread waiters once per second.
630 630 */
631 631 if (one_sec)
632 632 kcage_tick();
633 633
634 634 if (one_sec) {
635 635
636 636 int drift, absdrift;
637 637 timestruc_t tod;
638 638 int s;
639 639
640 640 /*
641 641 * Beginning of precision-kernel code fragment executed
642 642 * every second.
643 643 *
644 644 * On rollover of the second the phase adjustment to be
645 645 * used for the next second is calculated. Also, the
646 646 * maximum error is increased by the tolerance. If the
647 647 * PPS frequency discipline code is present, the phase is
648 648 * increased to compensate for the CPU clock oscillator
649 649 * frequency error.
650 650 *
651 651 * On a 32-bit machine and given parameters in the timex.h
652 652 * header file, the maximum phase adjustment is +-512 ms
653 653 * and maximum frequency offset is (a tad less than)
654 654 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
655 655 */
656 656 time_maxerror += time_tolerance / SCALE_USEC;
657 657
658 658 /*
659 659 * Leap second processing. If in leap-insert state at
660 660 * the end of the day, the system clock is set back one
661 661 * second; if in leap-delete state, the system clock is
662 662 * set ahead one second. The microtime() routine or
663 663 * external clock driver will ensure that reported time
664 664 * is always monotonic. The ugly divides should be
665 665 * replaced.
666 666 */
667 667 switch (time_state) {
668 668
669 669 case TIME_OK:
670 670 if (time_status & STA_INS)
671 671 time_state = TIME_INS;
672 672 else if (time_status & STA_DEL)
673 673 time_state = TIME_DEL;
674 674 break;
675 675
676 676 case TIME_INS:
677 677 if (hrestime.tv_sec % 86400 == 0) {
678 678 s = hr_clock_lock();
679 679 hrestime.tv_sec--;
680 680 hr_clock_unlock(s);
681 681 time_state = TIME_OOP;
682 682 }
683 683 break;
684 684
685 685 case TIME_DEL:
686 686 if ((hrestime.tv_sec + 1) % 86400 == 0) {
687 687 s = hr_clock_lock();
688 688 hrestime.tv_sec++;
689 689 hr_clock_unlock(s);
690 690 time_state = TIME_WAIT;
691 691 }
692 692 break;
693 693
694 694 case TIME_OOP:
695 695 time_state = TIME_WAIT;
696 696 break;
697 697
698 698 case TIME_WAIT:
699 699 if (!(time_status & (STA_INS | STA_DEL)))
700 700 time_state = TIME_OK;
701 701 default:
702 702 break;
703 703 }
704 704
705 705 /*
706 706 * Compute the phase adjustment for the next second. In
707 707 * PLL mode, the offset is reduced by a fixed factor
708 708 * times the time constant. In FLL mode the offset is
709 709 * used directly. In either mode, the maximum phase
710 710 * adjustment for each second is clamped so as to spread
711 711 * the adjustment over not more than the number of
712 712 * seconds between updates.
713 713 */
714 714 if (time_offset == 0)
715 715 time_adj = 0;
716 716 else if (time_offset < 0) {
717 717 lltemp = -time_offset;
718 718 if (!(time_status & STA_FLL)) {
719 719 if ((1 << time_constant) >= SCALE_KG)
720 720 lltemp *= (1 << time_constant) /
721 721 SCALE_KG;
722 722 else
723 723 lltemp = (lltemp / SCALE_KG) >>
724 724 time_constant;
725 725 }
726 726 if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
727 727 lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
728 728 time_offset += lltemp;
729 729 time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
730 730 } else {
731 731 lltemp = time_offset;
732 732 if (!(time_status & STA_FLL)) {
733 733 if ((1 << time_constant) >= SCALE_KG)
734 734 lltemp *= (1 << time_constant) /
735 735 SCALE_KG;
736 736 else
737 737 lltemp = (lltemp / SCALE_KG) >>
738 738 time_constant;
739 739 }
740 740 if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
741 741 lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
742 742 time_offset -= lltemp;
743 743 time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
744 744 }
745 745
746 746 /*
747 747 * Compute the frequency estimate and additional phase
748 748 * adjustment due to frequency error for the next
749 749 * second. When the PPS signal is engaged, gnaw on the
750 750 * watchdog counter and update the frequency computed by
751 751 * the pll and the PPS signal.
752 752 */
753 753 pps_valid++;
754 754 if (pps_valid == PPS_VALID) {
755 755 pps_jitter = MAXTIME;
756 756 pps_stabil = MAXFREQ;
757 757 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
758 758 STA_PPSWANDER | STA_PPSERROR);
759 759 }
760 760 lltemp = time_freq + pps_freq;
761 761
762 762 if (lltemp)
763 763 time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);
764 764
765 765 /*
766 766 * End of precision-kernel code fragment
767 767 *
768 768 * The section below should be modified if we are planning
769 769 * to use NTP for synchronization.
770 770 *
771 771 * Note: the clock synchronization code now assumes
772 772 * the following:
773 773 * - if dosynctodr is 1, then compute the drift between
774 774 * the tod chip and software time and adjust one or
775 775 * the other depending on the circumstances
776 776 *
777 777 * - if dosynctodr is 0, then the tod chip is independent
778 778 * of the software clock and should not be adjusted,
779 779 * but allowed to free run. this allows NTP to sync.
780 780 * hrestime without any interference from the tod chip.
781 781 */
782 782
783 783 tod_validate_deferred = B_FALSE;
784 784 mutex_enter(&tod_lock);
785 785 tod = tod_get();
786 786 drift = tod.tv_sec - hrestime.tv_sec;
787 787 absdrift = (drift >= 0) ? drift : -drift;
788 788 if (tod_needsync || absdrift > 1) {
789 789 int s;
790 790 if (absdrift > 2) {
791 791 if (!tod_broken && tod_faulted == TOD_NOFAULT) {
792 792 s = hr_clock_lock();
793 793 hrestime = tod;
794 794 membar_enter(); /* hrestime visible */
795 795 timedelta = 0;
796 796 timechanged++;
797 797 tod_needsync = 0;
798 798 hr_clock_unlock(s);
799 799 callout_hrestime();
800 800
801 801 }
802 802 } else {
803 803 if (tod_needsync || !dosynctodr) {
804 804 gethrestime(&tod);
805 805 tod_set(tod);
806 806 s = hr_clock_lock();
807 807 if (timedelta == 0)
808 808 tod_needsync = 0;
809 809 hr_clock_unlock(s);
810 810 } else {
811 811 /*
812 812 * If the drift is 2 seconds on the
813 813 * money, then the TOD is adjusting
814 814 * the clock; record that.
815 815 */
816 816 clock_adj_hist[adj_hist_entry++ %
817 817 CLOCK_ADJ_HIST_SIZE] = now;
818 818 s = hr_clock_lock();
819 819 timedelta = (int64_t)drift*NANOSEC;
820 820 hr_clock_unlock(s);
821 821 }
822 822 }
823 823 }
824 824 one_sec = 0;
825 825 time = gethrestime_sec(); /* for crusty old kmem readers */
826 826 mutex_exit(&tod_lock);
827 827
828 828 /*
829 829 * Some drivers still depend on this... XXX
830 830 */
831 831 cv_broadcast(&lbolt_cv);
832 832
833 833 vminfo.freemem += freemem;
834 834 {
835 835 pgcnt_t maxswap, resv, free;
836 836 pgcnt_t avail =
837 837 MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
838 838
839 839 maxswap = k_anoninfo.ani_mem_resv +
840 840 k_anoninfo.ani_max + avail;
841 841 /* Update ani_free */
842 842 set_anoninfo();
843 843 free = k_anoninfo.ani_free + avail;
844 844 resv = k_anoninfo.ani_phys_resv +
845 845 k_anoninfo.ani_mem_resv;
846 846
847 847 vminfo.swap_resv += resv;
848 848 /* number of reserved and allocated pages */
849 849 #ifdef DEBUG
850 850 if (maxswap < free)
851 851 cmn_err(CE_WARN, "clock: maxswap < free");
852 852 if (maxswap < resv)
853 853 cmn_err(CE_WARN, "clock: maxswap < resv");
854 854 #endif
855 855 vminfo.swap_alloc += maxswap - free;
856 856 vminfo.swap_avail += maxswap - resv;
857 857 vminfo.swap_free += free;
858 858 }
859 859 vminfo.updates++;
860 860 if (nrunnable) {
861 861 sysinfo.runque += nrunnable;
862 862 sysinfo.runocc++;
863 863 }
864 864 if (nswapped) {
865 865 sysinfo.swpque += nswapped;
866 866 sysinfo.swpocc++;
867 867 }
868 868 sysinfo.waiting += w_io;
869 869 sysinfo.updates++;
870 870
871 871 /*
872 872 * Wake up fsflush to write out DELWRI
873 873 * buffers, dirty pages and other cached
874 874 * administrative data, e.g. inodes.
875 875 */
876 876 if (--fsflushcnt <= 0) {
877 877 fsflushcnt = tune.t_fsflushr;
878 878 cv_signal(&fsflush_cv);
879 879 }
880 880
881 881 vmmeter();
882 882 calcloadavg(genloadavg(&loadavg), hp_avenrun);
883 883 for (i = 0; i < 3; i++)
884 884 /*
885 885 * At the moment avenrun[] can only hold 31
886 886 * bits of load average as it is a signed
887 887 * int in the API. We need to ensure that
888 888 * hp_avenrun[i] >> (16 - FSHIFT) will not be
889 889 * too large. If it is, we put the largest value
890 890 * that we can use into avenrun[i]. This is
891 891 * kludgey, but about all we can do until
892 892 * avenrun[] is declared as an array of uint64[]
893 893 */
894 894 if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
895 895 avenrun[i] = (int32_t)(hp_avenrun[i] >>
896 896 (16 - FSHIFT));
897 897 else
898 898 avenrun[i] = 0x7fffffff;
899 899
900 900 cpupart = cp_list_head;
901 901 do {
902 902 calcloadavg(genloadavg(&cpupart->cp_loadavg),
903 903 cpupart->cp_hp_avenrun);
904 904 } while ((cpupart = cpupart->cp_next) != cp_list_head);
905 -
906 - /*
907 - * Wake up the swapper thread if necessary.
908 - */
909 - if (runin ||
910 - (runout && (avefree < desfree || wake_sched_sec))) {
911 - t = &t0;
912 - thread_lock(t);
913 - if (t->t_state == TS_STOPPED) {
914 - runin = runout = 0;
915 - wake_sched_sec = 0;
916 - t->t_whystop = 0;
917 - t->t_whatstop = 0;
918 - t->t_schedflag &= ~TS_ALLSTART;
919 - THREAD_TRANSITION(t);
920 - setfrontdq(t);
921 - }
922 - thread_unlock(t);
923 - }
924 - }
925 -
926 - /*
927 - * Wake up the swapper if any high priority swapped-out threads
928 - * became runnable during the last tick.
929 - */
930 - if (wake_sched) {
931 - t = &t0;
932 - thread_lock(t);
933 - if (t->t_state == TS_STOPPED) {
934 - runin = runout = 0;
935 - wake_sched = 0;
936 - t->t_whystop = 0;
937 - t->t_whatstop = 0;
938 - t->t_schedflag &= ~TS_ALLSTART;
939 - THREAD_TRANSITION(t);
940 - setfrontdq(t);
941 - }
942 - thread_unlock(t);
943 905 }
944 906 }
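
The precision-kernel fragment at the top of clock() is a fixed-point carry: time_adj accumulates into time_phase on every tick, and whole scaled microseconds are carried out into timedelta. The same arithmetic as a standalone sketch, with an illustrative scale factor standing in for the timex.h constants:

#include <stdint.h>

#define SCALE_PHASE_SK	(1 << 22)	/* illustrative: units per microsecond */

static int64_t time_phase_sk;		/* sub-microsecond accumulator */
static int64_t timedelta_sk;		/* nanoseconds owed to hrestime */

static void
phase_tick(int64_t time_adj_sk)		/* per-tick scaled adjustment */
{
	time_phase_sk += time_adj_sk;
	if (time_phase_sk >= SCALE_PHASE_SK) {
		/* carry whole microseconds out of the accumulator */
		int64_t usec = time_phase_sk / SCALE_PHASE_SK;
		time_phase_sk -= usec * SCALE_PHASE_SK;
		timedelta_sk += usec * 1000;	/* usec -> nsec */
	} else if (time_phase_sk <= -SCALE_PHASE_SK) {
		int64_t usec = -time_phase_sk / SCALE_PHASE_SK;
		time_phase_sk += usec * SCALE_PHASE_SK;
		timedelta_sk -= usec * 1000;
	}
}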
945 907
946 908 void
947 909 clock_init(void)
948 910 {
949 911 cyc_handler_t clk_hdlr, lbolt_hdlr;
950 912 cyc_time_t clk_when, lbolt_when;
951 913 int i, sz;
952 914 intptr_t buf;
953 915
954 916 /*
955 917 * Setup handler and timer for the clock cyclic.
956 918 */
957 919 clk_hdlr.cyh_func = (cyc_func_t)clock;
958 920 clk_hdlr.cyh_level = CY_LOCK_LEVEL;
959 921 clk_hdlr.cyh_arg = NULL;
960 922
961 923 clk_when.cyt_when = 0;
962 924 clk_when.cyt_interval = nsec_per_tick;
963 925
964 926 /*
965 927 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
966 928 * interval to satisfy performance needs of the DDI lbolt consumers.
967 929 * It is off by default.
968 930 */
969 931 lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
970 932 lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
971 933 lbolt_hdlr.cyh_arg = NULL;
972 934
973 935 lbolt_when.cyt_interval = nsec_per_tick;
974 936
975 937 /*
976 938 * Allocate cache line aligned space for the per CPU lbolt data and
977 939 * lbolt info structures, and initialize them with their default
978 940 * values. Note that these structures are also cache line sized.
979 941 */
980 942 sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
981 943 buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
982 944 lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
983 945
984 946 if (hz != HZ_DEFAULT)
985 947 lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
986 948 hz/HZ_DEFAULT;
987 949 else
988 950 lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;
989 951
990 952 lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;
991 953
992 954 sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
993 955 buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
994 956 lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
995 957
996 958 for (i = 0; i < max_ncpus; i++)
997 959 lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;
998 960
999 961 /*
1000 962 * Install the softint used to switch between event and cyclic driven
1001 963 * lbolt. We use a soft interrupt to make sure the context of the
1002 964 * cyclic reprogram call is safe.
1003 965 */
1004 966 lbolt_softint_add();
1005 967
1006 968 /*
1007 969 * Since the hybrid lbolt implementation is based on a hardware counter
1008 970 * that is reset at every hardware reboot and that we'd like to have
1009 971 * the lbolt value starting at zero after both a hardware and a fast
1010 972 * reboot, we calculate the number of clock ticks the system's been up
1011 973 * and store it in the lbi_debug_time field of the lbolt info structure.
1012 974 * The value of this field will be subtracted from lbolt before
1013 975 * returning it.
1014 976 */
1015 977 lb_info->lbi_internal = lb_info->lbi_debug_time =
1016 978 (gethrtime()/nsec_per_tick);
1017 979
1018 980 /*
1019 981 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
1020 982 * and lbolt_debug_{enter,return} use this value as an indication that
1021 983 * the initialization above hasn't been completed. Setting lbolt_hybrid
1022 984 * to either lbolt_{cyclic,event}_driven here signals those code paths
1023 985 * that the lbolt related structures can be used.
1024 986 */
1025 987 if (lbolt_cyc_only) {
1026 988 lbolt_when.cyt_when = 0;
1027 989 lbolt_hybrid = lbolt_cyclic_driven;
1028 990 } else {
1029 991 lbolt_when.cyt_when = CY_INFINITY;
1030 992 lbolt_hybrid = lbolt_event_driven;
1031 993 }
1032 994
1033 995 /*
1034 996 * Grab cpu_lock and install both cyclics.
1035 997 */
1036 998 mutex_enter(&cpu_lock);
1037 999
1038 1000 clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
1039 1001 lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);
1040 1002
1041 1003 mutex_exit(&cpu_lock);
1042 1004 }
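
The lb_info/lb_cpu allocations above use a common alignment idiom: over-allocate by one coherence granule and round the pointer up, so the structure never shares a cache line with unrelated data. The same idea in userland terms (a sketch; the kernel uses kmem_zalloc() and P2ROUNDUP, and keeps the original size around so the buffer can eventually be freed):

#include <stdint.h>
#include <stdlib.h>

#define CACHE_LINE	64	/* stand-in for CPU_CACHE_COHERENCE_SIZE */
#define ROUNDUP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

void *
cacheline_zalloc(size_t size)
{
	/* over-allocate so an aligned block of 'size' bytes always fits */
	uintptr_t buf = (uintptr_t)calloc(1, size + CACHE_LINE);

	if (buf == 0)
		return (NULL);
	return ((void *)ROUNDUP(buf, CACHE_LINE));
}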
1043 1005
1044 1006 /*
1045 1007 * Called before calcloadavg to get 10-sec moving loadavg together
1046 1008 */
1047 1009
1048 1010 static int
1049 1011 genloadavg(struct loadavg_s *avgs)
1050 1012 {
1051 1013 int avg;
1052 1014 int spos; /* starting position */
1053 1015 int cpos; /* moving current position */
1054 1016 int i;
1055 1017 int slen;
1056 1018 hrtime_t hr_avg;
1057 1019
1058 1020 /* 10-second snapshot, calculate first position */
1059 1021 if (avgs->lg_len == 0) {
1060 1022 return (0);
1061 1023 }
1062 1024 slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;
1063 1025
1064 1026 spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
1065 1027 S_LOADAVG_SZ + (avgs->lg_cur - 1);
1066 1028 for (i = hr_avg = 0; i < slen; i++) {
1067 1029 cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
1068 1030 hr_avg += avgs->lg_loads[cpos];
1069 1031 }
1070 1032
1071 1033 hr_avg = hr_avg / slen;
1072 1034 avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);
1073 1035
1074 1036 return (avg);
1075 1037 }
1076 1038
1077 1039 /*
1078 1040 * Run every second from clock() to update the loadavg count available to the
1079 1041 * system and cpu-partitions.
1080 1042 *
1081 1043 * This works by sampling each CPU's cumulative usr, sys, and wait
1082 1044 * time, computing the delta against the previous sample, and adding
1083 1045 * that delta into the running load totals.
1084 1046 */
1085 1047
1086 1048 static void
1087 1049 loadavg_update()
1088 1050 {
1089 1051 cpu_t *cp;
1090 1052 cpupart_t *cpupart;
1091 1053 hrtime_t cpu_total;
1092 1054 int prev;
1093 1055
1094 1056 cp = cpu_list;
1095 1057 loadavg.lg_total = 0;
1096 1058
1097 1059 /*
1098 1060 * first pass totals up per-cpu statistics for system and cpu
1099 1061 * partitions
1100 1062 */
1101 1063
1102 1064 do {
1103 1065 struct loadavg_s *lavg;
1104 1066
1105 1067 lavg = &cp->cpu_loadavg;
1106 1068
1107 1069 cpu_total = cp->cpu_acct[CMS_USER] +
1108 1070 cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
1109 1071 /* compute delta against last total */
1110 1072 scalehrtime(&cpu_total);
1111 1073 prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
1112 1074 S_LOADAVG_SZ + (lavg->lg_cur - 1);
1113 1075 if (lavg->lg_loads[prev] <= 0) {
1114 1076 lavg->lg_loads[lavg->lg_cur] = cpu_total;
1115 1077 cpu_total = 0;
1116 1078 } else {
1117 1079 lavg->lg_loads[lavg->lg_cur] = cpu_total;
1118 1080 cpu_total = cpu_total - lavg->lg_loads[prev];
1119 1081 if (cpu_total < 0)
1120 1082 cpu_total = 0;
1121 1083 }
1122 1084
1123 1085 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1124 1086 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1125 1087 lavg->lg_len + 1 : S_LOADAVG_SZ;
1126 1088
1127 1089 loadavg.lg_total += cpu_total;
1128 1090 cp->cpu_part->cp_loadavg.lg_total += cpu_total;
1129 1091
1130 1092 } while ((cp = cp->cpu_next) != cpu_list);
1131 1093
1132 1094 loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
1133 1095 loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
1134 1096 loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
1135 1097 loadavg.lg_len + 1 : S_LOADAVG_SZ;
1136 1098 /*
1137 1099 * Second pass updates counts
1138 1100 */
1139 1101 cpupart = cp_list_head;
1140 1102
1141 1103 do {
1142 1104 struct loadavg_s *lavg;
1143 1105
1144 1106 lavg = &cpupart->cp_loadavg;
1145 1107 lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
1146 1108 lavg->lg_total = 0;
1147 1109 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1148 1110 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1149 1111 lavg->lg_len + 1 : S_LOADAVG_SZ;
1150 1112
1151 1113 } while ((cpupart = cpupart->cp_next) != cp_list_head);
1152 1114
1153 1115 /*
1154 1116 * Third pass totals up per-zone statistics.
1155 1117 */
1156 1118 zone_loadavg_update();
1157 1119 }
1158 1120
1159 1121 /*
1160 1122 * clock_update() - local clock update
1161 1123 *
1162 1124 * This routine is called by ntp_adjtime() to update the local clock
1163 1125 * phase and frequency. The implementation is of an
1164 1126 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
1165 1127 * routine computes new time and frequency offset estimates for each
1166 1128 * call. The PPS signal itself determines the new time offset,
1167 1129 * instead of the calling argument. Presumably, calls to
1168 1130 * ntp_adjtime() occur only when the caller believes the local clock
1169 1131 * is valid within some bound (+-128 ms with NTP). If the caller's
1170 1132 * time is far different than the PPS time, an argument will ensue,
1171 1133 * and it's not clear who will lose.
1172 1134 *
1173 1135 * For uncompensated quartz crystal oscillators and nominal update
1174 1136 * intervals less than 1024 s, operation should be in phase-lock mode
1175 1137 * (STA_FLL = 0), where the loop is disciplined to phase. For update
1176 1138 * intervals greater than this, operation should be in frequency-lock
1177 1139 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1178 1140 *
1179 1141 * Note: mutex(&tod_lock) is in effect.
1180 1142 */
1181 1143 void
1182 1144 clock_update(int offset)
1183 1145 {
1184 1146 int ltemp, mtemp, s;
1185 1147
1186 1148 ASSERT(MUTEX_HELD(&tod_lock));
1187 1149
1188 1150 if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1189 1151 return;
1190 1152 ltemp = offset;
1191 1153 if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
1192 1154 ltemp = pps_offset;
1193 1155
1194 1156 /*
1195 1157 * Scale the phase adjustment and clamp to the operating range.
1196 1158 */
1197 1159 if (ltemp > MAXPHASE)
1198 1160 time_offset = MAXPHASE * SCALE_UPDATE;
1199 1161 else if (ltemp < -MAXPHASE)
1200 1162 time_offset = -(MAXPHASE * SCALE_UPDATE);
1201 1163 else
1202 1164 time_offset = ltemp * SCALE_UPDATE;
1203 1165
1204 1166 /*
1205 1167 * Select whether the frequency is to be controlled and in which
1206 1168 * mode (PLL or FLL). Clamp to the operating range. Ugly
1207 1169 * multiply/divide should be replaced someday.
1208 1170 */
1209 1171 if (time_status & STA_FREQHOLD || time_reftime == 0)
1210 1172 time_reftime = hrestime.tv_sec;
1211 1173
1212 1174 mtemp = hrestime.tv_sec - time_reftime;
1213 1175 time_reftime = hrestime.tv_sec;
1214 1176
1215 1177 if (time_status & STA_FLL) {
1216 1178 if (mtemp >= MINSEC) {
1217 1179 ltemp = ((time_offset / mtemp) * (SCALE_USEC /
1218 1180 SCALE_UPDATE));
1219 1181 if (ltemp)
1220 1182 time_freq += ltemp / SCALE_KH;
1221 1183 }
1222 1184 } else {
1223 1185 if (mtemp < MAXSEC) {
1224 1186 ltemp *= mtemp;
1225 1187 if (ltemp)
1226 1188 time_freq += (int)(((int64_t)ltemp *
1227 1189 SCALE_USEC) / SCALE_KF)
1228 1190 / (1 << (time_constant * 2));
1229 1191 }
1230 1192 }
1231 1193 if (time_freq > time_tolerance)
1232 1194 time_freq = time_tolerance;
1233 1195 else if (time_freq < -time_tolerance)
1234 1196 time_freq = -time_tolerance;
1235 1197
1236 1198 s = hr_clock_lock();
1237 1199 tod_needsync = 1;
1238 1200 hr_clock_unlock(s);
1239 1201 }
1240 1202
1241 1203 /*
1242 1204 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
1243 1205 *
1244 1206 * This routine is called at each PPS interrupt in order to discipline
1245 1207 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1246 1208 * and leaves it in a handy spot for the clock() routine. It
1247 1209 * integrates successive PPS phase differences and calculates the
1248 1210 * frequency offset. This is used in clock() to discipline the CPU
1249 1211 * clock oscillator so that intrinsic frequency error is cancelled out.
1250 1212 * The code requires the caller to capture the time and hardware counter
1251 1213 * value at the on-time PPS signal transition.
1252 1214 *
1253 1215 * Note that, on some Unix systems, this routine runs at an interrupt
1254 1216 * priority level higher than the timer interrupt routine clock().
1255 1217 * Therefore, the variables used are distinct from the clock()
1256 1218 * variables, except for certain exceptions: The PPS frequency pps_freq
1257 1219 * and phase pps_offset variables are determined by this routine and
1258 1220 * updated atomically. The time_tolerance variable can be considered a
1259 1221 * constant, since it is infrequently changed, and then only when the
1260 1222 * PPS signal is disabled. The watchdog counter pps_valid is updated
1261 1223 * once per second by clock() and is atomically cleared in this
1262 1224 * routine.
1263 1225 *
1264 1226 * tvp is the time of the last tick; usec is a microsecond count since the
1265 1227 * last tick.
1266 1228 *
1267 1229 * Note: In Solaris systems, the tick value is actually given by
1268 1230 * usec_per_tick. This is called from the serial driver cdintr(),
1269 1231 * or equivalent, at a high PIL. Because the kernel keeps a
1270 1232 * high-resolution time, the following code can accept either
1271 1233 * the traditional argument pair, or the current highres timestamp
1272 1234 * in tvp and zero in usec.
1273 1235 */
1274 1236 void
1275 1237 ddi_hardpps(struct timeval *tvp, int usec)
1276 1238 {
1277 1239 int u_usec, v_usec, bigtick;
1278 1240 time_t cal_sec;
1279 1241 int cal_usec;
1280 1242
1281 1243 /*
1282 1244 * An occasional glitch can be produced when the PPS interrupt
1283 1245 * occurs in the clock() routine before the time variable is
1284 1246 * updated. Here the offset is discarded when the difference
1285 1247 * between it and the last one is greater than tick/2, but not
1286 1248 * if the interval since the first discard exceeds 30 s.
1287 1249 */
1288 1250 time_status |= STA_PPSSIGNAL;
1289 1251 time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1290 1252 pps_valid = 0;
1291 1253 u_usec = -tvp->tv_usec;
1292 1254 if (u_usec < -(MICROSEC/2))
1293 1255 u_usec += MICROSEC;
1294 1256 v_usec = pps_offset - u_usec;
1295 1257 if (v_usec < 0)
1296 1258 v_usec = -v_usec;
1297 1259 if (v_usec > (usec_per_tick >> 1)) {
1298 1260 if (pps_glitch > MAXGLITCH) {
1299 1261 pps_glitch = 0;
1300 1262 pps_tf[2] = u_usec;
1301 1263 pps_tf[1] = u_usec;
1302 1264 } else {
1303 1265 pps_glitch++;
1304 1266 u_usec = pps_offset;
1305 1267 }
1306 1268 } else
1307 1269 pps_glitch = 0;
1308 1270
1309 1271 /*
1310 1272 * A three-stage median filter is used to help deglitch the pps
1311 1273 * time. The median sample becomes the time offset estimate; the
1312 1274 * difference between the other two samples becomes the time
1313 1275 * dispersion (jitter) estimate.
1314 1276 */
1315 1277 pps_tf[2] = pps_tf[1];
1316 1278 pps_tf[1] = pps_tf[0];
1317 1279 pps_tf[0] = u_usec;
1318 1280 if (pps_tf[0] > pps_tf[1]) {
1319 1281 if (pps_tf[1] > pps_tf[2]) {
1320 1282 pps_offset = pps_tf[1]; /* 0 1 2 */
1321 1283 v_usec = pps_tf[0] - pps_tf[2];
1322 1284 } else if (pps_tf[2] > pps_tf[0]) {
1323 1285 pps_offset = pps_tf[0]; /* 2 0 1 */
1324 1286 v_usec = pps_tf[2] - pps_tf[1];
1325 1287 } else {
1326 1288 pps_offset = pps_tf[2]; /* 0 2 1 */
1327 1289 v_usec = pps_tf[0] - pps_tf[1];
1328 1290 }
1329 1291 } else {
1330 1292 if (pps_tf[1] < pps_tf[2]) {
1331 1293 pps_offset = pps_tf[1]; /* 2 1 0 */
1332 1294 v_usec = pps_tf[2] - pps_tf[0];
1333 1295 } else if (pps_tf[2] < pps_tf[0]) {
1334 1296 pps_offset = pps_tf[0]; /* 1 0 2 */
1335 1297 v_usec = pps_tf[1] - pps_tf[2];
1336 1298 } else {
1337 1299 pps_offset = pps_tf[2]; /* 1 2 0 */
1338 1300 v_usec = pps_tf[1] - pps_tf[0];
1339 1301 }
1340 1302 }
1341 1303 if (v_usec > MAXTIME)
1342 1304 pps_jitcnt++;
1343 1305 v_usec = (v_usec << PPS_AVG) - pps_jitter;
1344 1306 pps_jitter += v_usec / (1 << PPS_AVG);
1345 1307 if (pps_jitter > (MAXTIME >> 1))
1346 1308 time_status |= STA_PPSJITTER;
1347 1309
1348 1310 /*
1349 1311 * During the calibration interval adjust the starting time when
1350 1312 * the tick overflows. At the end of the interval compute the
1351 1313 * duration of the interval and the difference of the hardware
1352 1314 * counters at the beginning and end of the interval. This code
1353 1315 * is deliciously complicated by the fact that valid differences may
1354 1316 * exceed the value of tick when using long calibration
1355 1317 * intervals and small ticks. Note that the counter can be
1356 1318 * greater than tick if caught at just the wrong instant, but
1357 1319 * the values returned and used here are correct.
1358 1320 */
1359 1321 bigtick = (int)usec_per_tick * SCALE_USEC;
1360 1322 pps_usec -= pps_freq;
1361 1323 if (pps_usec >= bigtick)
1362 1324 pps_usec -= bigtick;
1363 1325 if (pps_usec < 0)
1364 1326 pps_usec += bigtick;
1365 1327 pps_time.tv_sec++;
1366 1328 pps_count++;
1367 1329 if (pps_count < (1 << pps_shift))
1368 1330 return;
1369 1331 pps_count = 0;
1370 1332 pps_calcnt++;
1371 1333 u_usec = usec * SCALE_USEC;
1372 1334 v_usec = pps_usec - u_usec;
1373 1335 if (v_usec >= bigtick >> 1)
1374 1336 v_usec -= bigtick;
1375 1337 if (v_usec < -(bigtick >> 1))
1376 1338 v_usec += bigtick;
1377 1339 if (v_usec < 0)
1378 1340 v_usec = -(-v_usec >> pps_shift);
1379 1341 else
1380 1342 v_usec = v_usec >> pps_shift;
1381 1343 pps_usec = u_usec;
1382 1344 cal_sec = tvp->tv_sec;
1383 1345 cal_usec = tvp->tv_usec;
1384 1346 cal_sec -= pps_time.tv_sec;
1385 1347 cal_usec -= pps_time.tv_usec;
1386 1348 if (cal_usec < 0) {
1387 1349 cal_usec += MICROSEC;
1388 1350 cal_sec--;
1389 1351 }
1390 1352 pps_time = *tvp;
1391 1353
1392 1354 /*
1393 1355 * Check for lost interrupts, noise, excessive jitter and
1394 1356 * excessive frequency error. The number of timer ticks during
1395 1357 * the interval may vary +-1 tick. Add to this a margin of one
1396 1358 * tick for the PPS signal jitter and maximum frequency
1397 1359 * deviation. If the limits are exceeded, the calibration
1398 1360 * interval is reset to the minimum and we start over.
1399 1361 */
1400 1362 u_usec = (int)usec_per_tick << 1;
1401 1363 if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
1402 1364 (cal_sec == 0 && cal_usec < u_usec)) ||
1403 1365 v_usec > time_tolerance || v_usec < -time_tolerance) {
1404 1366 pps_errcnt++;
1405 1367 pps_shift = PPS_SHIFT;
1406 1368 pps_intcnt = 0;
1407 1369 time_status |= STA_PPSERROR;
1408 1370 return;
1409 1371 }
1410 1372
1411 1373 /*
1412 1374 * A three-stage median filter is used to help deglitch the pps
1413 1375 * frequency. The median sample becomes the frequency offset
1414 1376 * estimate; the difference between the other two samples
1415 1377 * becomes the frequency dispersion (stability) estimate.
1416 1378 */
1417 1379 pps_ff[2] = pps_ff[1];
1418 1380 pps_ff[1] = pps_ff[0];
1419 1381 pps_ff[0] = v_usec;
1420 1382 if (pps_ff[0] > pps_ff[1]) {
1421 1383 if (pps_ff[1] > pps_ff[2]) {
1422 1384 u_usec = pps_ff[1]; /* 0 1 2 */
1423 1385 v_usec = pps_ff[0] - pps_ff[2];
1424 1386 } else if (pps_ff[2] > pps_ff[0]) {
1425 1387 u_usec = pps_ff[0]; /* 2 0 1 */
1426 1388 v_usec = pps_ff[2] - pps_ff[1];
1427 1389 } else {
1428 1390 u_usec = pps_ff[2]; /* 0 2 1 */
1429 1391 v_usec = pps_ff[0] - pps_ff[1];
1430 1392 }
1431 1393 } else {
1432 1394 if (pps_ff[1] < pps_ff[2]) {
1433 1395 u_usec = pps_ff[1]; /* 2 1 0 */
1434 1396 v_usec = pps_ff[2] - pps_ff[0];
1435 1397 } else if (pps_ff[2] < pps_ff[0]) {
1436 1398 u_usec = pps_ff[0]; /* 1 0 2 */
1437 1399 v_usec = pps_ff[1] - pps_ff[2];
1438 1400 } else {
1439 1401 u_usec = pps_ff[2]; /* 1 2 0 */
1440 1402 v_usec = pps_ff[1] - pps_ff[0];
1441 1403 }
1442 1404 }
1443 1405
1444 1406 /*
1445 1407 * Here the frequency dispersion (stability) is updated. If it
1446 1408 * is less than one-fourth the maximum (MAXFREQ), the frequency
1447 1409 * offset is updated as well, but clamped to the tolerance. It
1448 1410 * will be processed later by the clock() routine.
1449 1411 */
1450 1412 v_usec = (v_usec >> 1) - pps_stabil;
1451 1413 if (v_usec < 0)
1452 1414 pps_stabil -= -v_usec >> PPS_AVG;
1453 1415 else
1454 1416 pps_stabil += v_usec >> PPS_AVG;
1455 1417 if (pps_stabil > MAXFREQ >> 2) {
1456 1418 pps_stbcnt++;
1457 1419 time_status |= STA_PPSWANDER;
1458 1420 return;
1459 1421 }
1460 1422 if (time_status & STA_PPSFREQ) {
1461 1423 if (u_usec < 0) {
1462 1424 pps_freq -= -u_usec >> PPS_AVG;
1463 1425 if (pps_freq < -time_tolerance)
1464 1426 pps_freq = -time_tolerance;
1465 1427 u_usec = -u_usec;
1466 1428 } else {
1467 1429 pps_freq += u_usec >> PPS_AVG;
1468 1430 if (pps_freq > time_tolerance)
1469 1431 pps_freq = time_tolerance;
1470 1432 }
1471 1433 }
1472 1434
1473 1435 /*
1474 1436 * Here the calibration interval is adjusted. If the maximum
1475 1437 * time difference is greater than tick / 4, reduce the interval
1476 1438 * by half. If this is not the case for four consecutive
1477 1439 * intervals, double the interval.
1478 1440 */
1479 1441 if (u_usec << pps_shift > bigtick >> 2) {
1480 1442 pps_intcnt = 0;
1481 1443 if (pps_shift > PPS_SHIFT)
1482 1444 pps_shift--;
1483 1445 } else if (pps_intcnt >= 4) {
1484 1446 pps_intcnt = 0;
1485 1447 if (pps_shift < PPS_SHIFTMAX)
1486 1448 pps_shift++;
1487 1449 } else
1488 1450 pps_intcnt++;
1489 1451
1490 1452 /*
1491 1453 * If recovering from kmdb, then make sure the tod chip gets resynced.
1492 1454 * If we took an early exit above, then we don't yet have a stable
1493 1455 * calibration signal to lock onto, so don't mark the tod for sync
1494 1456 * until we get all the way here.
1495 1457 */
1496 1458 {
1497 1459 int s = hr_clock_lock();
1498 1460
1499 1461 tod_needsync = 1;
1500 1462 hr_clock_unlock(s);
1501 1463 }
1502 1464 }
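
Both filters in ddi_hardpps() are instances of the same median-of-three selection spelled out longhand above: the median of the last three samples becomes the estimate, and the spread of the outer two becomes the dispersion. A compact restatement, for reference only:

#include <stdint.h>

static void
median3(const int32_t s[3], int32_t *median, int32_t *dispersion)
{
	int32_t lo = s[0], mid = s[1], hi = s[2], t;

	/* sort three values with three compare/swaps */
	if (lo > mid) { t = lo; lo = mid; mid = t; }
	if (mid > hi) { t = mid; mid = hi; hi = t; }
	if (lo > mid) { t = lo; lo = mid; mid = t; }

	*median = mid;		/* e.g. pps_offset or the frequency estimate */
	*dispersion = hi - lo;	/* e.g. the jitter/stability value (v_usec) */
}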
1503 1465
1504 1466 /*
1505 1467 * Handle clock tick processing for a thread.
1506 1468 * Check for timer action, enforce CPU rlimit, do profiling etc.
1507 1469 */
1508 1470 void
1509 1471 clock_tick(kthread_t *t, int pending)
1510 1472 {
1511 1473 struct proc *pp;
1512 1474 klwp_id_t lwp;
1513 1475 struct as *as;
1514 1476 clock_t ticks;
1515 1477 int poke = 0; /* notify another CPU */
1516 1478 int user_mode;
1517 1479 size_t rss;
1518 1480 int i, total_usec, usec;
1519 1481 rctl_qty_t secs;
1520 1482
1521 1483 ASSERT(pending > 0);
1522 1484
1523 1485 /* Must be operating on a lwp/thread */
1524 1486 if ((lwp = ttolwp(t)) == NULL) {
1525 1487 panic("clock_tick: no lwp");
1526 1488 /*NOTREACHED*/
1527 1489 }
1528 1490
1529 1491 for (i = 0; i < pending; i++) {
1530 1492 CL_TICK(t); /* Class specific tick processing */
1531 1493 DTRACE_SCHED1(tick, kthread_t *, t);
1532 1494 }
1533 1495
1534 1496 pp = ttoproc(t);
1535 1497
1536 1498 /* pp->p_lock makes sure that the thread does not exit */
1537 1499 ASSERT(MUTEX_HELD(&pp->p_lock));
1538 1500
1539 1501 user_mode = (lwp->lwp_state == LWP_USER);
1540 1502
1541 1503 ticks = (pp->p_utime + pp->p_stime) % hz;
1542 1504 /*
1543 1505 * Update process times. Should use high res clock and state
1544 1506 * changes instead of statistical sampling method. XXX
1545 1507 */
1546 1508 if (user_mode) {
1547 1509 pp->p_utime += pending;
1548 1510 } else {
1549 1511 pp->p_stime += pending;
1550 1512 }
1551 1513
1552 1514 pp->p_ttime += pending;
1553 1515 as = pp->p_as;
1554 1516
1555 1517 /*
1556 1518 * Update user profiling statistics. Get the pc from the
1557 1519 * lwp when the AST happens.
1558 1520 */
1559 1521 if (pp->p_prof.pr_scale) {
1560 1522 atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
1561 1523 if (user_mode) {
1562 1524 poke = 1;
1563 1525 aston(t);
1564 1526 }
1565 1527 }
1566 1528
1567 1529 /*
1568 1530 * If CPU was in user state, process lwp-virtual time
1569 1531 * interval timer. The value passed to itimerdecr() has to be
1570 1532 * in microseconds and has to be less than one second. Hence
1571 1533 * this loop.
1572 1534 */
1573 1535 total_usec = usec_per_tick * pending;
1574 1536 while (total_usec > 0) {
1575 1537 usec = MIN(total_usec, (MICROSEC - 1));
1576 1538 if (user_mode &&
1577 1539 timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
1578 1540 itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
1579 1541 poke = 1;
1580 1542 sigtoproc(pp, t, SIGVTALRM);
1581 1543 }
1582 1544 total_usec -= usec;
1583 1545 }
1584 1546
1585 1547 /*
1586 1548 * If CPU was in user state, process lwp-profile
1587 1549 * interval timer.
1588 1550 */
1589 1551 total_usec = usec_per_tick * pending;
1590 1552 while (total_usec > 0) {
1591 1553 usec = MIN(total_usec, (MICROSEC - 1));
1592 1554 if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
1593 1555 itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
1594 1556 poke = 1;
1595 1557 sigtoproc(pp, t, SIGPROF);
1596 1558 }
1597 1559 total_usec -= usec;
1598 1560 }
1599 1561
1600 1562 /*
1601 1563 * Enforce CPU resource controls:
1602 1564 * (a) process.max-cpu-time resource control
1603 1565 *
1604 1566 	 * Perform the check only if we have accumulated more than a second.
1605 1567 */
1606 1568 if ((ticks + pending) >= hz) {
1607 1569 (void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
1608 1570 (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
1609 1571 }
1610 1572
1611 1573 /*
1612 1574 * (b) task.max-cpu-time resource control
1613 1575 *
1614 1576 * If we have accumulated enough ticks, increment the task CPU
1615 1577 * time usage and test for the resource limit. This minimizes the
1616 1578 	 * number of calls to rctl_test(). The task CPU time mutex
1617 1579 	 * is highly contended as many processes can be sharing a task.
1618 1580 */
1619 1581 if (pp->p_ttime >= clock_tick_proc_max) {
1620 1582 secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
1621 1583 pp->p_ttime = 0;
1622 1584 if (secs) {
1623 1585 (void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
1624 1586 pp, secs, RCA_UNSAFE_SIGINFO);
1625 1587 }
1626 1588 }
1627 1589
1628 1590 /*
1629 1591 * Update memory usage for the currently running process.
1630 1592 */
1631 1593 rss = rm_asrss(as);
1632 1594 PTOU(pp)->u_mem += rss;
1633 1595 if (rss > PTOU(pp)->u_mem_max)
1634 1596 PTOU(pp)->u_mem_max = rss;
1635 1597
1636 1598 /*
1637 1599 * Notify the CPU the thread is running on.
1638 1600 */
1639 1601 if (poke && t->t_cpu != CPU)
1640 1602 poke_cpu(t->t_cpu->cpu_id);
1641 1603 }
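
/*
 * A minimal user-level sketch of the chunking loops above: clock_tick()
 * may be handed several pending ticks at once, but itimerdecr() only
 * accepts a decrement of less than one second, so the accumulated
 * microseconds are fed to it in sub-second chunks.  decr_timer() and
 * USEC_PER_TICK are hypothetical stand-ins for itimerdecr() and the
 * kernel's usec_per_tick.
 */
#include <stdio.h>

#define	MICROSEC	1000000
#define	USEC_PER_TICK	10000		/* a 100Hz clock, for illustration */

static long remaining = 2500000;	/* 2.5s left on the modeled timer */

/* stands in for itimerdecr(): returns 0 once the timer expires */
static int
decr_timer(int usec)
{
	remaining -= usec;
	return (remaining > 0);
}

int
main(void)
{
	int pending = 250;		/* 250 ticks = 2.5 seconds */
	int total_usec = USEC_PER_TICK * pending;
	int usec;

	while (total_usec > 0) {
		usec = total_usec < MICROSEC - 1 ? total_usec : MICROSEC - 1;
		if (decr_timer(usec) == 0)
			(void) printf("timer expired\n");
		total_usec -= usec;
	}
	return (0);
}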
1642 1604
1643 1605 void
1644 1606 profil_tick(uintptr_t upc)
1645 1607 {
1646 1608 int ticks;
1647 1609 proc_t *p = ttoproc(curthread);
1648 1610 klwp_t *lwp = ttolwp(curthread);
1649 1611 struct prof *pr = &p->p_prof;
1650 1612
1651 1613 do {
1652 1614 ticks = lwp->lwp_oweupc;
1653 1615 } while (atomic_cas_32(&lwp->lwp_oweupc, ticks, 0) != ticks);
1654 1616
1655 1617 mutex_enter(&p->p_pflock);
1656 1618 if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
1657 1619 /*
1658 1620 * Old-style profiling
1659 1621 */
1660 1622 uint16_t *slot = pr->pr_base;
1661 1623 uint16_t old, new;
1662 1624 if (pr->pr_scale != 2) {
1663 1625 uintptr_t delta = upc - pr->pr_off;
1664 1626 uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
1665 1627 (((delta & 0xffff) * pr->pr_scale) >> 16);
1666 1628 if (byteoff >= (uintptr_t)pr->pr_size) {
1667 1629 mutex_exit(&p->p_pflock);
1668 1630 return;
1669 1631 }
1670 1632 slot += byteoff / sizeof (uint16_t);
1671 1633 }
1672 1634 if (fuword16(slot, &old) < 0 ||
1673 1635 (new = old + ticks) > SHRT_MAX ||
1674 1636 suword16(slot, new) < 0) {
1675 1637 pr->pr_scale = 0;
1676 1638 }
1677 1639 } else if (pr->pr_scale == 1) {
1678 1640 /*
1679 1641 * PC Sampling
1680 1642 */
1681 1643 model_t model = lwp_getdatamodel(lwp);
1682 1644 int result;
1683 1645 #ifdef __lint
1684 1646 model = model;
1685 1647 #endif
1686 1648 while (ticks-- > 0) {
1687 1649 if (pr->pr_samples == pr->pr_size) {
1688 1650 /* buffer full, turn off sampling */
1689 1651 pr->pr_scale = 0;
1690 1652 break;
1691 1653 }
1692 1654 switch (SIZEOF_PTR(model)) {
1693 1655 case sizeof (uint32_t):
1694 1656 result = suword32(pr->pr_base, (uint32_t)upc);
1695 1657 break;
1696 1658 #ifdef _LP64
1697 1659 case sizeof (uint64_t):
1698 1660 result = suword64(pr->pr_base, (uint64_t)upc);
1699 1661 break;
1700 1662 #endif
1701 1663 default:
1702 1664 cmn_err(CE_WARN, "profil_tick: unexpected "
1703 1665 "data model");
1704 1666 result = -1;
1705 1667 break;
1706 1668 }
1707 1669 if (result != 0) {
1708 1670 pr->pr_scale = 0;
1709 1671 break;
1710 1672 }
1711 1673 pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
1712 1674 pr->pr_samples++;
1713 1675 }
1714 1676 }
1715 1677 mutex_exit(&p->p_pflock);
1716 1678 }
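
/*
 * A minimal user-level check of the old-style profiling arithmetic
 * above: byteoff is (delta * pr_scale) >> 16 in 16.16 fixed point,
 * computed by splitting delta into its high and low 16-bit halves so
 * the multiply cannot overflow a machine word.  scale_split() is a
 * hypothetical name; the comparison against a wide multiply shows the
 * two forms agree.
 */
#include <stdio.h>
#include <stdint.h>

static uintptr_t
scale_split(uintptr_t delta, uintptr_t scale)
{
	return (((delta >> 16) * scale) + (((delta & 0xffff) * scale) >> 16));
}

int
main(void)
{
	uintptr_t delta = 0x12345678;
	uintptr_t scale = 0x8000;	/* map 2 bytes of pc to 1 byte */
	uint64_t wide = ((uint64_t)delta * scale) >> 16;

	(void) printf("split: %lu wide: %llu\n",
	    (unsigned long)scale_split(delta, scale),
	    (unsigned long long)wide);
	return (0);
}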
1717 1679
1718 1680 static void
1719 1681 delay_wakeup(void *arg)
1720 1682 {
1721 1683 kthread_t *t = arg;
1722 1684
1723 1685 mutex_enter(&t->t_delay_lock);
1724 1686 cv_signal(&t->t_delay_cv);
1725 1687 mutex_exit(&t->t_delay_lock);
1726 1688 }
1727 1689
1728 1690 /*
1729 1691 * The delay(9F) man page indicates that it can only be called from user or
1730 1692 * kernel context - detect and diagnose bad calls. The following macro will
1731 1693 * produce a limited number of messages identifying bad callers. This is done
1732 1694 * in a macro so that caller() is meaningful. When a bad caller is identified,
1733 1695 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
1734 1696 */
1735 1697 #define DELAY_CONTEXT_CHECK() { \
1736 1698 uint32_t m; \
1737 1699 char *f; \
1738 1700 ulong_t off; \
1739 1701 \
1740 1702 m = delay_from_interrupt_msg; \
1741 1703 if (delay_from_interrupt_diagnose && servicing_interrupt() && \
1742 1704 !panicstr && !devinfo_freeze && \
1743 1705 atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) { \
1744 1706 f = modgetsymname((uintptr_t)caller(), &off); \
1745 1707 cmn_err(CE_WARN, "delay(9F) called from " \
1746 1708 "interrupt context: %s`%s", \
1747 1709 mod_containing_pc(caller()), f ? f : "..."); \
1748 1710 } \
1749 1711 }
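
/*
 * A minimal user-level sketch of the message-limiting pattern in
 * DELAY_CONTEXT_CHECK above: a shared budget is decremented with an
 * atomic compare-and-swap, so concurrent offenders cannot print more
 * than the configured number of diagnostics.  msg_budget and
 * warn_once_in_a_while() are hypothetical; C11 atomics stand in for
 * atomic_cas_32(), and a lost race simply skips that message.
 */
#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t msg_budget = 5;

static void
warn_once_in_a_while(const char *who)
{
	uint32_t m = atomic_load(&msg_budget);

	/* claim one message only if the budget hasn't hit zero */
	if (m != 0 && atomic_compare_exchange_strong(&msg_budget, &m, m - 1))
		(void) fprintf(stderr, "bad delay() call from %s\n", who);
}

int
main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		warn_once_in_a_while("intr_handler");
	return (0);
}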
1750 1712
1751 1713 /*
1752 1714 * delay_common: common delay code.
1753 1715 */
1754 1716 static void
1755 1717 delay_common(clock_t ticks)
1756 1718 {
1757 1719 kthread_t *t = curthread;
1758 1720 clock_t deadline;
1759 1721 clock_t timeleft;
1760 1722 callout_id_t id;
1761 1723
1762 1724 /* If timeouts aren't running all we can do is spin. */
1763 1725 if (panicstr || devinfo_freeze) {
1764 1726 /* Convert delay(9F) call into drv_usecwait(9F) call. */
1765 1727 if (ticks > 0)
1766 1728 drv_usecwait(TICK_TO_USEC(ticks));
1767 1729 return;
1768 1730 }
1769 1731
1770 1732 deadline = ddi_get_lbolt() + ticks;
1771 1733 while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
1772 1734 mutex_enter(&t->t_delay_lock);
1773 1735 id = timeout_default(delay_wakeup, t, timeleft);
1774 1736 cv_wait(&t->t_delay_cv, &t->t_delay_lock);
1775 1737 mutex_exit(&t->t_delay_lock);
1776 1738 (void) untimeout_default(id, 0);
1777 1739 }
1778 1740 }
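
/*
 * A minimal user-level sketch of the deadline loop in delay_common()
 * above: the remaining time is re-derived from an absolute deadline on
 * every iteration, so a spurious or early wakeup just produces another,
 * shorter wait rather than a short delay.  get_ticks() and delay_ticks()
 * are hypothetical, with nanosleep() standing in for the timeout/cv_wait
 * pair and a 100Hz tick assumed.
 */
#include <stdio.h>
#include <time.h>

static long
get_ticks(void)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ts.tv_sec * 100 + ts.tv_nsec / 10000000);
}

static void
delay_ticks(long ticks)
{
	long deadline = get_ticks() + ticks;
	long timeleft;

	while ((timeleft = deadline - get_ticks()) > 0) {
		struct timespec req = {
			timeleft / 100, (timeleft % 100) * 10000000
		};
		/* may wake early on EINTR; the loop re-checks the deadline */
		(void) nanosleep(&req, NULL);
	}
}

int
main(void)
{
	(void) printf("before\n");
	delay_ticks(50);		/* roughly 500ms at 100Hz */
	(void) printf("after\n");
	return (0);
}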
1779 1741
1780 1742 /*
1781 1743 * Delay specified number of clock ticks.
1782 1744 */
1783 1745 void
1784 1746 delay(clock_t ticks)
1785 1747 {
1786 1748 DELAY_CONTEXT_CHECK();
1787 1749
1788 1750 delay_common(ticks);
1789 1751 }
1790 1752
1791 1753 /*
1792 1754 * Delay a random number of clock ticks between 1 and ticks.
1793 1755 */
1794 1756 void
1795 1757 delay_random(clock_t ticks)
1796 1758 {
1797 1759 int r;
1798 1760
1799 1761 DELAY_CONTEXT_CHECK();
1800 1762
1801 1763 (void) random_get_pseudo_bytes((void *)&r, sizeof (r));
1802 1764 if (ticks == 0)
1803 1765 ticks = 1;
1804 1766 ticks = (r % ticks) + 1;
1805 1767 delay_common(ticks);
1806 1768 }
1807 1769
1808 1770 /*
1809 1771 * Like delay, but interruptible by a signal.
1810 1772 */
1811 1773 int
1812 1774 delay_sig(clock_t ticks)
1813 1775 {
1814 1776 kthread_t *t = curthread;
1815 1777 clock_t deadline;
1816 1778 clock_t rc;
1817 1779
1818 1780 /* If timeouts aren't running all we can do is spin. */
1819 1781 if (panicstr || devinfo_freeze) {
1820 1782 if (ticks > 0)
1821 1783 drv_usecwait(TICK_TO_USEC(ticks));
1822 1784 return (0);
1823 1785 }
1824 1786
1825 1787 deadline = ddi_get_lbolt() + ticks;
1826 1788 mutex_enter(&t->t_delay_lock);
1827 1789 do {
1828 1790 rc = cv_timedwait_sig(&t->t_delay_cv,
1829 1791 &t->t_delay_lock, deadline);
1830 1792 /* loop until past deadline or signaled */
1831 1793 } while (rc > 0);
1832 1794 mutex_exit(&t->t_delay_lock);
1833 1795 if (rc == 0)
1834 1796 return (EINTR);
1835 1797 return (0);
1836 1798 }
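
/*
 * A minimal user-level model of the return-code handling in delay_sig()
 * above: cv_timedwait_sig() yields a positive value on an ordinary
 * wakeup, 0 when interrupted by a signal and a negative value once the
 * deadline has passed, and the loop only continues on the positive
 * case.  wait_result() and its scripted values are hypothetical.
 */
#include <stdio.h>
#include <errno.h>

static int script[] = { 2, 1, -1 };	/* two wakeups, then a timeout */
static int step;

static int
wait_result(void)
{
	return (script[step++]);
}

static int
delay_sig_model(void)
{
	int rc;

	do {
		rc = wait_result();
		/* loop until past deadline (negative) or signaled (0) */
	} while (rc > 0);

	return (rc == 0 ? EINTR : 0);
}

int
main(void)
{
	(void) printf("delay_sig_model() = %d\n", delay_sig_model());
	return (0);
}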
1837 1799
1838 1800
1839 1801 #define SECONDS_PER_DAY 86400
1840 1802
1841 1803 /*
1842 1804 * Initialize the system time based on the TOD chip. approx is used as
1843 1805 * an approximation of time (e.g. from the filesystem) in the event that
1844 1806 * the TOD chip has been cleared or is unresponsive. An approx of -1
1845 1807 * means the filesystem doesn't keep time.
1846 1808 */
1847 1809 void
1848 1810 clkset(time_t approx)
1849 1811 {
1850 1812 timestruc_t ts;
1851 1813 int spl;
1852 1814 int set_clock = 0;
1853 1815
1854 1816 mutex_enter(&tod_lock);
1855 1817 ts = tod_get();
1856 1818
1857 1819 if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
1858 1820 /*
1859 1821 * If the TOD chip is reporting some time after 1971,
1860 1822 * then it probably didn't lose power or become otherwise
1861 1823 * cleared in the recent past; check to assure that
1862 1824 * the time coming from the filesystem isn't in the future
1863 1825 * according to the TOD chip.
1864 1826 */
1865 1827 if (approx != -1 && approx > ts.tv_sec) {
1866 1828 cmn_err(CE_WARN, "Last shutdown is later "
1867 1829 "than time on time-of-day chip; check date.");
1868 1830 }
1869 1831 } else {
1870 1832 /*
1871 1833 * If the TOD chip isn't giving correct time, set it to the
1872 1834 * greater of i) approx and ii) 1987. That way if approx
1873 1835 * is negative or is earlier than 1987, we set the clock
1874 1836 * back to a time when Oliver North, ALF and Dire Straits
1875 1837 * were all on the collective brain: 1987.
1876 1838 */
1877 1839 timestruc_t tmp;
1878 1840 time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
1879 1841 ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
1880 1842 ts.tv_nsec = 0;
1881 1843
1882 1844 /*
1883 1845 * Attempt to write the new time to the TOD chip. Set spl high
1884 1846 * to avoid getting preempted between the tod_set and tod_get.
1885 1847 */
1886 1848 spl = splhi();
1887 1849 tod_set(ts);
1888 1850 tmp = tod_get();
1889 1851 splx(spl);
1890 1852
1891 1853 if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
1892 1854 tod_broken = 1;
1893 1855 dosynctodr = 0;
1894 1856 cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
1895 1857 } else {
1896 1858 cmn_err(CE_WARN, "Time-of-day chip had "
1897 1859 "incorrect date; check and reset.");
1898 1860 }
1899 1861 set_clock = 1;
1900 1862 }
1901 1863
1902 1864 if (!boot_time) {
1903 1865 boot_time = ts.tv_sec;
1904 1866 set_clock = 1;
1905 1867 }
1906 1868
1907 1869 if (set_clock)
1908 1870 set_hrestime(&ts);
1909 1871
1910 1872 mutex_exit(&tod_lock);
1911 1873 }
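
/*
 * A minimal user-level sketch of the write-then-readback check in
 * clkset() above: after tod_set(), the value read back must be either
 * the written second or the next one, since the chip may legitimately
 * roll over between the two operations; anything else marks the chip
 * broken.  (The fallback diagnose_date works out to
 * (1987 - 1970) * 365 * 86400 = 536,112,000 seconds.)  chip_set(),
 * chip_get() and chip_ok() are hypothetical stand-ins for the TOD
 * hardware interface.
 */
#include <stdio.h>
#include <time.h>

static time_t chip;			/* stands in for the TOD hardware */
static int chip_dead;			/* simulate an unresponsive chip */

static void
chip_set(time_t t)
{
	if (!chip_dead)
		chip = t;
}

static time_t
chip_get(void)
{
	return (chip);
}

static int
chip_ok(time_t t)
{
	time_t rb;

	chip_set(t);
	rb = chip_get();
	/* allow a one-second rollover between set and get */
	return (rb == t || rb == t + 1);
}

int
main(void)
{
	(void) printf("healthy: %d\n", chip_ok(1000));
	chip_dead = 1;
	(void) printf("dead:    %d\n", chip_ok(2000));
	return (0);
}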
1912 1874
1913 1875 int timechanged; /* for testing if the system time has been reset */
1914 1876
1915 1877 void
1916 1878 set_hrestime(timestruc_t *ts)
1917 1879 {
1918 1880 int spl = hr_clock_lock();
1919 1881 hrestime = *ts;
1920 1882 membar_enter(); /* hrestime must be visible before timechanged++ */
1921 1883 timedelta = 0;
1922 1884 timechanged++;
1923 1885 hr_clock_unlock(spl);
1924 1886 callout_hrestime();
1925 1887 }
1926 1888
1927 1889 static uint_t deadman_seconds;
1928 1890 static uint32_t deadman_panics;
1929 1891 static int deadman_enabled = 0;
1930 1892 static int deadman_panic_timers = 1;
1931 1893
1932 1894 static void
1933 1895 deadman(void)
1934 1896 {
1935 1897 if (panicstr) {
1936 1898 /*
1937 1899 * During panic, other CPUs besides the panic
1938 1900 * master continue to handle cyclics and some other
1939 1901 * interrupts. The code below is intended to be
1940 1902 * single threaded, so any CPU other than the master
1941 1903 * must keep out.
1942 1904 */
1943 1905 if (CPU->cpu_id != panic_cpu.cpu_id)
1944 1906 return;
1945 1907
1946 1908 if (!deadman_panic_timers)
1947 1909 return; /* allow all timers to be manually disabled */
1948 1910
1949 1911 /*
1950 1912 * If we are generating a crash dump or syncing filesystems and
1951 1913 * the corresponding timer is set, decrement it and re-enter
1952 1914 * the panic code to abort it and advance to the next state.
1953 1915 * The panic states and triggers are explained in panic.c.
1954 1916 */
1955 1917 if (panic_dump) {
1956 1918 if (dump_timeleft && (--dump_timeleft == 0)) {
1957 1919 panic("panic dump timeout");
1958 1920 /*NOTREACHED*/
1959 1921 }
1960 1922 } else if (panic_sync) {
1961 1923 if (sync_timeleft && (--sync_timeleft == 0)) {
1962 1924 panic("panic sync timeout");
1963 1925 /*NOTREACHED*/
1964 1926 }
1965 1927 }
1966 1928
1967 1929 return;
1968 1930 }
1969 1931
1970 1932 if (deadman_counter != CPU->cpu_deadman_counter) {
1971 1933 CPU->cpu_deadman_counter = deadman_counter;
1972 1934 CPU->cpu_deadman_countdown = deadman_seconds;
1973 1935 return;
1974 1936 }
1975 1937
1976 1938 if (--CPU->cpu_deadman_countdown > 0)
1977 1939 return;
1978 1940
1979 1941 /*
1980 1942 * Regardless of whether or not we actually bring the system down,
1981 1943 * bump the deadman_panics variable.
1982 1944 *
1983 1945 * N.B. deadman_panics is incremented once for each CPU that
1984 1946 * passes through here. It's expected that all the CPUs will
1985 1947 * detect this condition within one second of each other, so
1986 1948 * when deadman_enabled is off, deadman_panics will
1987 1949 * typically be a multiple of the total number of CPUs in
1988 1950 * the system.
1989 1951 */
1990 1952 atomic_inc_32(&deadman_panics);
1991 1953
1992 1954 if (!deadman_enabled) {
1993 1955 CPU->cpu_deadman_countdown = deadman_seconds;
1994 1956 return;
1995 1957 }
1996 1958
1997 1959 /*
1998 1960 * If we're here, we want to bring the system down.
1999 1961 */
2000 1962 panic("deadman: timed out after %d seconds of clock "
2001 1963 "inactivity", deadman_seconds);
2002 1964 /*NOTREACHED*/
2003 1965 }
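
/*
 * A minimal single-CPU user-level model of the watchdog above: outside
 * of panic, each CPU compares the global heartbeat (deadman_counter,
 * advanced by the clock) against its own snapshot once per second; if
 * the heartbeat stops moving, the per-CPU countdown drains and the
 * system panics.  WATCHDOG_SECONDS and watchdog_tick() are hypothetical
 * stand-ins for deadman_seconds and the non-panic path of deadman().
 */
#include <stdio.h>
#include <stdint.h>

#define	WATCHDOG_SECONDS	5

static uint32_t heartbeat;		/* stands in for deadman_counter */
static uint32_t snapshot;
static int countdown = WATCHDOG_SECONDS;

static int
watchdog_tick(void)
{
	if (heartbeat != snapshot) {
		/* clock is alive: resync and rearm */
		snapshot = heartbeat;
		countdown = WATCHDOG_SECONDS;
		return (0);
	}
	return (--countdown <= 0);	/* nonzero means "panic" */
}

int
main(void)
{
	int sec;

	for (sec = 0; sec < 10; sec++) {
		if (sec < 3)
			heartbeat++;	/* the clock stops after 3 seconds */
		if (watchdog_tick()) {
			(void) printf("deadman fired at second %d\n", sec);
			break;
		}
	}
	return (0);
}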
2004 1966
2005 1967 /*ARGSUSED*/
2006 1968 static void
2007 1969 deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
2008 1970 {
2009 1971 cpu->cpu_deadman_counter = 0;
2010 1972 cpu->cpu_deadman_countdown = deadman_seconds;
2011 1973
2012 1974 hdlr->cyh_func = (cyc_func_t)deadman;
2013 1975 hdlr->cyh_level = CY_HIGH_LEVEL;
2014 1976 hdlr->cyh_arg = NULL;
2015 1977
2016 1978 /*
2017 1979 * Stagger the CPUs so that they don't all run deadman() at
2018 1980 * the same time. Simplest reason to do this is to make it
2019 1981 * more likely that only one CPU will panic in case of a
2020 1982 * timeout. This is (strictly speaking) an aesthetic, not a
2021 1983 * technical consideration.
2022 1984 */
2023 1985 when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
2024 1986 when->cyt_interval = NANOSEC;
2025 1987 }
2026 1988
2027 1989
2028 1990 void
2029 1991 deadman_init(void)
2030 1992 {
2031 1993 cyc_omni_handler_t hdlr;
2032 1994
2033 1995 if (deadman_seconds == 0)
2034 1996 deadman_seconds = snoop_interval / MICROSEC;
2035 1997
2036 1998 if (snooping)
2037 1999 deadman_enabled = 1;
2038 2000
2039 2001 hdlr.cyo_online = deadman_online;
2040 2002 hdlr.cyo_offline = NULL;
2041 2003 hdlr.cyo_arg = NULL;
2042 2004
2043 2005 mutex_enter(&cpu_lock);
2044 2006 deadman_cyclic = cyclic_add_omni(&hdlr);
2045 2007 mutex_exit(&cpu_lock);
2046 2008 }
2047 2009
2048 2010 /*
2049 2011 * tod_fault() is for updating tod validate mechanism state:
2050 2012 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
2051 2013 * currently used for debugging only
2052 2014 * (2) The following four cases detected by tod validate mechanism:
2053 2015 * TOD_REVERSED: current tod value is less than previous value.
2054 2016 * TOD_STALLED: current tod value hasn't advanced.
2055 2017 * TOD_JUMPED: current tod value advanced too far from previous value.
2056 2018 * TOD_RATECHANGED: the ratio between average tod delta and
2057 2019 * average tick delta has changed.
2058 2020 * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
2059 2021 * a virtual TOD provided by a hypervisor.
2060 2022 */
2061 2023 enum tod_fault_type
2062 2024 tod_fault(enum tod_fault_type ftype, int off)
2063 2025 {
2064 2026 ASSERT(MUTEX_HELD(&tod_lock));
2065 2027
2066 2028 if (tod_faulted != ftype) {
2067 2029 switch (ftype) {
2068 2030 case TOD_NOFAULT:
2069 2031 plat_tod_fault(TOD_NOFAULT);
2070 2032 cmn_err(CE_NOTE, "Restarted tracking "
2071 2033 "Time of Day clock.");
2072 2034 tod_faulted = ftype;
2073 2035 break;
2074 2036 case TOD_REVERSED:
2075 2037 case TOD_JUMPED:
2076 2038 if (tod_faulted == TOD_NOFAULT) {
2077 2039 plat_tod_fault(ftype);
2078 2040 cmn_err(CE_WARN, "Time of Day clock error: "
2079 2041 "reason [%s by 0x%x]. -- "
2080 2042 " Stopped tracking Time Of Day clock.",
2081 2043 tod_fault_table[ftype], off);
2082 2044 tod_faulted = ftype;
2083 2045 }
2084 2046 break;
2085 2047 case TOD_STALLED:
2086 2048 case TOD_RATECHANGED:
2087 2049 if (tod_faulted == TOD_NOFAULT) {
2088 2050 plat_tod_fault(ftype);
2089 2051 cmn_err(CE_WARN, "Time of Day clock error: "
2090 2052 "reason [%s]. -- "
2091 2053 " Stopped tracking Time Of Day clock.",
2092 2054 tod_fault_table[ftype]);
2093 2055 tod_faulted = ftype;
2094 2056 }
2095 2057 break;
2096 2058 case TOD_RDONLY:
2097 2059 if (tod_faulted == TOD_NOFAULT) {
2098 2060 plat_tod_fault(ftype);
2099 2061 cmn_err(CE_NOTE, "!Time of Day clock is "
2100 2062 "Read-Only; set of Date/Time will not "
2101 2063 "persist across reboot.");
2102 2064 tod_faulted = ftype;
2103 2065 }
2104 2066 break;
2105 2067 default:
2106 2068 break;
2107 2069 }
2108 2070 }
2109 2071 return (tod_faulted);
2110 2072 }
2111 2073
2112 2074 /*
2113 2075 * Two functions that allow tod_status_flag to be manipulated by functions
2114 2076 * external to this file.
2115 2077 */
2116 2078
2117 2079 void
2118 2080 tod_status_set(int tod_flag)
2119 2081 {
2120 2082 tod_status_flag |= tod_flag;
2121 2083 }
2122 2084
2123 2085 void
2124 2086 tod_status_clear(int tod_flag)
2125 2087 {
2126 2088 tod_status_flag &= ~tod_flag;
2127 2089 }
2128 2090
2129 2091 /*
2130 2092 * Record a timestamp and the value passed to tod_set(). The next call to
2131 2093 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
2132 2094 * when checking the timestruc_t returned by tod_get(). Ordinarily,
2133 2095 * tod_validate() will use prev_tick and prev_tod for this task but these
2134 2096 * become obsolete, and will be re-assigned with the prev_set_* values,
2135 2097 * in the case when the TOD is re-written.
2136 2098 */
2137 2099 void
2138 2100 tod_set_prev(timestruc_t ts)
2139 2101 {
2140 2102 if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2141 2103 tod_validate_deferred) {
2142 2104 return;
2143 2105 }
2144 2106 prev_set_tick = gethrtime();
2145 2107 /*
2146 2108 * A negative value will be set to zero in utc_to_tod() so we fake
2147 2109 * a zero here in such a case. This would need to change if the
2148 2110 * behavior of utc_to_tod() changes.
2149 2111 */
2150 2112 prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
2151 2113 }
2152 2114
2153 2115 /*
2154 2116 * tod_validate() is used for checking values returned by tod_get().
2155 2117 * Four error cases can be detected by this routine:
2156 2118 * TOD_REVERSED: current tod value is less than previous.
2157 2119 * TOD_STALLED: current tod value hasn't advanced.
2158 2120 * TOD_JUMPED: current tod value advanced too far from previous value.
2159 2121 * TOD_RATECHANGED: the ratio between average tod delta and
2160 2122 * average tick delta has changed.
2161 2123 */
2162 2124 time_t
2163 2125 tod_validate(time_t tod)
2164 2126 {
2165 2127 time_t diff_tod;
2166 2128 hrtime_t diff_tick;
2167 2129
2168 2130 long dtick;
2169 2131 int dtick_delta;
2170 2132
2171 2133 int off = 0;
2172 2134 enum tod_fault_type tod_bad = TOD_NOFAULT;
2173 2135
2174 2136 static int firsttime = 1;
2175 2137
2176 2138 static time_t prev_tod = 0;
2177 2139 static hrtime_t prev_tick = 0;
2178 2140 static long dtick_avg = TOD_REF_FREQ;
2179 2141
2180 2142 int cpr_resume_done = 0;
2181 2143 int dr_resume_done = 0;
2182 2144
2183 2145 hrtime_t tick = gethrtime();
2184 2146
2185 2147 ASSERT(MUTEX_HELD(&tod_lock));
2186 2148
2187 2149 /*
2188 2150 * tod_validate_enable is patchable via /etc/system.
2189 2151 * If TOD is already faulted, or if TOD validation is deferred,
2190 2152 * there is nothing to do.
2191 2153 */
2192 2154 if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2193 2155 tod_validate_deferred) {
2194 2156 return (tod);
2195 2157 }
2196 2158
2197 2159 /*
2198 2160 * If this is the first time through, we just need to save the tod
2199 2161 * we were called with and hrtime so we can use them next time to
2200 2162 * validate tod_get().
2201 2163 */
2202 2164 if (firsttime) {
2203 2165 firsttime = 0;
2204 2166 prev_tod = tod;
2205 2167 prev_tick = tick;
2206 2168 return (tod);
2207 2169 }
2208 2170
2209 2171 /*
2210 2172 * Handle any flags that have been turned on by tod_status_set().
2211 2173 * In the case where a tod_set() is done and then a subsequent
2212 2174 * tod_get() fails (ie, both TOD_SET_DONE and TOD_GET_FAILED are
2213 2175 * true), we treat the TOD_GET_FAILED with precedence by switching
2214 2176 * off the flag, returning tod and leaving TOD_SET_DONE asserted
2215 2177 * until such time as tod_get() completes successfully.
2216 2178 */
2217 2179 if (tod_status_flag & TOD_GET_FAILED) {
2218 2180 /*
2219 2181 * tod_get() has encountered an issue, possibly transitory,
2220 2182 * when reading TOD. We'll just return the incoming tod
2221 2183 * value (which is actually hrestime.tv_sec in this case)
2222 2184 * and when we get a genuine tod, following a successful
2223 2185 * tod_get(), we can validate using prev_tod and prev_tick.
2224 2186 */
2225 2187 tod_status_flag &= ~TOD_GET_FAILED;
2226 2188 return (tod);
2227 2189 } else if (tod_status_flag & TOD_SET_DONE) {
2228 2190 /*
2229 2191 * TOD has been modified. Just before the TOD was written,
2230 2192 * tod_set_prev() saved tod and hrtime; we can now use
2231 2193 * those values, prev_set_tod and prev_set_tick, to validate
2232 2194 * the incoming tod that's just been read.
2233 2195 */
2234 2196 prev_tod = prev_set_tod;
2235 2197 prev_tick = prev_set_tick;
2236 2198 dtick_avg = TOD_REF_FREQ;
2237 2199 tod_status_flag &= ~TOD_SET_DONE;
2238 2200 /*
2239 2201 * If a tod_set() preceded a cpr_suspend() without an
2240 2202 * intervening tod_validate(), we need to ensure that a
2241 2203 * TOD_JUMPED condition is ignored.
2242 2204 * Note this isn't a concern in the case of DR as we've
2243 2205 * just reassigned dtick_avg, above.
2244 2206 */
2245 2207 if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2246 2208 cpr_resume_done = 1;
2247 2209 tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2248 2210 }
2249 2211 } else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2250 2212 /*
2251 2213 * The system's coming back from a checkpoint resume.
2252 2214 */
2253 2215 cpr_resume_done = 1;
2254 2216 tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2255 2217 /*
2256 2218 * We need to handle the possibility of a CPR suspend
2257 2219 * operation having been initiated whilst a DR event was
2258 2220 * in-flight.
2259 2221 */
2260 2222 if (tod_status_flag & TOD_DR_RESUME_DONE) {
2261 2223 dr_resume_done = 1;
2262 2224 tod_status_flag &= ~TOD_DR_RESUME_DONE;
2263 2225 }
2264 2226 } else if (tod_status_flag & TOD_DR_RESUME_DONE) {
2265 2227 /*
2266 2228 * A Dynamic Reconfiguration event has taken place.
2267 2229 */
2268 2230 dr_resume_done = 1;
2269 2231 tod_status_flag &= ~TOD_DR_RESUME_DONE;
2270 2232 }
2271 2233
2272 2234 /* test hook */
2273 2235 switch (tod_unit_test) {
2274 2236 case 1: /* for testing jumping tod */
2275 2237 tod += tod_test_injector;
2276 2238 tod_unit_test = 0;
2277 2239 break;
2278 2240 case 2: /* for testing stuck tod bit */
2279 2241 tod |= 1 << tod_test_injector;
2280 2242 tod_unit_test = 0;
2281 2243 break;
2282 2244 case 3: /* for testing stalled tod */
2283 2245 tod = prev_tod;
2284 2246 tod_unit_test = 0;
2285 2247 break;
2286 2248 case 4: /* reset tod fault status */
2287 2249 (void) tod_fault(TOD_NOFAULT, 0);
2288 2250 tod_unit_test = 0;
2289 2251 break;
2290 2252 default:
2291 2253 break;
2292 2254 }
2293 2255
2294 2256 diff_tod = tod - prev_tod;
2295 2257 diff_tick = tick - prev_tick;
2296 2258
2297 2259 ASSERT(diff_tick >= 0);
2298 2260
2299 2261 if (diff_tod < 0) {
2300 2262 /* ERROR - tod reversed */
2301 2263 tod_bad = TOD_REVERSED;
2302 2264 off = (int)(prev_tod - tod);
2303 2265 } else if (diff_tod == 0) {
2304 2266 /* tod did not advance */
2305 2267 if (diff_tick > TOD_STALL_THRESHOLD) {
2306 2268 /* ERROR - tod stalled */
2307 2269 tod_bad = TOD_STALLED;
2308 2270 } else {
2309 2271 /*
2310 2272 * Make sure we don't update prev_tick
2311 2273 * so that diff_tick is calculated since
2312 2274 * the first diff_tod == 0
2313 2275 */
2314 2276 return (tod);
2315 2277 }
2316 2278 } else {
2317 2279 /* calculate dtick */
2318 2280 dtick = diff_tick / diff_tod;
2319 2281
2320 2282 /* update dtick averages */
2321 2283 dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);
2322 2284
2323 2285 /*
2324 2286 * Calculate dtick_delta as
2325 2287 * variation from reference freq in quartiles
2326 2288 */
2327 2289 dtick_delta = (dtick_avg - TOD_REF_FREQ) /
2328 2290 (TOD_REF_FREQ >> 2);
2329 2291
2330 2292 /*
2331 2293 * Even with a perfectly functioning TOD device,
2332 2294 * when the number of elapsed seconds is low the
2333 2295 * algorithm can calculate a rate that is beyond
2334 2296 * tolerance, causing an error. The algorithm is
2335 2297 * inaccurate when elapsed time is low (less than
2336 2298 * 5 seconds).
2337 2299 */
2338 2300 if (diff_tod > 4) {
2339 2301 if (dtick < TOD_JUMP_THRESHOLD) {
2340 2302 /*
2341 2303 * If we've just done a CPR resume, we detect
2342 2304 * a jump in the TOD but, actually, what's
2343 2305 * happened is that the TOD has been increasing
2344 2306 * whilst the system was suspended and the tick
2345 2307 * count hasn't kept up. We consider the first
2346 2308 * occurrence of this after a resume as normal
2347 2309 * and ignore it; otherwise, in a non-resume
2348 2310 * case, we regard it as a TOD problem.
2349 2311 */
2350 2312 if (!cpr_resume_done) {
2351 2313 /* ERROR - tod jumped */
2352 2314 tod_bad = TOD_JUMPED;
2353 2315 off = (int)diff_tod;
2354 2316 }
2355 2317 }
2356 2318 if (dtick_delta) {
2357 2319 /*
2358 2320 * If we've just done a DR resume, dtick_avg
2359 2321 * can go a bit askew so we reset it and carry
2360 2322 * on; otherwise, the TOD is in error.
2361 2323 */
2362 2324 if (dr_resume_done) {
2363 2325 dtick_avg = TOD_REF_FREQ;
2364 2326 } else {
2365 2327 /* ERROR - change in clock rate */
2366 2328 tod_bad = TOD_RATECHANGED;
2367 2329 }
2368 2330 }
2369 2331 }
2370 2332 }
2371 2333
2372 2334 if (tod_bad != TOD_NOFAULT) {
2373 2335 (void) tod_fault(tod_bad, off);
2374 2336
2375 2337 /*
2376 2338 * Disable dosynctodr since we are going to fault
2377 2339 * the TOD chip anyway here
2378 2340 */
2379 2341 dosynctodr = 0;
2380 2342
2381 2343 /*
2382 2344 * Set tod to the correct value from hrestime
2383 2345 */
2384 2346 tod = hrestime.tv_sec;
2385 2347 }
2386 2348
2387 2349 prev_tod = tod;
2388 2350 prev_tick = tick;
2389 2351 return (tod);
2390 2352 }
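
/*
 * A minimal user-level sketch of the rate test in tod_validate() above:
 * the observed ticks-per-second are smoothed with an exponential moving
 * average, dtick_avg += (dtick - dtick_avg) / TOD_FILTER_N, and a
 * rate-change fault is raised once the average drifts a quarter of the
 * reference frequency away from it.  REF_FREQ and FILTER_N are made-up
 * stand-ins for TOD_REF_FREQ and TOD_FILTER_N.
 */
#include <stdio.h>

#define	REF_FREQ	1000000000L
#define	FILTER_N	4

int
main(void)
{
	long dtick_avg = REF_FREQ;
	long sample = REF_FREQ + REF_FREQ / 2;	/* clock running 1.5x fast */
	long delta;
	int i;

	for (i = 0; i < 10; i++) {
		dtick_avg += (sample - dtick_avg) / FILTER_N;
		/* variation from the reference frequency, in quarters */
		delta = (dtick_avg - REF_FREQ) / (REF_FREQ >> 2);
		(void) printf("round %d: avg %ld delta %ld%s\n", i,
		    dtick_avg, delta, delta ? " (RATECHANGED)" : "");
	}
	return (0);
}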
2391 2353
2392 2354 static void
2393 2355 calcloadavg(int nrun, uint64_t *hp_ave)
2394 2356 {
2395 2357 static int64_t f[3] = { 135, 27, 9 };
2396 2358 uint_t i;
2397 2359 int64_t q, r;
2398 2360
2399 2361 /*
2400 2362 * Compute load average over the last 1, 5, and 15 minutes
2401 2363 * (60, 300, and 900 seconds). The constants in f[3] are for
2402 2364 * exponential decay:
2403 2365 * (1 - exp(-1/60)) << 13 = 135,
2404 2366 * (1 - exp(-1/300)) << 13 = 27,
2405 2367 * (1 - exp(-1/900)) << 13 = 9.
2406 2368 */
2407 2369
2408 2370 /*
2409 2371 * a little hoop-jumping to avoid integer overflow
2410 2372 */
2411 2373 for (i = 0; i < 3; i++) {
2412 2374 q = (hp_ave[i] >> 16) << 7;
2413 2375 r = (hp_ave[i] & 0xffff) << 7;
2414 2376 hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
2415 2377 }
2416 2378 }
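
/*
 * A quick user-level check of the decay constants cited in the comment
 * above: 135, 27 and 9 are (1 - e^(-1/T)) scaled by 2^13 for T = 60,
 * 300 and 900 seconds.  Recomputing them with expm1() (link with -lm)
 * yields roughly 135.4, 27.3 and 9.1 before truncation.
 */
#include <stdio.h>
#include <math.h>

int
main(void)
{
	double t[3] = { 60.0, 300.0, 900.0 };
	int i;

	for (i = 0; i < 3; i++)
		(void) printf("T=%3.0fs: (1 - exp(-1/T)) * 2^13 = %.1f\n",
		    t[i], -expm1(-1.0 / t[i]) * 8192.0);
	return (0);
}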
2417 2379
2418 2380 /*
2419 2381 * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
2420 2382 * calculate the value of lbolt according to the current mode. In the event
2421 2383 * driven mode (the default), lbolt is calculated by dividing the current hires
2422 2384 * time by the number of nanoseconds per clock tick. In the cyclic driven mode
2423 2385 * an internal variable is incremented at each firing of the lbolt cyclic
2424 2386 * and returned by lbolt_cyclic_driven().
2425 2387 *
2426 2388 * The system will transition from event to cyclic driven mode when the number
2427 2389 * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
2428 2390  * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
2429 2391 * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
2430 2392 * causing enough activity to cross the thresholds.
2431 2393 */
2432 2394 int64_t
2433 2395 lbolt_bootstrap(void)
2434 2396 {
2435 2397 return (0);
2436 2398 }
2437 2399
2438 2400 /* ARGSUSED */
2439 2401 uint_t
2440 2402 lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
2441 2403 {
2442 2404 hrtime_t ts, exp;
2443 2405 int ret;
2444 2406
2445 2407 ASSERT(lbolt_hybrid != lbolt_cyclic_driven);
2446 2408
2447 2409 kpreempt_disable();
2448 2410
2449 2411 ts = gethrtime();
2450 2412 lb_info->lbi_internal = (ts/nsec_per_tick);
2451 2413
2452 2414 /*
2453 2415 * Align the next expiration to a clock tick boundary.
2454 2416 */
2455 2417 exp = ts + nsec_per_tick - 1;
2456 2418 exp = (exp/nsec_per_tick) * nsec_per_tick;
2457 2419
2458 2420 ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
2459 2421 ASSERT(ret);
2460 2422
2461 2423 lbolt_hybrid = lbolt_cyclic_driven;
2462 2424 lb_info->lbi_cyc_deactivate = B_FALSE;
2463 2425 lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2464 2426
2465 2427 kpreempt_enable();
2466 2428
2467 2429 ret = atomic_dec_32_nv(&lb_info->lbi_token);
2468 2430 ASSERT(ret == 0);
2469 2431
2470 2432 return (1);
2471 2433 }
2472 2434
2473 2435 int64_t
2474 2436 lbolt_event_driven(void)
2475 2437 {
2476 2438 hrtime_t ts;
2477 2439 int64_t lb;
2478 2440 int ret, cpu = CPU->cpu_seqid;
2479 2441
2480 2442 ts = gethrtime();
2481 2443 ASSERT(ts > 0);
2482 2444
2483 2445 ASSERT(nsec_per_tick > 0);
2484 2446 lb = (ts/nsec_per_tick);
2485 2447
2486 2448 /*
2487 2449 * Switch to cyclic mode if the number of calls to this routine
2488 2450 * has reached the threshold within the interval.
2489 2451 */
2490 2452 if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
2491 2453
2492 2454 if (--lb_cpu[cpu].lbc_counter == 0) {
2493 2455 /*
2494 2456 * Reached the threshold within the interval, reset
2495 2457 * the usage statistics.
2496 2458 */
2497 2459 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2498 2460 lb_cpu[cpu].lbc_cnt_start = lb;
2499 2461
2500 2462 /*
2501 2463 * Make sure only one thread reprograms the
2502 2464 * lbolt cyclic and changes the mode.
2503 2465 */
2504 2466 if (panicstr == NULL &&
2505 2467 atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2506 2468
2507 2469 if (lbolt_hybrid == lbolt_cyclic_driven) {
2508 2470 ret = atomic_dec_32_nv(
2509 2471 &lb_info->lbi_token);
2510 2472 ASSERT(ret == 0);
2511 2473 } else {
2512 2474 lbolt_softint_post();
2513 2475 }
2514 2476 }
2515 2477 }
2516 2478 } else {
2517 2479 /*
2518 2480 * Exceeded the interval, reset the usage statistics.
2519 2481 */
2520 2482 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2521 2483 lb_cpu[cpu].lbc_cnt_start = lb;
2522 2484 }
2523 2485
2524 2486 ASSERT(lb >= lb_info->lbi_debug_time);
2525 2487
2526 2488 return (lb - lb_info->lbi_debug_time);
2527 2489 }
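
/*
 * A minimal user-level model of the mode-switch test above: a per-CPU
 * call budget is drained, and only if it empties within a window of
 * lbolt ticks is the call pressure considered high enough to justify a
 * permanently armed cyclic.  THRESH_CALLS, THRESH_INTERVAL and
 * record_call() are made-up stand-ins for lbi_thresh_calls,
 * lbi_thresh_interval and the bookkeeping in lbolt_event_driven().
 */
#include <stdio.h>

#define	THRESH_CALLS	100
#define	THRESH_INTERVAL	100		/* window size, in ticks */

static long cnt_start;
static int counter = THRESH_CALLS;

/* returns 1 when call pressure justifies switching to cyclic mode */
static int
record_call(long lb)
{
	if (lb - cnt_start < THRESH_INTERVAL) {
		if (--counter == 0) {
			counter = THRESH_CALLS;
			cnt_start = lb;
			return (1);
		}
	} else {
		/* window expired: start a fresh one */
		counter = THRESH_CALLS;
		cnt_start = lb;
	}
	return (0);
}

int
main(void)
{
	long lb;
	int i;

	/* 10 calls per tick: the 100-call budget drains within the window */
	for (lb = 0; lb < 20; lb++) {
		for (i = 0; i < 10; i++) {
			if (record_call(lb))
				(void) printf("switch at tick %ld\n", lb);
		}
	}
	return (0);
}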
2528 2490
2529 2491 int64_t
2530 2492 lbolt_cyclic_driven(void)
2531 2493 {
2532 2494 int64_t lb = lb_info->lbi_internal;
2533 2495 int cpu;
2534 2496
2535 2497 /*
2536 2498 * If a CPU has already prevented the lbolt cyclic from deactivating
2537 2499 * itself, don't bother tracking the usage. Otherwise check if we're
2538 2500 * within the interval and how the per CPU counter is doing.
2539 2501 */
2540 2502 if (lb_info->lbi_cyc_deactivate) {
2541 2503 cpu = CPU->cpu_seqid;
2542 2504 if ((lb - lb_cpu[cpu].lbc_cnt_start) <
2543 2505 lb_info->lbi_thresh_interval) {
2544 2506
2545 2507 if (lb_cpu[cpu].lbc_counter == 0)
2546 2508 /*
2547 2509 * Reached the threshold within the interval,
2548 2510 * prevent the lbolt cyclic from turning itself
2549 2511 * off.
2550 2512 */
2551 2513 lb_info->lbi_cyc_deactivate = B_FALSE;
2552 2514 else
2553 2515 lb_cpu[cpu].lbc_counter--;
2554 2516 } else {
2555 2517 /*
2556 2518 * Only reset the usage statistics when we have
2557 2519 * exceeded the interval.
2558 2520 */
2559 2521 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2560 2522 lb_cpu[cpu].lbc_cnt_start = lb;
2561 2523 }
2562 2524 }
2563 2525
2564 2526 ASSERT(lb >= lb_info->lbi_debug_time);
2565 2527
2566 2528 return (lb - lb_info->lbi_debug_time);
2567 2529 }
2568 2530
2569 2531 /*
2570 2532 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
2571 2533 * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
2572 2534 * It is inactive by default, and will be activated when switching from event
2573 2535 * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
2574 2536 * by lbolt_cyclic_driven().
2575 2537 */
2576 2538 static void
2577 2539 lbolt_cyclic(void)
2578 2540 {
2579 2541 int ret;
2580 2542
2581 2543 lb_info->lbi_internal++;
2582 2544
2583 2545 if (!lbolt_cyc_only) {
2584 2546
2585 2547 if (lb_info->lbi_cyc_deactivate) {
2586 2548 /*
2587 2549 * Switching from cyclic to event driven mode.
2588 2550 */
2589 2551 if (panicstr == NULL &&
2590 2552 atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2591 2553
2592 2554 if (lbolt_hybrid == lbolt_event_driven) {
2593 2555 ret = atomic_dec_32_nv(
2594 2556 &lb_info->lbi_token);
2595 2557 ASSERT(ret == 0);
2596 2558 return;
2597 2559 }
2598 2560
2599 2561 kpreempt_disable();
2600 2562
2601 2563 lbolt_hybrid = lbolt_event_driven;
2602 2564 ret = cyclic_reprogram(
2603 2565 lb_info->id.lbi_cyclic_id,
2604 2566 CY_INFINITY);
2605 2567 ASSERT(ret);
2606 2568
2607 2569 kpreempt_enable();
2608 2570
2609 2571 ret = atomic_dec_32_nv(&lb_info->lbi_token);
2610 2572 ASSERT(ret == 0);
2611 2573 }
2612 2574 }
2613 2575
2614 2576 /*
2615 2577 * The lbolt cyclic should not try to deactivate itself before
2616 2578 * the sampling period has elapsed.
2617 2579 */
2618 2580 if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
2619 2581 lb_info->lbi_thresh_interval) {
2620 2582 lb_info->lbi_cyc_deactivate = B_TRUE;
2621 2583 lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2622 2584 }
2623 2585 }
2624 2586 }
2625 2587
2626 2588 /*
2627 2589 * Since the lbolt service was historically cyclic driven, it must be 'stopped'
2628 2590 * when the system drops into the kernel debugger. lbolt_debug_entry() is
2629 2591 * called by the KDI system claim callbacks to record a hires timestamp at
2630 2592  * debug enter time. lbolt_debug_return() is called by the system release
2631 2593 * callbacks to account for the time spent in the debugger. The value is then
2632 2594 * accumulated in the lb_info structure and used by lbolt_event_driven() and
2633 2595 * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
2634 2596 */
2635 2597 void
2636 2598 lbolt_debug_entry(void)
2637 2599 {
2638 2600 if (lbolt_hybrid != lbolt_bootstrap) {
2639 2601 ASSERT(lb_info != NULL);
2640 2602 lb_info->lbi_debug_ts = gethrtime();
2641 2603 }
2642 2604 }
2643 2605
2644 2606 /*
2645 2607 * Calculate the time spent in the debugger and add it to the lbolt info
2646 2608 * structure. We also update the internal lbolt value in case we were in
2647 2609 * cyclic driven mode going in.
2648 2610 */
2649 2611 void
2650 2612 lbolt_debug_return(void)
2651 2613 {
2652 2614 hrtime_t ts;
2653 2615
2654 2616 if (lbolt_hybrid != lbolt_bootstrap) {
2655 2617 ASSERT(lb_info != NULL);
2656 2618 ASSERT(nsec_per_tick > 0);
2657 2619
2658 2620 ts = gethrtime();
2659 2621 lb_info->lbi_internal = (ts/nsec_per_tick);
2660 2622 lb_info->lbi_debug_time +=
2661 2623 ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);
2662 2624
2663 2625 lb_info->lbi_debug_ts = 0;
2664 2626 }
2665 2627 }
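
/*
 * A minimal user-level model of the debugger bracket above: the hires
 * time spent in the debugger is converted to ticks and accumulated as a
 * debt that the lbolt routines subtract, so lbolt appears to stand
 * still across a debugger session.  NSEC_PER_TICK, debug_enter(),
 * debug_return() and lbolt() are hypothetical stand-ins for
 * nsec_per_tick, the KDI callbacks and the lb_info fields.
 */
#include <stdio.h>

#define	NSEC_PER_TICK	10000000LL	/* 100Hz, for illustration */

static long long debug_ts;		/* stands in for lbi_debug_ts */
static long long debug_time;		/* stands in for lbi_debug_time */

static void
debug_enter(long long now)
{
	debug_ts = now;
}

static void
debug_return(long long now)
{
	debug_time += (now - debug_ts) / NSEC_PER_TICK;
	debug_ts = 0;
}

static long long
lbolt(long long now)
{
	return (now / NSEC_PER_TICK - debug_time);
}

int
main(void)
{
	long long t = 5000000000LL;		/* 5s of uptime */

	(void) printf("lbolt before: %lld\n", lbolt(t));
	debug_enter(t);
	t += 3000000000LL;			/* 3s spent in the debugger */
	debug_return(t);
	(void) printf("lbolt after:  %lld\n", lbolt(t));
	return (0);
}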
1713 lines elided