6583 remove whole-process swapping

--- old/usr/src/uts/common/disp/fss.c
+++ new/usr/src/uts/common/disp/fss.c
↓ open down ↓ 341 lines elided ↑ open up ↑
 342  342  static pri_t    fss_minglobpri; /* minimum global priority */
 343  343  
 344  344  static fssproc_t fss_listhead[FSS_LISTS];
 345  345  static kmutex_t fss_listlock[FSS_LISTS];
 346  346  
 347  347  static fsspset_t *fsspsets;
 348  348  static kmutex_t fsspsets_lock;  /* protects fsspsets */
 349  349  
 350  350  static id_t     fss_cid;
 351  351  
 352      -static time_t   fss_minrun = 2; /* t_pri becomes 59 within 2 secs */
 353      -static time_t   fss_minslp = 2; /* min time on sleep queue for hardswap */
 354  352  static int      fss_quantum = 11;
 355  353  
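
For context, fss_quantum above is the FSS time slice expressed in clock ticks. Assuming the default hz of 100 (which is not part of this diff), that works out to roughly 110 ms:

	/* illustrative arithmetic only; fss_quantum_ms is not a real kernel symbol */
	static const int fss_quantum_ms = 11 * 1000 / 100;	/* 11 ticks ~= 110 ms */
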
 356  354  static void     fss_newpri(fssproc_t *, boolean_t);
 357  355  static void     fss_update(void *);
 358  356  static int      fss_update_list(int);
 359  357  static void     fss_change_priority(kthread_t *, fssproc_t *);
 360  358  
 361  359  static int      fss_admin(caddr_t, cred_t *);
 362  360  static int      fss_getclinfo(void *);
 363  361  static int      fss_parmsin(void *);
↓ open down ↓ 8 lines elided ↑ open up ↑
 372  370  static void     fss_exitclass(void *);
 373  371  static int      fss_canexit(kthread_t *, cred_t *);
 374  372  static int      fss_fork(kthread_t *, kthread_t *, void *);
 375  373  static void     fss_forkret(kthread_t *, kthread_t *);
 376  374  static void     fss_parmsget(kthread_t *, void *);
 377  375  static int      fss_parmsset(kthread_t *, void *, id_t, cred_t *);
 378  376  static void     fss_stop(kthread_t *, int, int);
 379  377  static void     fss_exit(kthread_t *);
 380  378  static void     fss_active(kthread_t *);
 381  379  static void     fss_inactive(kthread_t *);
 382      -static pri_t    fss_swapin(kthread_t *, int);
 383      -static pri_t    fss_swapout(kthread_t *, int);
 384  380  static void     fss_trapret(kthread_t *);
 385  381  static void     fss_preempt(kthread_t *);
 386  382  static void     fss_setrun(kthread_t *);
 387  383  static void     fss_sleep(kthread_t *);
 388  384  static void     fss_tick(kthread_t *);
 389  385  static void     fss_wakeup(kthread_t *);
 390  386  static int      fss_donice(kthread_t *, cred_t *, int, int *);
 391  387  static int      fss_doprio(kthread_t *, cred_t *, int, int *);
 392  388  static pri_t    fss_globpri(kthread_t *);
 393  389  static void     fss_yield(kthread_t *);
↓ open down ↓ 16 lines elided ↑ open up ↑
 410  406          fss_exitclass,
 411  407          fss_canexit,
 412  408          fss_fork,
 413  409          fss_forkret,
 414  410          fss_parmsget,
 415  411          fss_parmsset,
 416  412          fss_stop,
 417  413          fss_exit,
 418  414          fss_active,
 419  415          fss_inactive,
 420      -        fss_swapin,
 421      -        fss_swapout,
 422  416          fss_trapret,
 423  417          fss_preempt,
 424  418          fss_setrun,
 425  419          fss_sleep,
 426  420          fss_tick,
 427  421          fss_wakeup,
 428  422          fss_donice,
 429  423          fss_globpri,
 430  424          fss_nullsys,    /* set_process_group */
 431  425          fss_yield,
↓ open down ↓ 1701 lines elided ↑ open up ↑
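
The initializer above (continuing into the elided lines) is the FSS scheduling-class operations vector; the fss_swapin/fss_swapout entries can be dropped because the swapper that invoked those hooks is removed by this change. A self-contained sketch of the function-pointer-vector pattern involved, using purely illustrative names rather than the real sys/class.h definitions:

typedef struct toy_thread toy_thread_t;

/* Toy class-ops vector; before this change it would also carry swapin/swapout slots. */
typedef struct toy_classfuncs {
	void	(*cl_tick)(toy_thread_t *);
	void	(*cl_wakeup)(toy_thread_t *);
} toy_classfuncs_t;

struct toy_thread {
	const toy_classfuncs_t	*t_clfuncs;	/* per-thread class ops */
};

/* The dispatcher invokes class-specific behavior through the vector. */
static void
toy_cl_tick(toy_thread_t *t)
{
	t->t_clfuncs->cl_tick(t);
}
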
2133 2127                  thread_unlock(t);
2134 2128          }
2135 2129  }
2136 2130  
2137 2131  static void
2138 2132  fss_nullsys()
2139 2133  {
2140 2134  }
2141 2135  
2142 2136  /*
2143      - * fss_swapin() returns -1 if the thread is loaded or is not eligible to be
2144      - * swapped in. Otherwise, it returns the thread's effective priority based
 2145      - * on swapout time and size of process (0 <= epri <= SHRT_MAX).
2146      - */
2147      -/*ARGSUSED*/
2148      -static pri_t
2149      -fss_swapin(kthread_t *t, int flags)
2150      -{
2151      -        fssproc_t *fssproc = FSSPROC(t);
2152      -        long epri = -1;
2153      -        proc_t *pp = ttoproc(t);
2154      -
2155      -        ASSERT(THREAD_LOCK_HELD(t));
2156      -
2157      -        if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
2158      -                time_t swapout_time;
2159      -
2160      -                swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
2161      -                if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
2162      -                        epri = (long)DISP_PRIO(t) + swapout_time;
2163      -                } else {
2164      -                        /*
2165      -                         * Threads which have been out for a long time,
2166      -                         * have high user mode priority and are associated
2167      -                         * with a small address space are more deserving.
2168      -                         */
2169      -                        epri = fssproc->fss_umdpri;
2170      -                        ASSERT(epri >= 0 && epri <= fss_maxumdpri);
2171      -                        epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
2172      -                }
2173      -                /*
2174      -                 * Scale epri so that SHRT_MAX / 2 represents zero priority.
2175      -                 */
2176      -                epri += SHRT_MAX / 2;
2177      -                if (epri < 0)
2178      -                        epri = 0;
2179      -                else if (epri > SHRT_MAX)
2180      -                        epri = SHRT_MAX;
2181      -        }
2182      -        return ((pri_t)epri);
2183      -}
2184      -
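
The removed fss_swapin() above scored an unloaded runnable thread by starting from its user-mode priority, crediting time spent swapped out, and penalizing a large swapped-out resident set. A stand-alone sketch of that calculation, with illustrative parameter names (the final rebase-and-clamp step is sketched after fss_swapout() below):

static long
swapin_epri_sketch(long umdpri, long swapout_secs, long swapped_rss_pages,
    long maxpgio_pages)
{
	/* guard against a zero divisor, as the kernel's nz() macro does */
	long pgio = (maxpgio_pages != 0) ? maxpgio_pages : 1;

	/* a larger value means more deserving of being swapped back in */
	return (umdpri + swapout_secs - swapped_rss_pages / pgio / 2);
}
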
2185      -/*
2186      - * fss_swapout() returns -1 if the thread isn't loaded or is not eligible to
2187      - * be swapped out. Otherwise, it returns the thread's effective priority
2188      - * based on if the swapper is in softswap or hardswap mode.
2189      - */
2190      -static pri_t
2191      -fss_swapout(kthread_t *t, int flags)
2192      -{
2193      -        fssproc_t *fssproc = FSSPROC(t);
2194      -        long epri = -1;
2195      -        proc_t *pp = ttoproc(t);
2196      -        time_t swapin_time;
2197      -
2198      -        ASSERT(THREAD_LOCK_HELD(t));
2199      -
2200      -        if (INHERITED(t) ||
2201      -            (fssproc->fss_flags & FSSKPRI) ||
2202      -            (t->t_proc_flag & TP_LWPEXIT) ||
2203      -            (t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
2204      -            !(t->t_schedflag & TS_LOAD) ||
2205      -            !(SWAP_OK(t)))
2206      -                return (-1);
2207      -
2208      -        ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
2209      -
2210      -        swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
2211      -
2212      -        if (flags == SOFTSWAP) {
2213      -                if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
2214      -                        epri = 0;
2215      -                } else {
2216      -                        return ((pri_t)epri);
2217      -                }
2218      -        } else {
2219      -                pri_t pri;
2220      -
2221      -                if ((t->t_state == TS_SLEEP && swapin_time > fss_minslp) ||
2222      -                    (t->t_state == TS_RUN && swapin_time > fss_minrun)) {
2223      -                        pri = fss_maxumdpri;
2224      -                        epri = swapin_time -
2225      -                            (rm_asrss(pp->p_as) / nz(maxpgio)/2) - (long)pri;
2226      -                } else {
2227      -                        return ((pri_t)epri);
2228      -                }
2229      -        }
2230      -
2231      -        /*
2232      -         * Scale epri so that SHRT_MAX / 2 represents zero priority.
2233      -         */
2234      -        epri += SHRT_MAX / 2;
2235      -        if (epri < 0)
2236      -                epri = 0;
2237      -        else if (epri > SHRT_MAX)
2238      -                epri = SHRT_MAX;
2239      -
2240      -        return ((pri_t)epri);
2241      -}
2242      -
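
Both removed routines ended the same way: rebase the signed effective priority so that SHRT_MAX / 2 represents zero, then clamp to the [0, SHRT_MAX] range the swapper expected. A stand-alone equivalent of that final step:

#include <limits.h>

static short
scale_epri_sketch(long epri)
{
	epri += SHRT_MAX / 2;		/* SHRT_MAX / 2 now means zero priority */
	if (epri < 0)
		epri = 0;
	else if (epri > SHRT_MAX)
		epri = SHRT_MAX;
	return ((short)epri);
}
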
2243      -/*
2244 2137   * If thread is currently at a kernel mode priority (has slept) and is
2245 2138   * returning to the userland we assign it the appropriate user mode priority
2246 2139   * and time quantum here.  If we're lowering the thread's priority below that
2247 2140   * of other runnable threads then we will set runrun via cpu_surrender() to
2248 2141   * cause preemption.
2249 2142   */
2250 2143  static void
2251 2144  fss_trapret(kthread_t *t)
2252 2145  {
2253 2146          fssproc_t *fssproc = FSSPROC(t);
↓ open down ↓ 10 lines elided ↑ open up ↑
2264 2157                   * If thread has blocked in the kernel
2265 2158                   */
2266 2159                  THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2267 2160                  cp->cpu_dispatch_pri = DISP_PRIO(t);
2268 2161                  ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
2269 2162                  fssproc->fss_flags &= ~FSSKPRI;
2270 2163  
2271 2164                  if (DISP_MUST_SURRENDER(t))
2272 2165                          cpu_surrender(t);
2273 2166          }
2274      -
2275      -        /*
2276      -         * Swapout lwp if the swapper is waiting for this thread to reach
2277      -         * a safe point.
2278      -         */
2279      -        if (t->t_schedflag & TS_SWAPENQ) {
2280      -                thread_unlock(t);
2281      -                swapout_lwp(ttolwp(t));
2282      -                thread_lock(t);
2283      -        }
2284 2167  }
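
As the block comment before fss_trapret() says, the thread surrenders the CPU when its new user-mode priority falls below that of another runnable thread. A simplified sketch of that decision; the real test is the DISP_MUST_SURRENDER() macro, which considers more than this:

/* illustrative only: give up the CPU if a better-priority thread is waiting */
static int
must_surrender_sketch(int new_pri, int best_runnable_pri)
{
	return (new_pri < best_runnable_pri);
}
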
2285 2168  
2286 2169  /*
2287 2170   * Arrange for thread to be placed in appropriate location on dispatcher queue.
2288 2171   * This is called with the current thread in TS_ONPROC and locked.
2289 2172   */
2290 2173  static void
2291 2174  fss_preempt(kthread_t *t)
2292 2175  {
2293 2176          fssproc_t *fssproc = FSSPROC(t);
↓ open down ↓ 24 lines elided ↑ open up ↑
2318 2201           */
2319 2202          if (CPUCAPS_ON()) {
2320 2203                  (void) cpucaps_charge(t, &fssproc->fss_caps,
2321 2204                      CPUCAPS_CHARGE_ENFORCE);
2322 2205  
2323 2206                  if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
2324 2207                          return;
2325 2208          }
2326 2209  
2327 2210          /*
2328      -         * If preempted in user-land mark the thread as swappable because it
2329      -         * cannot be holding any kernel locks.
2330      -         */
2331      -        ASSERT(t->t_schedflag & TS_DONT_SWAP);
2332      -        if (lwp != NULL && lwp->lwp_state == LWP_USER)
2333      -                t->t_schedflag &= ~TS_DONT_SWAP;
2334      -
2335      -        /*
2336 2211           * Check to see if we're doing "preemption control" here.  If
2337 2212           * we are, and if the user has requested that this thread not
2338 2213           * be preempted, and if preemptions haven't been put off for
2339 2214           * too long, let the preemption happen here but try to make
2340 2215           * sure the thread is rescheduled as soon as possible.  We do
2341 2216           * this by putting it on the front of the highest priority run
2342 2217           * queue in the FSS class.  If the preemption has been put off
2343 2218           * for too long, clear the "nopreempt" bit and let the thread
2344 2219           * be preempted.
2345 2220           */
↓ open down ↓ 3 lines elided ↑ open up ↑
2349 2224                          if (!(fssproc->fss_flags & FSSKPRI)) {
2350 2225                                  /*
2351 2226                                   * If not already remembered, remember current
2352 2227                                   * priority for restoration in fss_yield().
2353 2228                                   */
2354 2229                                  if (!(fssproc->fss_flags & FSSRESTORE)) {
2355 2230                                          fssproc->fss_scpri = t->t_pri;
2356 2231                                          fssproc->fss_flags |= FSSRESTORE;
2357 2232                                  }
2358 2233                                  THREAD_CHANGE_PRI(t, fss_maxumdpri);
2359      -                                t->t_schedflag |= TS_DONT_SWAP;
2360 2234                          }
2361 2235                          schedctl_set_yield(t, 1);
2362 2236                          setfrontdq(t);
2363 2237                          return;
2364 2238                  } else {
2365 2239                          if (fssproc->fss_flags & FSSRESTORE) {
2366 2240                                  THREAD_CHANGE_PRI(t, fssproc->fss_scpri);
2367 2241                                  fssproc->fss_flags &= ~FSSRESTORE;
2368 2242                          }
2369 2243                          schedctl_set_nopreempt(t, 0);
↓ open down ↓ 85 lines elided ↑ open up ↑
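
The preemption-control path in fss_preempt() above is driven from userland through the schedctl(3C) interface. A minimal sketch of how an application brackets a short critical section with it; do_work() is a placeholder:

#include <schedctl.h>

extern void do_work(void);		/* placeholder critical section */

void
critical_update(void)
{
	schedctl_t *sc = schedctl_init();	/* per-LWP control page */

	if (sc != NULL)
		schedctl_start(sc);	/* hint to the dispatcher: defer preemption */
	do_work();			/* keep this region short */
	if (sc != NULL)
		schedctl_stop(sc);	/* clear the hint; yield if we overstayed */
}
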
2455 2329                   * The thread has done a THREAD_KPRI_REQUEST(), slept, then
 2456 2330                   * done THREAD_KPRI_RELEASE() (so now t_kpri_req is 0 again),
2457 2331                   * then slept again all without finishing the current system
2458 2332                   * call so trapret won't have cleared FSSKPRI
2459 2333                   */
2460 2334                  fssproc->fss_flags &= ~FSSKPRI;
2461 2335                  THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2462 2336                  if (DISP_MUST_SURRENDER(curthread))
2463 2337                          cpu_surrender(t);
2464 2338          }
2465      -        t->t_stime = ddi_get_lbolt();   /* time stamp for the swapper */
2466 2339  }
2467 2340  
2468 2341  /*
 2469 2342   * A tick interrupt has occurred on a running thread. Check to see if our
2470      - * time slice has expired.  We must also clear the TS_DONT_SWAP flag in
2471      - * t_schedflag if the thread is eligible to be swapped out.
     2343 + * time slice has expired.
2472 2344   */
2473 2345  static void
2474 2346  fss_tick(kthread_t *t)
2475 2347  {
2476 2348          fssproc_t *fssproc;
2477 2349          fssproj_t *fssproj;
2478      -        klwp_t *lwp;
2479 2350          boolean_t call_cpu_surrender = B_FALSE;
2480 2351          boolean_t cpucaps_enforce = B_FALSE;
2481 2352  
2482 2353          ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
2483 2354  
2484 2355          /*
2485 2356           * It's safe to access fsspset and fssproj structures because we're
2486 2357           * holding our p_lock here.
2487 2358           */
2488 2359          thread_lock(t);
↓ open down ↓ 51 lines elided ↑ open up ↑
2540 2411                          new_pri = fssproc->fss_umdpri;
2541 2412                          ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
2542 2413  
2543 2414                          /*
2544 2415                           * When the priority of a thread is changed, it may
2545 2416                           * be necessary to adjust its position on a sleep queue
2546 2417                           * or dispatch queue. The function thread_change_pri
2547 2418                           * accomplishes this.
2548 2419                           */
2549 2420                          if (thread_change_pri(t, new_pri, 0)) {
2550      -                                if ((t->t_schedflag & TS_LOAD) &&
2551      -                                    (lwp = t->t_lwp) &&
2552      -                                    lwp->lwp_state == LWP_USER)
2553      -                                        t->t_schedflag &= ~TS_DONT_SWAP;
2554 2421                                  fssproc->fss_timeleft = fss_quantum;
2555 2422                          } else {
2556 2423                                  call_cpu_surrender = B_TRUE;
2557 2424                          }
2558 2425                  } else if (t->t_state == TS_ONPROC &&
2559 2426                      t->t_pri < t->t_disp_queue->disp_maxrunpri) {
2560 2427                          /*
2561 2428                           * If there is a higher-priority thread which is
2562 2429                           * waiting for a processor, then thread surrenders
2563 2430                           * the processor.
↓ open down ↓ 43 lines elided ↑ open up ↑
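
fss_tick() above implements the time-slice check described in its comment: charge the running thread for the tick and, once the slice is used up, recompute its priority and make it yield the processor. A heavily simplified stand-alone illustration; the names are made up, and CPU caps and kernel-priority handling are omitted:

typedef struct slice {
	int	timeleft;	/* ticks remaining in the current slice */
	int	quantum;	/* slice length in ticks (cf. fss_quantum) */
} slice_t;

/* Returns 1 when the thread should be re-prioritized and requeued, else 0. */
static int
slice_tick_sketch(slice_t *s)
{
	if (--s->timeleft > 0)
		return (0);		/* slice not yet used up */

	s->timeleft = s->quantum;	/* recharge for the next slice */
	return (1);			/* recompute priority; requeue or surrender */
}
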
2607 2474  static void
2608 2475  fss_wakeup(kthread_t *t)
2609 2476  {
2610 2477          fssproc_t *fssproc;
2611 2478  
2612 2479          ASSERT(THREAD_LOCK_HELD(t));
2613 2480          ASSERT(t->t_state == TS_SLEEP);
2614 2481  
2615 2482          fss_active(t);
2616 2483  
2617      -        t->t_stime = ddi_get_lbolt();           /* time stamp for the swapper */
2618 2484          fssproc = FSSPROC(t);
2619 2485          fssproc->fss_flags &= ~FSSBACKQ;
2620 2486  
2621 2487          if (fssproc->fss_flags & FSSKPRI) {
2622 2488                  /*
2623 2489                   * If we already have a kernel priority assigned, then we
2624 2490                   * just use it.
2625 2491                   */
2626 2492                  setbackdq(t);
2627 2493          } else if (t->t_kpri_req) {
↓ open down ↓ 364 lines elided ↑ open up ↑