Print this page
5285 pass in cpu_pause_func via pause_cpus — before/after comparison: each caller of pause_cpus() is updated from the old single-argument form, pause_cpus(NULL), to the new two-argument form, pause_cpus(NULL, NULL), which accepts an optional CPU pause function as the second argument.


 372                 if ((cp = cpu_get(id)) == NULL)
 373                         continue;
 374                 cpu_map_exec_units(cp);
 375         }
 376 
 377         /*
 378          * Re-calculate processor groups.
 379          *
 380          * First tear down all PG information before adding any new PG
 381          * information derived from the MD we just downloaded. We must
 382          * call pg_cpu_inactive and pg_cpu_active with CPUs paused and
 383          * we want to minimize the number of times pause_cpus is called.
 384          * Inactivating all CPUs would leave PGs without any active CPUs,
 385          * so while CPUs are paused, call pg_cpu_inactive and swap in the
 386          * bootstrap PG structure saving the original PG structure to be
 387          * fini'd afterwards. This prevents the dispatcher from encountering
 388          * PGs in which all CPUs are inactive. Offline CPUs are already
 389          * inactive in their PGs and shouldn't be reactivated, so we must
 390          * not call pg_cpu_inactive or pg_cpu_active for those CPUs.
 391          */
 392         pause_cpus(NULL);
 393         for (id = 0; id < NCPU; id++) {
 394                 if ((cp = cpu_get(id)) == NULL)
 395                         continue;
 396                 if ((cp->cpu_flags & CPU_OFFLINE) == 0)
 397                         pg_cpu_inactive(cp);
 398                 pgps[id] = cp->cpu_pg;
 399                 pg_cpu_bootstrap(cp);
 400         }
 401         start_cpus();
 402 
 403         /*
 404          * pg_cpu_fini* and pg_cpu_init* must be called while CPUs are
 405          * not paused. Use two separate loops here so that we do not
 406          * initialize PG data for CPUs until all the old PG data structures
 407          * are torn down.
 408          */
 409         for (id = 0; id < NCPU; id++) {
 410                 if ((cp = cpu_get(id)) == NULL)
 411                         continue;
 412                 pg_cpu_fini(cp, pgps[id]);
 413                 mpo_cpu_remove(id);
 414         }
 415 
 416         /*
 417          * Initialize PG data for each CPU, but leave the bootstrapped
 418          * PG structure in place to avoid running with any PGs containing
 419          * nothing but inactive CPUs.
 420          */
 421         for (id = 0; id < NCPU; id++) {
 422                 if ((cp = cpu_get(id)) == NULL)
 423                         continue;
 424                 mpo_cpu_add(mdp, id);
 425                 pgps[id] = pg_cpu_init(cp, B_TRUE);
 426         }
 427 
 428         /*
 429          * Now that PG data has been initialized for all CPUs in the
 430          * system, replace the bootstrapped PG structure with the
 431          * initialized PG structure and call pg_cpu_active for each CPU.
 432          */
 433         pause_cpus(NULL);
 434         for (id = 0; id < NCPU; id++) {
 435                 if ((cp = cpu_get(id)) == NULL)
 436                         continue;
 437                 cp->cpu_pg = pgps[id];
 438                 if ((cp->cpu_flags & CPU_OFFLINE) == 0)
 439                         pg_cpu_active(cp);
 440         }
 441         start_cpus();
 442 
 443         mutex_exit(&cpu_lock);
 444 
 445         (void) md_fini_handle(mdp);
 446 }
 447 
 448 /*
 449  * Wrapper for the Sun Cluster error decoding function.
 450  */
 451 static int
 452 cluster_error_decode(int error, char *error_reason, size_t max_reason_len)
 453 {


 598         uint64_t        rv;
 599         timestruc_t     source_tod;
 600         int             spl;
 601 
 602         ASSERT(suspend_supported());
 603         DBG("suspend: %s", __func__);
 604 
 605         sfmmu_ctxdoms_lock();
 606 
 607         mutex_enter(&cpu_lock);
 608 
 609         /* Suspend the watchdog */
 610         watchdog_suspend();
 611 
 612         /* Record the TOD */
 613         mutex_enter(&tod_lock);
 614         source_tod = tod_get();
 615         mutex_exit(&tod_lock);
 616 
 617         /* Pause all other CPUs */
 618         pause_cpus(NULL);
 619         DBG_PROM("suspend: CPUs paused\n");
 620 
 621         /* Suspend cyclics */
 622         cyclic_suspend();
 623         DBG_PROM("suspend: cyclics suspended\n");
 624 
 625         /* Disable interrupts */
 626         spl = spl8();
 627         DBG_PROM("suspend: spl8()\n");
 628 
 629         source_tick = gettick_counter();
 630         source_stick = gettick();
 631         DBG_PROM("suspend: source_tick: 0x%lx\n", source_tick);
 632         DBG_PROM("suspend: source_stick: 0x%lx\n", source_stick);
 633 
 634         /*
 635          * Call into the HV to initiate the suspend. hv_guest_suspend()
 636          * returns after the guest has been resumed or if the suspend
 637          * operation failed or was cancelled. After a successful suspend,
 638          * the %tick and %stick registers may have changed by an amount




 372                 if ((cp = cpu_get(id)) == NULL)
 373                         continue;
 374                 cpu_map_exec_units(cp);
 375         }
 376 
 377         /*
 378          * Re-calculate processor groups.
 379          *
 380          * First tear down all PG information before adding any new PG
 381          * information derived from the MD we just downloaded. We must
 382          * call pg_cpu_inactive and pg_cpu_active with CPUs paused and
 383          * we want to minimize the number of times pause_cpus is called.
 384          * Inactivating all CPUs would leave PGs without any active CPUs,
 385          * so while CPUs are paused, call pg_cpu_inactive and swap in the
 386          * bootstrap PG structure saving the original PG structure to be
 387          * fini'd afterwards. This prevents the dispatcher from encountering
 388          * PGs in which all CPUs are inactive. Offline CPUs are already
 389          * inactive in their PGs and shouldn't be reactivated, so we must
 390          * not call pg_cpu_inactive or pg_cpu_active for those CPUs.
 391          */
 392         pause_cpus(NULL, NULL);
 393         for (id = 0; id < NCPU; id++) {
 394                 if ((cp = cpu_get(id)) == NULL)
 395                         continue;
 396                 if ((cp->cpu_flags & CPU_OFFLINE) == 0)
 397                         pg_cpu_inactive(cp);
 398                 pgps[id] = cp->cpu_pg;
 399                 pg_cpu_bootstrap(cp);
 400         }
 401         start_cpus();
 402 
 403         /*
 404          * pg_cpu_fini* and pg_cpu_init* must be called while CPUs are
 405          * not paused. Use two separate loops here so that we do not
 406          * initialize PG data for CPUs until all the old PG data structures
 407          * are torn down.
 408          */
 409         for (id = 0; id < NCPU; id++) {
 410                 if ((cp = cpu_get(id)) == NULL)
 411                         continue;
 412                 pg_cpu_fini(cp, pgps[id]);
 413                 mpo_cpu_remove(id);
 414         }
 415 
 416         /*
 417          * Initialize PG data for each CPU, but leave the bootstrapped
 418          * PG structure in place to avoid running with any PGs containing
 419          * nothing but inactive CPUs.
 420          */
 421         for (id = 0; id < NCPU; id++) {
 422                 if ((cp = cpu_get(id)) == NULL)
 423                         continue;
 424                 mpo_cpu_add(mdp, id);
 425                 pgps[id] = pg_cpu_init(cp, B_TRUE);
 426         }
 427 
 428         /*
 429          * Now that PG data has been initialized for all CPUs in the
 430          * system, replace the bootstrapped PG structure with the
 431          * initialized PG structure and call pg_cpu_active for each CPU.
 432          */
 433         pause_cpus(NULL, NULL);
 434         for (id = 0; id < NCPU; id++) {
 435                 if ((cp = cpu_get(id)) == NULL)
 436                         continue;
 437                 cp->cpu_pg = pgps[id];
 438                 if ((cp->cpu_flags & CPU_OFFLINE) == 0)
 439                         pg_cpu_active(cp);
 440         }
 441         start_cpus();
 442 
 443         mutex_exit(&cpu_lock);
 444 
 445         (void) md_fini_handle(mdp);
 446 }
 447 
 448 /*
 449  * Wrapper for the Sun Cluster error decoding function.
 450  */
 451 static int
 452 cluster_error_decode(int error, char *error_reason, size_t max_reason_len)
 453 {


 598         uint64_t        rv;
 599         timestruc_t     source_tod;
 600         int             spl;
 601 
 602         ASSERT(suspend_supported());
 603         DBG("suspend: %s", __func__);
 604 
 605         sfmmu_ctxdoms_lock();
 606 
 607         mutex_enter(&cpu_lock);
 608 
 609         /* Suspend the watchdog */
 610         watchdog_suspend();
 611 
 612         /* Record the TOD */
 613         mutex_enter(&tod_lock);
 614         source_tod = tod_get();
 615         mutex_exit(&tod_lock);
 616 
 617         /* Pause all other CPUs */
 618         pause_cpus(NULL, NULL);
 619         DBG_PROM("suspend: CPUs paused\n");
 620 
 621         /* Suspend cyclics */
 622         cyclic_suspend();
 623         DBG_PROM("suspend: cyclics suspended\n");
 624 
 625         /* Disable interrupts */
 626         spl = spl8();
 627         DBG_PROM("suspend: spl8()\n");
 628 
 629         source_tick = gettick_counter();
 630         source_stick = gettick();
 631         DBG_PROM("suspend: source_tick: 0x%lx\n", source_tick);
 632         DBG_PROM("suspend: source_stick: 0x%lx\n", source_stick);
 633 
 634         /*
 635          * Call into the HV to initiate the suspend. hv_guest_suspend()
 636          * returns after the guest has been resumed or if the suspend
 637          * operation failed or was cancelled. After a successful suspend,
 638          * the %tick and %stick registers may have changed by an amount