XXXX pass in cpu_pause_func via pause_cpus
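
This change replaces a global callback hook with an explicit argument: previously a caller installed its per-CPU routine in the global function pointer cpu_pause_func before calling pause_cpus(), and had to reset the global to NULL afterwards so that later users of pause_cpus() (for example CPU online/offline, which go through the regular cpu_pause()) got the default behavior; now the callback is passed directly to pause_cpus(). At the declaration level the difference is roughly the sketch below; the prototypes are assumptions inferred from the call sites and the removed extern in this webrev, not copied from the headers:

        /* Old interface: a global hook plus a one-argument pause (assumed). */
        extern void *(*cpu_pause_func)(void *);
        extern void pause_cpus(cpu_t *off_cp);

        /*
         * New interface: the callback travels with the call (assumed
         * prototype, inferred from pause_cpus(NULL, i_cpr_save_context)
         * and pause_cpus(NULL, NULL) in the hunks below).
         */
        extern void pause_cpus(cpu_t *off_cp, void *(*func)(void *));

The first listing below shows the affected file before the change; the second listing, after the separator, shows it after.

Old version (before the change):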


  73 extern int pm_powering_down;
  74 extern kmutex_t srn_clone_lock;
  75 extern int srn_inuse;
  76 
  77 static int cpr_suspend(int);
  78 static int cpr_resume(int);
  79 static void cpr_suspend_init(int);
  80 #if defined(__x86)
  81 static int cpr_suspend_cpus(void);
  82 static void cpr_resume_cpus(void);
  83 #endif
  84 static int cpr_all_online(void);
  85 static void cpr_restore_offline(void);
  86 
  87 cpr_time_t wholecycle_tv;
  88 int cpr_suspend_succeeded;
  89 pfn_t curthreadpfn;
  90 int curthreadremapped;
  91 
  92 extern cpuset_t cpu_ready_set;
  93 extern void *(*cpu_pause_func)(void *);
  94 
  95 extern processorid_t i_cpr_bootcpuid(void);
  96 extern cpu_t *i_cpr_bootcpu(void);
  97 extern void tsc_adjust_delta(hrtime_t tdelta);
  98 extern void tsc_resume(void);
  99 extern int tsc_resume_in_cyclic;
 100 
 101 /*
 102  * Set this variable to 1 to have device drivers resume in a
 103  * uniprocessor environment. This is to allow drivers that assume
 104  * that they resume on a UP machine to continue to work. It should
 105  * be deprecated once the broken drivers are fixed.
 106  */
 107 int cpr_resume_uniproc = 0;
 108 
 109 /*
 110  * save or restore abort_enable;  this prevents a drop
 111  * to kadb or prom during cpr_resume_devices() when
 112  * there is no kbd present;  see abort_sequence_enter()
 113  */
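
cpr_resume_uniproc, declared a few lines above, is a plain global int, so on illumos/Solaris it can presumably be set through the usual kernel-tunable mechanisms. The comment block below is an illustrative example only, not part of this webrev, and assumes the standard /etc/system and mdb facilities apply to this variable:

        /*
         * Hypothetical example (not from this webrev): enable the
         * uniprocessor-resume workaround persistently via /etc/system,
         * or patch it in a live kernel with mdb -kw.
         *
         *      /etc/system:    set cpr_resume_uniproc = 1
         *      mdb -kw:        cpr_resume_uniproc/W 1
         */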


 399          * bring all the offline cpus online
 400          */
 401         if ((ret = cpr_all_online())) {
 402                 mutex_exit(&cpu_lock);
 403                 return (ret);
 404         }
 405 
 406         /*
 407          * Set the affinity to be the boot processor
 408          * This is cleared in either cpr_resume_cpus() or cpr_unpause_cpus()
 409          */
 410         affinity_set(i_cpr_bootcpuid());
 411 
 412         ASSERT(CPU->cpu_id == 0);
 413 
 414         PMD(PMD_SX, ("curthread running on bootcpu\n"))
 415 
 416         /*
 417          * pause all other running CPUs and save the CPU state at the same time
 418          */
 419         cpu_pause_func = i_cpr_save_context;
 420         pause_cpus(NULL);
 421 
 422         mutex_exit(&cpu_lock);
 423 
 424         return (0);
 425 }
 426 
 427 /*
 428  * Take the system down to a checkpointable state and write
 429  * the state file; the following steps are executed sequentially:
 430  *
 431  *    - Request all user threads to stop themselves
 432  *    - push out and invalidate user pages
 433  *    - bring statefile inode incore to prevent a miss later
 434  *    - request all daemons to stop
 435  *    - check and make sure all threads are stopped
 436  *    - sync the file system
 437  *    - suspend all devices
 438  *    - block interrupts
 439  *    - dump system state and memory to state file
 440  *    - SPARC code will not be called with CPR_TORAM, caller filters


 775         /*
 776          * Initialize our syscall handlers
 777          */
 778         init_cpu_syscall(CPU);
 779 
 780 #endif
 781 
 782         i_cpr_pre_resume_cpus();
 783 
 784         /*
 785          * Restart the paused cpus
 786          */
 787         mutex_enter(&cpu_lock);
 788         start_cpus();
 789         mutex_exit(&cpu_lock);
 790 
 791         i_cpr_post_resume_cpus();
 792 
 793         mutex_enter(&cpu_lock);
 794         /*
 795          * Restore this cpu to use the regular cpu_pause(), so that
 796          * online and offline will work correctly
 797          */
 798         cpu_pause_func = NULL;
 799 
 800         /*
 801          * clear the affinity set in cpr_suspend_cpus()
 802          */
 803         affinity_clear();
 804 
 805         /*
 806          * offline all the cpus that were brought online during suspend
 807          */
 808         cpr_restore_offline();
 809 
 810         mutex_exit(&cpu_lock);
 811 }
 812 
 813 void
 814 cpr_unpause_cpus(void)
 815 {
 816         /*
 817          * Now restore the system to what it was before we suspended
 818          */
 819 
 820         PMD(PMD_SX, ("cpr_unpause_cpus: restoring system\n"))
 821 
 822         mutex_enter(&cpu_lock);
 823 
 824         /*
 825          * Restore this cpu to use the regular cpu_pause(), so that
 826          * online and offline will work correctly
 827          */
 828         cpu_pause_func = NULL;
 829 
 830         /*
 831          * Restart the paused cpus
 832          */
 833         start_cpus();
 834 
 835         /*
 836          * clear the affinity set in cpr_suspend_cpus()
 837          */
 838         affinity_clear();
 839 
 840         /*
 841          * offline all the cpus that were brought online during suspend
 842          */
 843         cpr_restore_offline();
 844 
 845         mutex_exit(&cpu_lock);
 846 }
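
Taken together, cpr_suspend_cpus(), cpr_resume_cpus() and cpr_unpause_cpus() above show the whole lifecycle of the old global: install the callback and pause on the way down, then remember to reset it to NULL on every resume path so that, as the in-line comments put it, "online and offline will work correctly" through the regular cpu_pause(). A condensed sketch of that obligation, pieced together from the three functions (in cpr_resume_cpus() the reset actually happens in a later cpu_lock section):

        mutex_enter(&cpu_lock);
        cpu_pause_func = i_cpr_save_context;    /* 1. install the per-CPU routine */
        pause_cpus(NULL);                       /* 2. pause the other CPUs        */
        mutex_exit(&cpu_lock);

        /* ... suspend/resume work ... */

        mutex_enter(&cpu_lock);
        cpu_pause_func = NULL;                  /* 3. reset so later online/offline */
        start_cpus();                           /*    use the regular cpu_pause()   */
        mutex_exit(&cpu_lock);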
 847 
 848 /*
 849  * Bring the system back up from a checkpoint; at this point


1077         modunload_disable();
1078         PMD(PMD_SX, ("cpr_resume: start kernel threads\n"))
1079         cpr_start_kernel_threads();
1080 
1081 rb_suspend_devices:
1082         CPR_DEBUG(CPR_DEBUG1, "resuming devices...");
1083         CPR_STAT_EVENT_START("  start drivers");
1084 
1085         PMD(PMD_SX,
1086             ("cpr_resume: rb_suspend_devices: cpr_resume_uniproc = %d\n",
1087             cpr_resume_uniproc))
1088 
1089 #if defined(__x86)
1090         /*
1091          * If cpr_resume_uniproc is set, pause all cpus other than the
1092          * current one, so that broken drivers that assume they are on
1093          * a uniprocessor machine will resume
1094          */
1095         if (cpr_resume_uniproc) {
1096                 mutex_enter(&cpu_lock);
1097                 pause_cpus(NULL);
1098                 mutex_exit(&cpu_lock);
1099         }
1100 #endif
1101 
1102         /*
1103          * The policy here is to continue resuming everything we can if
1104          * we did not successfully finish suspend, and to panic if we are
1105          * coming back from a fully suspended system.
1106          */
1107         PMD(PMD_SX, ("cpr_resume: resume devices\n"))
1108         rc = cpr_resume_devices(ddi_root_node(), 0);
1109 
1110         cpr_sae(0);
1111 
1112         str = "Failed to resume one or more devices.";
1113 
1114         if (rc) {
1115                 if (CPR->c_substate == C_ST_DUMP ||
1116                     (sleeptype == CPR_TORAM &&
1117                     CPR->c_substate == C_ST_NODUMP)) {


New version (after the change):

  73 extern int pm_powering_down;
  74 extern kmutex_t srn_clone_lock;
  75 extern int srn_inuse;
  76 
  77 static int cpr_suspend(int);
  78 static int cpr_resume(int);
  79 static void cpr_suspend_init(int);
  80 #if defined(__x86)
  81 static int cpr_suspend_cpus(void);
  82 static void cpr_resume_cpus(void);
  83 #endif
  84 static int cpr_all_online(void);
  85 static void cpr_restore_offline(void);
  86 
  87 cpr_time_t wholecycle_tv;
  88 int cpr_suspend_succeeded;
  89 pfn_t curthreadpfn;
  90 int curthreadremapped;
  91 
  92 extern cpuset_t cpu_ready_set;
  93 
  94 extern processorid_t i_cpr_bootcpuid(void);
  95 extern cpu_t *i_cpr_bootcpu(void);
  96 extern void tsc_adjust_delta(hrtime_t tdelta);
  97 extern void tsc_resume(void);
  98 extern int tsc_resume_in_cyclic;
  99 
 100 /*
 101  * Set this variable to 1 to have device drivers resume in a
 102  * uniprocessor environment. This is to allow drivers that assume
 103  * that they resume on a UP machine to continue to work. It should
 104  * be deprecated once the broken drivers are fixed.
 105  */
 106 int cpr_resume_uniproc = 0;
 107 
 108 /*
 109  * save or restore abort_enable;  this prevents a drop
 110  * to kadb or prom during cpr_resume_devices() when
 111  * there is no kbd present;  see abort_sequence_enter()
 112  */


 398          * bring all the offline cpus online
 399          */
 400         if ((ret = cpr_all_online())) {
 401                 mutex_exit(&cpu_lock);
 402                 return (ret);
 403         }
 404 
 405         /*
 406          * Set the affinity to be the boot processor
 407          * This is cleared in either cpr_resume_cpus() or cpr_unpause_cpus()
 408          */
 409         affinity_set(i_cpr_bootcpuid());
 410 
 411         ASSERT(CPU->cpu_id == 0);
 412 
 413         PMD(PMD_SX, ("curthread running on bootcpu\n"))
 414 
 415         /*
 416          * pause all other running CPUs and save the CPU state at the same time
 417          */
 418         pause_cpus(NULL, i_cpr_save_context);
 419 
 420         mutex_exit(&cpu_lock);
 421 
 422         return (0);
 423 }
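
In the new version above, the entire set-and-pause step collapses to the single call pause_cpus(NULL, i_cpr_save_context). The in-line comment ("pause all other running CPUs and save the CPU state at the same time") implies the callback is run on each CPU as that CPU parks itself in cpu_pause(). The sketch below shows one plausible shape of the callee side after this change; the names, structure and locking here are guesses for illustration and are not taken from this webrev:

        /* Hypothetical callee-side sketch -- not from this webrev. */
        static void *(*cp_pause_func)(void *);  /* hook is now private */

        void
        pause_cpus(cpu_t *off_cp, void *(*func)(void *))
        {
                cp_pause_func = func;           /* remembered for the pause threads */
                /* ... existing pause machinery, unchanged ... */
        }

        static void
        cpu_pause(int index)                    /* runs on each pausing CPU */
        {
                /* ... */
                if (cp_pause_func != NULL)
                        (*cp_pause_func)(NULL); /* e.g. i_cpr_save_context */
                /* ... spin here until start_cpus() ... */
        }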
 424 
 425 /*
 426  * Take the system down to a checkpointable state and write
 427  * the state file; the following steps are executed sequentially:
 428  *
 429  *    - Request all user threads to stop themselves
 430  *    - push out and invalidate user pages
 431  *    - bring statefile inode incore to prevent a miss later
 432  *    - request all daemons to stop
 433  *    - check and make sure all threads are stopped
 434  *    - sync the file system
 435  *    - suspend all devices
 436  *    - block interrupts
 437  *    - dump system state and memory to state file
 438  *    - SPARC code will not be called with CPR_TORAM, caller filters


 773         /*
 774          * Initialize our syscall handlers
 775          */
 776         init_cpu_syscall(CPU);
 777 
 778 #endif
 779 
 780         i_cpr_pre_resume_cpus();
 781 
 782         /*
 783          * Restart the paused cpus
 784          */
 785         mutex_enter(&cpu_lock);
 786         start_cpus();
 787         mutex_exit(&cpu_lock);
 788 
 789         i_cpr_post_resume_cpus();
 790 
 791         mutex_enter(&cpu_lock);
 792         /*
 793          * clear the affinity set in cpr_suspend_cpus()
 794          */
 795         affinity_clear();
 796 
 797         /*
 798          * offline all the cpus that were brought online during suspend
 799          */
 800         cpr_restore_offline();
 801 
 802         mutex_exit(&cpu_lock);
 803 }
 804 
 805 void
 806 cpr_unpause_cpus(void)
 807 {
 808         /*
 809          * Now restore the system to what it was before we suspended
 810          */
 811 
 812         PMD(PMD_SX, ("cpr_unpause_cpus: restoring system\n"))
 813 
 814         mutex_enter(&cpu_lock);
 815         /*
 816          * Restart the paused cpus
 817          */
 818         start_cpus();
 819 
 820         /*
 821          * clear the affinity set in cpr_suspend_cpus()
 822          */
 823         affinity_clear();
 824 
 825         /*
 826          * offline all the cpus that were brought online during suspend
 827          */
 828         cpr_restore_offline();
 829 
 830         mutex_exit(&cpu_lock);
 831 }
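
With the new interface both resume paths above shrink to start_cpus(), affinity_clear() and cpr_restore_offline(); there is no global hook to reset. For a consumer of pause_cpus() outside of CPR, the whole pattern presumably reduces to the sketch below; my_quiesce_and_work() and my_pause_cb() are made-up names for illustration, and the header is an assumption about where the declarations live:

        #include <sys/cpuvar.h> /* cpu_lock, pause_cpus(), start_cpus() (assumed) */

        /*
         * Hypothetical per-CPU callback; runs on each CPU as it pauses
         * (per the CPR usage above).
         */
        static void *
        my_pause_cb(void *arg)
        {
                /* per-CPU work while this CPU is being parked */
                return (NULL);
        }

        static void
        my_quiesce_and_work(void)
        {
                mutex_enter(&cpu_lock);
                pause_cpus(NULL, my_pause_cb);  /* pause the others, run the callback */
                /* ... do work while the other CPUs are parked ... */
                start_cpus();                   /* nothing to clean up afterwards */
                mutex_exit(&cpu_lock);
        }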
 832 
 833 /*
 834  * Bring the system back up from a checkpoint, at this point


1062         modunload_disable();
1063         PMD(PMD_SX, ("cpr_resume: start kernel threads\n"))
1064         cpr_start_kernel_threads();
1065 
1066 rb_suspend_devices:
1067         CPR_DEBUG(CPR_DEBUG1, "resuming devices...");
1068         CPR_STAT_EVENT_START("  start drivers");
1069 
1070         PMD(PMD_SX,
1071             ("cpr_resume: rb_suspend_devices: cpr_resume_uniproc = %d\n",
1072             cpr_resume_uniproc))
1073 
1074 #if defined(__x86)
1075         /*
1076          * If cpr_resume_uniproc is set, pause all cpus other than the
1077          * current one, so that broken drivers that assume they are on
1078          * a uniprocessor machine will resume
1079          */
1080         if (cpr_resume_uniproc) {
1081                 mutex_enter(&cpu_lock);
1082                 pause_cpus(NULL, NULL);
1083                 mutex_exit(&cpu_lock);
1084         }
1085 #endif
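
Here the new call passes NULL for the callback: the other CPUs are simply parked with no per-CPU work, matching the old pause_cpus(NULL) with cpu_pause_func already reset to NULL. The matching start_cpus() for this workaround is outside this excerpt. A minimal sketch of the "no callback" form:

        mutex_enter(&cpu_lock);
        pause_cpus(NULL, NULL);         /* just stop the other CPUs */
        mutex_exit(&cpu_lock);
        /* ... resume devices as if on a uniprocessor machine ... */
        /* (the corresponding start_cpus() is not shown in this excerpt) */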
1086 
1087         /*
1088          * The policy here is to continue resuming everything we can if
1089          * we did not successfully finish suspend, and to panic if we are
1090          * coming back from a fully suspended system.
1091          */
1092         PMD(PMD_SX, ("cpr_resume: resume devices\n"))
1093         rc = cpr_resume_devices(ddi_root_node(), 0);
1094 
1095         cpr_sae(0);
1096 
1097         str = "Failed to resume one or more devices.";
1098 
1099         if (rc) {
1100                 if (CPR->c_substate == C_ST_DUMP ||
1101                     (sleeptype == CPR_TORAM &&
1102                     CPR->c_substate == C_ST_NODUMP)) {