diff options
author | Li Feng <li1.feng@intel.com> | 2022-02-27 18:07:52 -0800 |
---|---|---|
committer | Chromeos LUCI <chromeos-scoped@luci-project-accounts.iam.gserviceaccount.com> | 2022-04-13 04:29:00 +0000 |
commit | b3b650acddc0774c6494ace197c19b249008331b (patch) | |
tree | fad3b1af394675a053b9845972e43cebb99530c2 | |
parent | 05e483d637cae2a903c09d22340e32baa3457493 (diff) | |
download | chrome-ec-b3b650acddc0774c6494ace197c19b249008331b.tar.gz |
zephyr: ap_pwrseq: Handle S0ix power state entry/exit
S0ix enablement has multiple patches; this is the fourth patch
of the series.
Add power state S0ix, transition states S0S0ix and S0ixS0 to power state
handling thread.
BUG=b:203446068 b:203446865 b:225996183
BRANCH=none
TEST=Verified on Nivviks; the system enters S0ix from S0.
echo 1 > /var/lib/power_manager/suspend_to_idle
restart powerd
powerd_dbus_suspend
also tested:
suspend_stress_test -c 5
system passed the S0ix test 5 times.
Signed-off-by: Li Feng <li1.feng@intel.com>
Change-Id: Idb07ad2b73d86aea98b5628cb90c8d8f4dd07c67
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/ec/+/3559570
Reviewed-by: Vijay P Hiremath <vijay.p.hiremath@intel.com>
Reviewed-by: Andrew McRae <amcrae@google.com>
4 files changed, 101 insertions, 10 deletions
diff --git a/zephyr/shim/include/power_host_sleep.h b/zephyr/shim/include/power_host_sleep.h index 44034e4274..e4f24b7d88 100644 --- a/zephyr/shim/include/power_host_sleep.h +++ b/zephyr/shim/include/power_host_sleep.h @@ -14,9 +14,11 @@ * by implementation in Zephyr code. */ #if CONFIG_AP_PWRSEQ + #include "ec_commands.h" #include "host_command.h" #include "lpc.h" +#include "system.h" /********************************************************************/ /* power.h */ diff --git a/zephyr/subsys/ap_pwrseq/include/x86_non_dsx_common_pwrseq_sm_handler.h b/zephyr/subsys/ap_pwrseq/include/x86_non_dsx_common_pwrseq_sm_handler.h index b2ce4f74e1..cd7aa87b1d 100644 --- a/zephyr/subsys/ap_pwrseq/include/x86_non_dsx_common_pwrseq_sm_handler.h +++ b/zephyr/subsys/ap_pwrseq/include/x86_non_dsx_common_pwrseq_sm_handler.h @@ -9,9 +9,10 @@ #include <init.h> #include <kernel.h> #include <zephyr/types.h> + #include <ap_power/ap_power.h> #include <ap_power/ap_power_events.h> - +#include <ap_power_host_sleep.h> #include <x86_common_pwrseq.h> #define DT_DRV_COMPAT intel_ap_pwrseq @@ -24,7 +25,7 @@ void init_chipset_pwr_seq_state(void); void request_exit_hardoff(bool should_exit); enum power_states_ndsx pwr_sm_get_state(void); void apshutdown(void); - +void ap_pwrseq_handle_chipset_reset(void); extern const char pwrsm_dbg[][25]; #endif /* __X86_NON_DSX_COMMON_PWRSEQ_SM_HANDLER_H__ */ diff --git a/zephyr/subsys/ap_pwrseq/power_host_sleep.c b/zephyr/subsys/ap_pwrseq/power_host_sleep.c index 1803971620..13e6aba765 100644 --- a/zephyr/subsys/ap_pwrseq/power_host_sleep.c +++ b/zephyr/subsys/ap_pwrseq/power_host_sleep.c @@ -4,7 +4,6 @@ */ #include <ap_power/ap_power_interface.h> -#include <ap_power_host_sleep.h> #include <x86_non_dsx_common_pwrseq_sm_handler.h> LOG_MODULE_DECLARE(ap_pwrseq, CONFIG_AP_PWRSEQ_LOG_LEVEL); diff --git a/zephyr/subsys/ap_pwrseq/x86_non_dsx_common_pwrseq_sm_handler.c b/zephyr/subsys/ap_pwrseq/x86_non_dsx_common_pwrseq_sm_handler.c index 648b46807b..34c3d5df18 
100644 --- a/zephyr/subsys/ap_pwrseq/x86_non_dsx_common_pwrseq_sm_handler.c +++ b/zephyr/subsys/ap_pwrseq/x86_non_dsx_common_pwrseq_sm_handler.c @@ -4,6 +4,7 @@ */ #include <init.h> + #include <x86_non_dsx_common_pwrseq_sm_handler.h> static K_KERNEL_STACK_DEFINE(pwrseq_thread_stack, @@ -261,11 +262,13 @@ static int common_pwr_sm_run(int state) /* Notify power event that rails are up */ ap_power_ev_send_callbacks(AP_POWER_STARTUP); - /* TODO: S0ix +#if CONFIG_AP_PWRSEQ_S0IX + /* * Clearing the S0ix flag on the path to S0 * to handle any reset conditions. */ - + ap_power_reset_host_sleep_state(); +#endif return SYS_POWER_STATE_S3; case SYS_POWER_STATE_S3: @@ -289,7 +292,7 @@ static int common_pwr_sm_run(int state) /* All the power rails must be stable */ if (power_signal_get(PWR_ALL_SYS_PWRGD)) { -#if defined(CONFIG_PLATFORM_EC_CHIPSET_RESUME_INIT_HOOK) +#if CONFIG_PLATFORM_EC_CHIPSET_RESUME_INIT_HOOK /* Notify power event before resume */ ap_power_ev_send_callbacks(AP_POWER_RESUME_INIT); #endif @@ -299,13 +302,82 @@ static int common_pwr_sm_run(int state) } break; +#if CONFIG_AP_PWRSEQ_S0IX + case SYS_POWER_STATE_S0ix: + /* System in S0 only if SLP_S0 and SLP_S3 are de-asserted */ + if (power_signals_off(IN_PCH_SLP_S0) && + signals_valid_and_off(IN_PCH_SLP_S3)) { + /* TODO: Make sure ap reset handling is done + * before leaving S0ix. + */ + return SYS_POWER_STATE_S0ixS0; + } else if (!power_signals_on(IN_PGOOD_ALL_CORE)) + return SYS_POWER_STATE_S0; + + break; + + case SYS_POWER_STATE_S0S0ix: + /* + * Check sleep state and notify listeners of S0ix suspend if + * HC already set sleep suspend state. + */ + ap_power_sleep_notify_transition(AP_POWER_SLEEP_SUSPEND); + + /* + * Enable idle task deep sleep. Allow the low power idle task + * to go into deep sleep in S0ix. 
+ */ + enable_sleep(SLEEP_MASK_AP_RUN); + +#if CONFIG_PLATFORM_EC_CHIPSET_RESUME_INIT_HOOK + ap_power_ev_send_callbacks(AP_POWER_SUSPEND_COMPLETE); +#endif + + return SYS_POWER_STATE_S0ix; + + case SYS_POWER_STATE_S0ixS0: + if (power_get_host_sleep_state() != + HOST_SLEEP_EVENT_S0IX_RESUME) + break; + + /* + * Disable idle task deep sleep. This means that the low + * power idle task will not go into deep sleep while in S0. + */ + disable_sleep(SLEEP_MASK_AP_RUN); + +#if CONFIG_PLATFORM_EC_CHIPSET_RESUME_INIT_HOOK + ap_power_ev_send_callbacks(AP_POWER_RESUME_INIT); +#endif + + return SYS_POWER_STATE_S0; + +#endif /* CONFIG_AP_PWRSEQ_S0IX */ + case SYS_POWER_STATE_S0: if (!power_signals_on(IN_PGOOD_ALL_CORE)) { ap_power_force_shutdown(AP_POWER_SHUTDOWN_POWERFAIL); return SYS_POWER_STATE_G3; - } else if (signals_valid_and_on(IN_PCH_SLP_S3)) + } else if (signals_valid_and_on(IN_PCH_SLP_S3)) { return SYS_POWER_STATE_S0S3; - /* TODO: S0ix */ + +#if CONFIG_AP_PWRSEQ_S0IX + /* + * SLP_S0 may assert in system idle scenario without a kernel + * freeze call. This may cause interrupt storm since there is + * no freeze/unfreeze of threads/process in the idle scenario. + * Ignore the SLP_S0 assertions in idle scenario by checking + * the host sleep state. + */ + } else if (power_get_host_sleep_state() + == HOST_SLEEP_EVENT_S0IX_SUSPEND && + power_signals_on(IN_PCH_SLP_S0)) { + + return SYS_POWER_STATE_S0S0ix; + } else { + ap_power_sleep_notify_transition(AP_POWER_SLEEP_RESUME); +#endif /* CONFIG_AP_PWRSEQ_S0IX */ + } break; @@ -325,6 +397,9 @@ static int common_pwr_sm_run(int state) * correctly handle global resets which have a bit of delay * while the SLP_Sx_L signals are asserted then deasserted. 
*/ + /* TODO */ + /* power_s5_up = 0; */ + return SYS_POWER_STATE_S5; case SYS_POWER_STATE_S3S4: @@ -333,10 +408,22 @@ static int common_pwr_sm_run(int state) case SYS_POWER_STATE_S0S3: /* Notify power event before we remove power rails */ ap_power_ev_send_callbacks(AP_POWER_SUSPEND); -#if defined(CONFIG_PLATFORM_EC_CHIPSET_RESUME_INIT_HOOK) +#if CONFIG_PLATFORM_EC_CHIPSET_RESUME_INIT_HOOK /* Notify power event after suspend */ ap_power_ev_send_callbacks(AP_POWER_SUSPEND_COMPLETE); #endif + + /* + * Enable idle task deep sleep. Allow the low power idle task + * to go into deep sleep in S3 or lower. + */ + enable_sleep(SLEEP_MASK_AP_RUN); + +#if CONFIG_AP_PWRSEQ_S0IX + /* Re-initialize S0ix flag */ + ap_power_reset_host_sleep_state(); +#endif + return SYS_POWER_STATE_S3; default: @@ -386,8 +473,10 @@ static void pwrseq_loop_thread(void *p1, void *p2, void *p3) if (curr_state == new_state) new_state = common_pwr_sm_run(curr_state); - if (curr_state != new_state) + if (curr_state != new_state) { pwr_sm_set_state(new_state); + ap_power_set_active_wake_mask(); + } k_msleep(t_wait_ms); } |