summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSoby Mathew <soby.mathew@arm.com>2015-04-07 12:16:56 +0100
committerSoby Mathew <soby.mathew@arm.com>2015-04-16 16:39:29 +0100
commit5e4ec4bdb45a7172d02114c308cb1f32917c61ae (patch)
tree00db6312ac774f778defb675583d7a76f016b78b
parenta255baac81c56d7ab8977add71038ff90058a76e (diff)
downloadarm-trusted-firmware-5e4ec4bdb45a7172d02114c308cb1f32917c61ae.tar.gz
PSCI: Add framework to handle composite power states
The state-id field in the power-state parameter of a CPU_SUSPEND call can be used to describe composite power states specific to a platform. The current PSCI implementation does not interpret the state-id field. It relies on the target power level and the state type fields in the power-state parameter to perform state coordination and power management operations. If composite power states are used, then the PSCI implementation cannot make global decisions about these operations since it does not understand the platform specific states. The extended state-id format support for the power state parameter is introduced with this patch. It also adds support to involve the platform in state coordination which facilitates the use of composite power states and improves the support for entering standby states at multiple power domains. The PSCI implementation now defines a generic representation of the power-state parameter. It depends on the platform port to convert the power-state parameter (possibly encoding a composite power state) passed in a CPU_SUSPEND call to this representation. It is an array where each index corresponds to a power level. Each entry contains the local power state the power domain at that power level could enter. The platform also defines the macros PLAT_MAX_RET_STATE and PLAT_MAX_OFF_STATE which let the PSCI implementation find out which power domains have been requested to enter a retention or power down state. The PSCI implementation does not interpret the local power states defined by the platform. The only constraint is that PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE. For a power domain tree, an array of local power states is maintained. These are the states requested for each power domain by all the CPUs which the domain is an ancestor of. The CPU power level is not considered.
During a request to place multiple power domains in a low power state, the platform is passed an array of requested power-states for each power domain through the plat_get_target_pwr_state() API. It coordinates amongst these states to determine a target local power state for the power domain. Finally, the platform power management hooks are passed the target local power states for each power domain using the generic representation described above. The platform executes operations specific to these target states. The platform hook for placing a power domain in a standby state (plat_pm_ops_t.pwr_domain_standby()) should now be used to only place the CPU power domain in a standby or retention state. The extended state-id power state format can be enabled by setting the build flag PSCI_EXTENDED_STATE_ID=1 and it is disabled by default. NOTE: THE PLATFORM PORTS WILL HAVE TO BE REWORKED TO USE THE NEW FRAMEWORK. Change-Id: I9d4123d97e179529802c1f589baaa4101759d80c
-rw-r--r--Makefile7
-rw-r--r--include/bl31/services/psci.h105
-rw-r--r--include/plat/common/platform.h9
-rw-r--r--plat/common/aarch64/plat_common.c36
-rw-r--r--plat/fvp/fvp_def.h7
-rw-r--r--plat/fvp/fvp_pm.c87
-rw-r--r--plat/fvp/include/platform_def.h13
-rw-r--r--plat/juno/include/platform_def.h13
-rw-r--r--plat/juno/juno_def.h7
-rw-r--r--plat/juno/plat_pm.c86
-rw-r--r--services/std_svc/psci/psci_common.c568
-rw-r--r--services/std_svc/psci/psci_main.c122
-rw-r--r--services/std_svc/psci/psci_off.c35
-rw-r--r--services/std_svc/psci/psci_on.c59
-rw-r--r--services/std_svc/psci/psci_private.h81
-rw-r--r--services/std_svc/psci/psci_setup.c45
-rw-r--r--services/std_svc/psci/psci_suspend.c192
17 files changed, 993 insertions, 479 deletions
diff --git a/Makefile b/Makefile
index aefcd2c6b..b443b34a3 100644
--- a/Makefile
+++ b/Makefile
@@ -68,6 +68,9 @@ ARM_CCI_PRODUCT_ID := 400
ASM_ASSERTION := ${DEBUG}
# Build option to choose whether Trusted firmware uses Coherent memory or not.
USE_COHERENT_MEM := 1
+# Flag used to choose the power state format, i.e. the Extended State-ID or the
+# original format.
+PSCI_EXTENDED_STATE_ID := 0
# Default FIP file name
FIP_NAME := fip.bin
# By default, use the -pedantic option in the gcc command line
@@ -256,6 +259,10 @@ $(eval $(call add_define,LOG_LEVEL))
$(eval $(call assert_boolean,USE_COHERENT_MEM))
$(eval $(call add_define,USE_COHERENT_MEM))
+# Process PSCI_EXTENDED_STATE_ID flag
+$(eval $(call assert_boolean,PSCI_EXTENDED_STATE_ID))
+$(eval $(call add_define,PSCI_EXTENDED_STATE_ID))
+
# Process Generate CoT flags
$(eval $(call assert_boolean,GENERATE_COT))
$(eval $(call assert_boolean,CREATE_KEYS))
diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h
index 4face921f..65fdaac7f 100644
--- a/include/bl31/services/psci.h
+++ b/include/bl31/services/psci.h
@@ -95,23 +95,31 @@
* PSCI CPU_SUSPEND 'power_state' parameter specific defines
******************************************************************************/
#define PSTATE_ID_SHIFT 0
+
+#if PSCI_EXTENDED_STATE_ID
+#define PSTATE_VALID_MASK 0xB0000000
+#define PSTATE_TYPE_SHIFT 30
+#define PSTATE_ID_MASK 0xfffffff
+#else
+#define PSTATE_VALID_MASK 0xFCFE0000
#define PSTATE_TYPE_SHIFT 16
#define PSTATE_PWR_LVL_SHIFT 24
-
#define PSTATE_ID_MASK 0xffff
-#define PSTATE_TYPE_MASK 0x1
#define PSTATE_PWR_LVL_MASK 0x3
-#define PSTATE_VALID_MASK 0xFCFE0000
+
+#define psci_get_pstate_pwrlvl(pstate) ((pstate >> PSTATE_PWR_LVL_SHIFT) & \
+ PSTATE_PWR_LVL_MASK)
+#endif /* PSCI_EXTENDED_STATE_ID */
#define PSTATE_TYPE_STANDBY 0x0
#define PSTATE_TYPE_POWERDOWN 0x1
+#define PSTATE_TYPE_MASK 0x1
#define psci_get_pstate_id(pstate) ((pstate >> PSTATE_ID_SHIFT) & \
PSTATE_ID_MASK)
#define psci_get_pstate_type(pstate) ((pstate >> PSTATE_TYPE_SHIFT) & \
PSTATE_TYPE_MASK)
-#define psci_get_pstate_pwrlvl(pstate) ((pstate >> PSTATE_PWR_LVL_SHIFT) & \
- PSTATE_PWR_LVL_MASK)
+#define psci_check_power_state(pstate) (pstate & PSTATE_VALID_MASK)
/*******************************************************************************
* PSCI CPU_FEATURES feature flag specific defines
@@ -120,6 +128,11 @@
#define FF_PSTATE_SHIFT 1
#define FF_PSTATE_ORIG 0
#define FF_PSTATE_EXTENDED 1
+#if PSCI_EXTENDED_STATE_ID
+#define FF_PSTATE FF_PSTATE_EXTENDED
+#else
+#define FF_PSTATE FF_PSTATE_ORIG
+#endif
/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
#define FF_MODE_SUPPORT_SHIFT 0
@@ -144,25 +157,46 @@
#define PSCI_E_NOT_PRESENT -7
#define PSCI_E_DISABLED -8
-/*******************************************************************************
- * PSCI power domain state related constants.
- ******************************************************************************/
-#define PSCI_STATE_ON 0x0
-#define PSCI_STATE_OFF 0x1
-#define PSCI_STATE_ON_PENDING 0x2
-#define PSCI_STATE_SUSPEND 0x3
-
-#define PSCI_INVALID_DATA -1
-
-#define get_phys_state(x) (x != PSCI_STATE_ON ? \
- PSCI_STATE_OFF : PSCI_STATE_ON)
+#ifndef __ASSEMBLY__
-#define psci_validate_power_state(pstate) (pstate & PSTATE_VALID_MASK)
+#include <stdint.h>
+/*
+ * These are the states reported by the PSCI_AFFINITY_INFO API for the specified
+ * CPU. The definitions of these states can be found in Section 5.7.1 in the
+ * PSCI specification (ARM DEN 0022C).
+ */
+typedef enum {
+ AFF_STATE_ON = 0,
+ AFF_STATE_OFF = 1,
+ AFF_STATE_ON_PENDING = 2
+} aff_info_state_t;
-#ifndef __ASSEMBLY__
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_DATA -1
-#include <stdint.h>
+/*
+ * Type for representing the local power state at a particular level.
+ */
+typedef uint8_t plat_local_state_t;
+
+/*****************************************************************************
+ * This data structure defines the representation of the power state parameter
+ * for its exchange between the generic PSCI code and the platform port. For
+ * example, it is used by the platform port to specify the requested power
+ * states during a power management operation. It is used by the generic code to
+ * inform the platform about the target power states that each level should
+ * enter.
+ ****************************************************************************/
+typedef struct psci_power_state {
+ /*
+ * The pwr_domain_state[] stores the local power state at each level
+ * for the CPU.
+ */
+ plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
+} psci_power_state_t;
/*******************************************************************************
* Structure used to store per-cpu information relevant to the PSCI service.
@@ -170,8 +204,15 @@
* this information will not reside on a cache line shared with another cpu.
******************************************************************************/
typedef struct psci_cpu_data {
- uint32_t power_state; /* The power state from CPU_SUSPEND */
- unsigned char psci_state; /* The state of this CPU as seen by PSCI */
+ /*
+ * Highest power level which takes part in a power management
+ * operation.
+ */
+ int8_t target_pwrlvl;
+ aff_info_state_t aff_info_state;
+
+ /* The local power state of this CPU */
+ plat_local_state_t local_state;
#if !USE_COHERENT_MEM
bakery_info_t pcpu_bakery_info[PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT];
#endif
@@ -182,18 +223,18 @@ typedef struct psci_cpu_data {
* perform common low level pm functions
******************************************************************************/
typedef struct plat_pm_ops {
- void (*pwr_domain_standby)(unsigned int power_state);
+ void (*cpu_standby)(plat_local_state_t cpu_state);
int (*pwr_domain_on)(unsigned long mpidr,
- unsigned long sec_entrypoint,
- unsigned int pwrlvl);
- void (*pwr_domain_off)(unsigned int pwrlvl);
+ unsigned long sec_entrypoint);
+ void (*pwr_domain_off)(psci_power_state_t *target_state);
void (*pwr_domain_suspend)(unsigned long sec_entrypoint,
- unsigned int pwrlvl);
- void (*pwr_domain_on_finish)(unsigned int pwrlvl);
- void (*pwr_domain_suspend_finish)(unsigned int pwrlvl);
+ psci_power_state_t *target_state);
+ void (*pwr_domain_on_finish)(psci_power_state_t *target_state);
+ void (*pwr_domain_suspend_finish)(psci_power_state_t *target_state);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
- int (*validate_power_state)(unsigned int power_state);
+ int (*validate_power_state)(unsigned int power_state,
+ psci_power_state_t *req_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
} plat_pm_ops_t;
@@ -230,10 +271,6 @@ void __dead2 psci_power_down_wfi(void);
void psci_cpu_on_finish_entry(void);
void psci_cpu_suspend_finish_entry(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *);
-int psci_get_suspend_stateid_by_mpidr(unsigned long);
-int psci_get_suspend_stateid(void);
-int psci_get_suspend_pwrlvl(void);
-
uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 4d7cd68c2..b78be9709 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -32,7 +32,7 @@
#define __PLATFORM_H__
#include <stdint.h>
-
+#include <psci.h>
/*******************************************************************************
* Forward declarations
@@ -181,6 +181,13 @@ int platform_setup_pm(const struct plat_pm_ops **);
unsigned char *platform_get_power_domain_tree_desc(void);
/*******************************************************************************
+ * Optional PSCI functions (BL3-1).
+ ******************************************************************************/
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu);
+
+/*******************************************************************************
* Optional BL3-1 functions (may be overridden)
******************************************************************************/
void bl31_plat_enable_mmu(uint32_t flags);
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 90574fd66..269509531 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,15 +28,18 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <assert.h>
+#include <psci.h>
#include <xlat_tables.h>
/*
- * The following 2 platform setup functions are weakly defined. They
+ * The following 3 platform setup functions are weakly defined. They
* provide typical implementations that may be re-used by multiple
* platforms but may also be overridden by a platform if required.
*/
#pragma weak bl31_plat_enable_mmu
#pragma weak bl32_plat_enable_mmu
+#pragma weak plat_get_target_pwr_state
void bl31_plat_enable_mmu(uint32_t flags)
{
@@ -47,3 +50,32 @@ void bl32_plat_enable_mmu(uint32_t flags)
{
enable_mmu_el1(flags);
}
+
+/*
+ * The PSCI generic code uses this API to let the platform participate in state
+ * coordination during a power management operation. It compares the platform
+ * specific local power states requested by each cpu for a given power domain
+ * and returns the coordinated target power state that the domain should
+ * enter. A platform assigns a number to a local power state. This default
+ * implementation assumes that the platform assigns these numbers in order of
+ * increasing depth of the power state i.e. for two power states X & Y, if X < Y
+ * then X represents a shallower power state than Y. As a result, the
+ * coordinated target local power state for a power domain will be the minimum
+ * of the requested local power states.
+ */
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+
+ assert(ncpu);
+
+ do {
+ temp = *states++;
+ if (temp < target)
+ target = temp;
+ } while (--ncpu);
+
+ return target;
+}
diff --git a/plat/fvp/fvp_def.h b/plat/fvp/fvp_def.h
index b47c7b0d0..ae6e5a724 100644
--- a/plat/fvp/fvp_def.h
+++ b/plat/fvp/fvp_def.h
@@ -165,6 +165,13 @@
#define FVP_PWR_LVL1 MPIDR_AFFLVL1
/*
+ * State IDs for local power states on the FVP.
+ */
+#define FVP_PM_RUN 0 /* Valid for CPUs and Clusters */
+#define FVP_PM_RET 1 /* Valid for only CPUs */
+#define FVP_PM_OFF 2 /* Valid for CPUs and Clusters */
+
+/*
* The number of regions like RO(code), coherent and data required by
* different BL stages which need to be mapped in the MMU.
*/
diff --git a/plat/fvp/fvp_pm.c b/plat/fvp/fvp_pm.c
index f9e8a635f..6545998f4 100644
--- a/plat/fvp/fvp_pm.c
+++ b/plat/fvp/fvp_pm.c
@@ -91,8 +91,11 @@ static void fvp_cluster_pwrdwn_common(void)
/*******************************************************************************
* FVP handler called when a CPU is about to enter standby.
******************************************************************************/
-void fvp_pwr_domain_standby(unsigned int power_state)
+void fvp_cpu_standby(plat_local_state_t cpu_state)
{
+
+ assert(cpu_state == FVP_PM_RET);
+
/*
* Enter standby state
* dsb is good practice before using wfi to enter low power states
@@ -103,22 +106,15 @@ void fvp_pwr_domain_standby(unsigned int power_state)
/*******************************************************************************
* FVP handler called when a power domain is about to be turned on. The
- * level and mpidr determine the power domain.
+ * mpidr determines the CPU to be turned on.
******************************************************************************/
int fvp_pwr_domain_on(unsigned long mpidr,
- unsigned long sec_entrypoint,
- unsigned int pwrlvl)
+ unsigned long sec_entrypoint)
{
int rc = PSCI_E_SUCCESS;
unsigned int psysr;
/*
- * It's possible to turn on only power level 0 i.e. a cpu
- * on the FVP.
- */
- assert(pwrlvl == FVP_PWR_LVL0);
-
- /*
* Ensure that we do not cancel an inflight power off request
* for the target cpu. That would leave it in a zombie wfi.
* Wait for it to power off, program the jump address for the
@@ -136,11 +132,12 @@ int fvp_pwr_domain_on(unsigned long mpidr,
}
/*******************************************************************************
- * FVP handler called when a power domain is about to be turned off.
+ * FVP handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
-void fvp_pwr_domain_off(unsigned int pwrlvl)
+void fvp_pwr_domain_off(psci_power_state_t *target_state)
{
- assert(pwrlvl <= FVP_PWR_LVL1);
+ assert(target_state->pwr_domain_state[FVP_PWR_LVL0] == FVP_PM_OFF);
/*
* If execution reaches this stage then this power domain will be
@@ -149,20 +146,28 @@ void fvp_pwr_domain_off(unsigned int pwrlvl)
*/
fvp_cpu_pwrdwn_common();
- if (pwrlvl != FVP_PWR_LVL0)
+ if (target_state->pwr_domain_state[FVP_PWR_LVL1] == FVP_PM_OFF)
fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
- * FVP handler called when a power domain is about to be suspended.
+ * FVP handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
void fvp_pwr_domain_suspend(unsigned long sec_entrypoint,
- unsigned int pwrlvl)
+ psci_power_state_t *target_state)
{
unsigned long mpidr;
- assert(pwrlvl <= FVP_PWR_LVL1);
+ /*
+ * FVP has retention only at cpu level. Just return
+ * as nothing is to be done for retention.
+ */
+ if (target_state->pwr_domain_state[FVP_PWR_LVL0] == FVP_PM_RET)
+ return;
+
+ assert(target_state->pwr_domain_state[FVP_PWR_LVL0] == FVP_PM_OFF);
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
@@ -177,25 +182,26 @@ void fvp_pwr_domain_suspend(unsigned long sec_entrypoint,
fvp_cpu_pwrdwn_common();
/* Perform the common cluster specific operations */
- if (pwrlvl != FVP_PWR_LVL0)
+ if (target_state->pwr_domain_state[FVP_PWR_LVL1] == FVP_PM_OFF)
fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
* FVP handler called when a power domain has just been powered on after
- * being turned off earlier.
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
******************************************************************************/
-void fvp_pwr_domain_on_finish(unsigned int pwrlvl)
+void fvp_pwr_domain_on_finish(psci_power_state_t *target_state)
{
unsigned long mpidr;
- assert(pwrlvl <= FVP_PWR_LVL1);
+ assert(target_state->pwr_domain_state[FVP_PWR_LVL0] == FVP_PM_OFF);
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
/* Perform the common cluster specific operations */
- if (pwrlvl != FVP_PWR_LVL0) {
+ if (target_state->pwr_domain_state[FVP_PWR_LVL1] == FVP_PM_OFF) {
/*
* This CPU might have woken up whilst the cluster was
* attempting to power down. In this case the FVP power
@@ -228,14 +234,21 @@ void fvp_pwr_domain_on_finish(unsigned int pwrlvl)
}
/*******************************************************************************
- * FVP handler called when a powr domain has just been powered on after
- * having been suspended earlier.
+ * FVP handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
-void fvp_pwr_domain_suspend_finish(unsigned int pwrlvl)
+void fvp_pwr_domain_suspend_finish(psci_power_state_t *target_state)
{
- fvp_pwr_domain_on_finish(pwrlvl);
+ /*
+ * Nothing to be done on waking up from retention from CPU level.
+ */
+ if (target_state->pwr_domain_state[FVP_PWR_LVL0] == FVP_PM_RET)
+ return;
+
+ fvp_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
@@ -264,16 +277,30 @@ static void __dead2 fvp_system_reset(void)
/*******************************************************************************
* FVP handler called to check the validity of the power state parameter.
******************************************************************************/
-int fvp_validate_power_state(unsigned int power_state)
+int fvp_validate_power_state(unsigned int power_state, psci_power_state_t *req_state)
{
+ int pstate = psci_get_pstate_type(power_state);
+ int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+ int i;
+
+ assert(req_state);
+
+ if (pwr_lvl > PLAT_MAX_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
+
/* Sanity check the requested state */
- if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
+ if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on power level 0
* i.e. a cpu on the fvp. Ignore any other power level.
*/
- if (psci_get_pstate_pwrlvl(power_state) != FVP_PWR_LVL0)
+ if (pwr_lvl != FVP_PWR_LVL0)
return PSCI_E_INVALID_PARAMS;
+
+ req_state->pwr_domain_state[FVP_PWR_LVL0] = FVP_PM_RET;
+ } else {
+ for (i = FVP_PWR_LVL0; i <= pwr_lvl; i++)
+ req_state->pwr_domain_state[i] = FVP_PM_OFF;
}
/*
@@ -289,7 +316,7 @@ int fvp_validate_power_state(unsigned int power_state)
* Export the platform handlers to enable psci to invoke them
******************************************************************************/
static const plat_pm_ops_t fvp_plat_pm_ops = {
- .pwr_domain_standby = fvp_pwr_domain_standby,
+ .cpu_standby = fvp_cpu_standby,
.pwr_domain_on = fvp_pwr_domain_on,
.pwr_domain_off = fvp_pwr_domain_off,
.pwr_domain_suspend = fvp_pwr_domain_suspend,
diff --git a/plat/fvp/include/platform_def.h b/plat/fvp/include/platform_def.h
index 6da5ac9f7..48cdaa014 100644
--- a/plat/fvp/include/platform_def.h
+++ b/plat/fvp/include/platform_def.h
@@ -102,6 +102,19 @@
#define PLAT_NUM_PWR_DOMAINS (FVP_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLAT_MAX_PWR_LVL FVP_PWR_LVL1
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE FVP_PM_RET
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE FVP_PM_OFF
+
#define MAX_IO_DEVICES 3
#define MAX_IO_HANDLES 4
diff --git a/plat/juno/include/platform_def.h b/plat/juno/include/platform_def.h
index fb4203c95..929372e64 100644
--- a/plat/juno/include/platform_def.h
+++ b/plat/juno/include/platform_def.h
@@ -94,6 +94,19 @@
#define PLATFORM_CORE_COUNT (JUNO_CLUSTER0_CORE_COUNT + \
JUNO_CLUSTER1_CORE_COUNT)
#define PLAT_MAX_PWR_LVL JUNO_PWR_LVL1
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE JUNO_PM_RET
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE JUNO_PM_OFF
+
#define MAX_IO_DEVICES 3
#define MAX_IO_HANDLES 4
diff --git a/plat/juno/juno_def.h b/plat/juno/juno_def.h
index 11405c08a..09ca7bd79 100644
--- a/plat/juno/juno_def.h
+++ b/plat/juno/juno_def.h
@@ -166,6 +166,13 @@
#define JUNO_PWR_LVL1 MPIDR_AFFLVL1
/*
+ * Power management State IDs for Juno.
+ */
+#define JUNO_PM_RUN 0
+#define JUNO_PM_RET 1
+#define JUNO_PM_OFF 2
+
+/*
* The number of regions like RO(code), coherent and data required by
* different BL stages which need to be mapped in the MMU.
*/
diff --git a/plat/juno/plat_pm.c b/plat/juno/plat_pm.c
index 358114466..c19d99c76 100644
--- a/plat/juno/plat_pm.c
+++ b/plat/juno/plat_pm.c
@@ -59,16 +59,31 @@ static void juno_program_mailbox(uint64_t mpidr, uint64_t address)
/*******************************************************************************
* Juno handler called to check the validity of the power state parameter.
******************************************************************************/
-int32_t juno_validate_power_state(unsigned int power_state)
+int32_t juno_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
{
+ int pstate = psci_get_pstate_type(power_state);
+ int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+ int i;
+
+ assert(req_state);
+
+ if (pwr_lvl > PLAT_MAX_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
+
/* Sanity check the requested state */
- if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
+ if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only at power level 0 i.e.
* a cpu on the Juno. Ignore any other power level.
*/
- if (psci_get_pstate_pwrlvl(power_state) != JUNO_PWR_LVL0)
+ if (pwr_lvl != JUNO_PWR_LVL0)
return PSCI_E_INVALID_PARAMS;
+
+ req_state->pwr_domain_state[JUNO_PWR_LVL0] = JUNO_PM_RET;
+ } else {
+ for (i = JUNO_PWR_LVL0; i <= pwr_lvl; i++)
+ req_state->pwr_domain_state[i] = JUNO_PM_OFF;
}
/*
@@ -83,17 +98,15 @@ int32_t juno_validate_power_state(unsigned int power_state)
/*******************************************************************************
* Juno handler called when a power domain is about to be turned on. The
- * level and mpidr determine the power domain.
+ * mpidr determines the CPU to be turned on.
******************************************************************************/
int32_t juno_pwr_domain_on(uint64_t mpidr,
- uint64_t sec_entrypoint,
- uint32_t pwrlvl)
+ uint64_t sec_entrypoint)
{
/*
* SCP takes care of powering up parent power domains so we
* only need to care about level 0
*/
- assert(pwrlvl == JUNO_PWR_LVL0);
/*
* Setup mailbox with address for CPU entrypoint when it next powers up
@@ -108,13 +121,14 @@ int32_t juno_pwr_domain_on(uint64_t mpidr,
/*******************************************************************************
* Juno handler called when a power level has just been powered on after
- * being turned off earlier.
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
******************************************************************************/
-void juno_pwr_domain_on_finish(uint32_t pwrlvl)
+void juno_pwr_domain_on_finish(psci_power_state_t *target_state)
{
unsigned long mpidr;
- assert(pwrlvl <= JUNO_PWR_LVL1);
+ assert(target_state->pwr_domain_state[JUNO_PWR_LVL0] == JUNO_PM_OFF);
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
@@ -123,7 +137,7 @@ void juno_pwr_domain_on_finish(uint32_t pwrlvl)
* Perform the common cluster specific operations i.e enable coherency
* if this cluster was off.
*/
- if (pwrlvl != JUNO_PWR_LVL0)
+ if (target_state->pwr_domain_state[JUNO_PWR_LVL1] == JUNO_PM_OFF)
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
/* Enable the gic cpu interface */
@@ -142,7 +156,7 @@ void juno_pwr_domain_on_finish(uint32_t pwrlvl)
* called for power domain at the highest power level which will be powered
* down. It performs the actions common to the OFF and SUSPEND calls.
******************************************************************************/
-static void juno_power_down_common(uint32_t pwrlvl)
+static void juno_power_down_common(psci_power_state_t *target_state)
{
uint32_t cluster_state = scpi_power_on;
@@ -150,7 +164,7 @@ static void juno_power_down_common(uint32_t pwrlvl)
arm_gic_cpuif_deactivate();
/* Cluster is to be turned off, so disable coherency */
- if (pwrlvl > JUNO_PWR_LVL0) {
+ if (target_state->pwr_domain_state[JUNO_PWR_LVL1] == JUNO_PM_OFF) {
cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
cluster_state = scpi_power_off;
}
@@ -166,40 +180,56 @@ static void juno_power_down_common(uint32_t pwrlvl)
}
/*******************************************************************************
- * Handler called when a power domain is about to be turned off.
+ * Handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
-static void juno_pwr_domain_off(uint32_t pwrlvl)
+static void juno_pwr_domain_off(psci_power_state_t *target_state)
{
- assert(pwrlvl <= JUNO_PWR_LVL1);
+ assert(target_state->pwr_domain_state[JUNO_PWR_LVL0] == JUNO_PM_OFF);
- juno_power_down_common(pwrlvl);
+ juno_power_down_common(target_state);
}
/*******************************************************************************
- * Handler called when a power domain is about to be suspended.
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
******************************************************************************/
static void juno_pwr_domain_suspend(uint64_t sec_entrypoint,
- uint32_t pwrlvl)
+ psci_power_state_t *target_state)
{
- assert(pwrlvl <= JUNO_PWR_LVL1);
+ /*
+ * Juno has retention only at cpu level. Just return
+ * as nothing is to be done for retention.
+ */
+ if (target_state->pwr_domain_state[JUNO_PWR_LVL0] == JUNO_PM_RET)
+ return;
+
+ assert(target_state->pwr_domain_state[JUNO_PWR_LVL0] == JUNO_PM_OFF);
/*
* Setup mailbox with address for CPU entrypoint when it next powers up.
*/
juno_program_mailbox(read_mpidr_el1(), sec_entrypoint);
- juno_power_down_common(pwrlvl);
+ juno_power_down_common(target_state);
}
/*******************************************************************************
* Juno handler called when a power domain has just been powered on after
- * having been suspended earlier.
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
-static void juno_pwr_domain_suspend_finish(uint32_t pwrlvl)
+static void juno_pwr_domain_suspend_finish(psci_power_state_t *target_state)
{
- juno_pwr_domain_on_finish(pwrlvl);
+ /*
+ * Return as nothing is to be done on waking up from retention.
+ */
+ if (target_state->pwr_domain_state[JUNO_PWR_LVL0] == JUNO_PM_RET)
+ return;
+
+ juno_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
@@ -238,12 +268,14 @@ static void __dead2 juno_system_reset(void)
}
/*******************************************************************************
- * Handler called when a power domain is about to enter standby.
+ * Handler called when the CPU power domain is about to enter standby.
******************************************************************************/
-void juno_pwr_domain_standby(unsigned int power_state)
+void juno_cpu_standby(plat_local_state_t cpu_state)
{
unsigned int scr;
+ assert(cpu_state == JUNO_PM_RET);
+
scr = read_scr_el3();
/* Enable PhysicalIRQ bit for NS world to wake the CPU */
write_scr_el3(scr | SCR_IRQ_BIT);
@@ -265,7 +297,7 @@ static const plat_pm_ops_t juno_ops = {
.pwr_domain_on = juno_pwr_domain_on,
.pwr_domain_on_finish = juno_pwr_domain_on_finish,
.pwr_domain_off = juno_pwr_domain_off,
- .pwr_domain_standby = juno_pwr_domain_standby,
+ .cpu_standby = juno_cpu_standby,
.pwr_domain_suspend = juno_pwr_domain_suspend,
.pwr_domain_suspend_finish = juno_pwr_domain_suspend_finish,
.system_off = juno_system_off,
diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c
index 704578d05..87cd9376e 100644
--- a/services/std_svc/psci/psci_common.c
+++ b/services/std_svc/psci/psci_common.c
@@ -45,6 +45,26 @@
*/
const spd_pm_ops_t *psci_spd_pm;
+/*
+ * PSCI requested local power state map. This array is used to store the local
+ * power states requested by a CPU for power levels from level 1 to
+ * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
+ * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
+ * CPU are the same.
+ *
+ * During state coordination, the platform is passed an array containing the
+ * local states requested for a particular non cpu power domain by each cpu
+ * of which this domain is an ancestor.
+ *
+ * TODO: Dense packing of the requested states will cause cache thrashing
+ * when multiple power domains write to it. If we allocate the requested
+ * states at each power level in a cache-line aligned per-domain memory,
+ * the cache thrashing can be avoided.
+ */
+static plat_local_state_t
+ psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
+
+
/*******************************************************************************
* Arrays that hold the platform's power domain tree information for state
* management of power domains.
@@ -65,37 +85,60 @@ cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
******************************************************************************/
const plat_pm_ops_t *psci_plat_pm_ops;
-/*******************************************************************************
+/******************************************************************************
* Check that the maximum power level supported by the platform makes sense
- * ****************************************************************************/
+ *****************************************************************************/
CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
assert_platform_max_pwrlvl_check);
-/*******************************************************************************
- * This function is passed a cpu_index and the highest level in the topology
- * tree. It iterates through the nodes to find the highest power level at which
- * a domain is physically powered off.
- ******************************************************************************/
-uint32_t psci_find_max_phys_off_pwrlvl(uint32_t end_pwrlvl,
- unsigned int cpu_idx)
+/*
+ * The plat_local_state used by the platform is one of these types: RUN,
+ * RETENTION and OFF. The platform can define further sub-states for each type
+ * apart from RUN. This categorization is done to verify the sanity of the
+ * psci_power_state passed by the platform and to print debug information. The
+ * categorization is done on the basis of the following conditions:
+ *
+ * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
+ *
+ * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_RETN.
+ *
+ * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_OFF.
+ */
+typedef enum{
+ STATE_TYPE_RUN = 0,
+ STATE_TYPE_RETN,
+ STATE_TYPE_OFF
+} plat_local_state_type_t;
+
+/* The macro used to categorize plat_local_state. */
+#define find_local_state_type(plat_local_state) \
+ ((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE) \
+ ? STATE_TYPE_OFF : STATE_TYPE_RETN) \
+ : STATE_TYPE_RUN)
+
+/******************************************************************************
+ * Check that the maximum retention state supported by the platform is less
+ * than the maximum off state.
+ *****************************************************************************/
+CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
+ assert_platform_max_off_and_retn_state_check);
+
+/******************************************************************************
+ * This function ensures that the power state parameter in a CPU_SUSPEND request
+ * is valid. If so, it returns the requested states for each power level.
+ *****************************************************************************/
+int psci_validate_power_state(unsigned int power_state,
+ psci_power_state_t *state_info)
{
- int max_pwrlvl, level;
- unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-
- if (psci_get_phys_state(cpu_idx, PSCI_CPU_PWR_LVL) != PSCI_STATE_OFF)
- return PSCI_INVALID_DATA;
-
- max_pwrlvl = PSCI_CPU_PWR_LVL;
-
- for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
- if (psci_get_phys_state(parent_idx, level) == PSCI_STATE_OFF)
- max_pwrlvl = level;
-
- parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
- }
+ /* Check SBZ bits in power state are zero */
+ if (psci_check_power_state(power_state))
+ return PSCI_E_INVALID_PARAMS;
- return max_pwrlvl;
+ /* Validate the power_state using platform pm_ops */
+ return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}
/*******************************************************************************
@@ -107,17 +150,6 @@ int get_power_on_target_pwrlvl(void)
{
int pwrlvl;
-#if DEBUG
- unsigned int state;
-
- /*
- * Sanity check the state of the cpu. It should be either suspend or "on
- * pending"
- */
- state = psci_get_state(platform_my_core_pos(), PSCI_CPU_PWR_LVL);
- assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING);
-#endif
-
/*
* Assume that this cpu was suspended and retrieve its target power
* level. If it is invalid then it could only have been turned off
@@ -130,6 +162,125 @@ int get_power_on_target_pwrlvl(void)
return pwrlvl;
}
+/******************************************************************************
+ * Helper function to update the requested local power state array. This array
+ * does not store the requested state for the CPU power level. Hence an
+ * assertion is added to prevent us from accessing the wrong index.
+ *****************************************************************************/
+static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
+ unsigned int cpu_idx,
+ plat_local_state_t req_pwr_state)
+{
+ assert(pwrlvl > PSCI_CPU_PWR_LVL);
+ psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
+}
+
+/******************************************************************************
+ * This function initializes the psci_req_local_pwr_states.
+ *****************************************************************************/
+void psci_init_req_local_pwr_states()
+{
+ int lvl, idx;
+
+ /* Initialize the requested state of all power domains
+ higher than the CPU as OFF. */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+ for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++)
+ psci_set_req_local_pwr_state(lvl,
+ idx,
+ PLAT_MAX_OFF_STATE);
+ }
+}
+
+/******************************************************************************
+ * Helper function to return a reference to an array containing the local power
+ * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
+ * array will be the number of cpu power domains of which this power domain is
+ * an ancestor. These requested states will be used to determine a suitable
+ * target state for this power domain during psci state coordination. An
+ * assertion is added to prevent us from accessing the CPU power level.
+ *****************************************************************************/
+static plat_local_state_t *psci_get_req_local_pwr_states(int pwrlvl,
+ int cpu_idx)
+{
+ assert(pwrlvl > PSCI_CPU_PWR_LVL);
+
+ return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
+}
+
+/******************************************************************************
+ * Helper function to return the current local power state of each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
+ * function will be called after a cpu is powered on to find the local state
+ * each power domain has emerged from.
+ *****************************************************************************/
+static void psci_get_target_local_pwr_states(uint32_t end_pwrlvl,
+ psci_power_state_t *target_state)
+{
+ int lvl;
+ unsigned int parent_idx;
+ plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+ pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
+ parent_idx = psci_cpu_pd_nodes[platform_my_core_pos()].parent_node;
+
+ /* Copy the local power state from node to state_info */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+#if !USE_COHERENT_MEM
+ /*
+ * If using normal memory for psci_non_cpu_pd_nodes, we need
+ * to flush before reading the local power state as another
+ * cpu in the same power domain could have updated it.
+ */
+ flush_dcache_range(
+ (uint64_t)&psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
+ parent_idx = psci_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+ /* Zero out the higher levels */
+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+ target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+}
+
+/******************************************************************************
+ * Helper function to set the target local power state that each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
+ * enter. This function will be called after coordination of requested power
+ * states has been done for each power level.
+ *****************************************************************************/
+static void psci_set_target_local_pwr_states(uint32_t end_pwrlvl,
+ psci_power_state_t *target_state)
+{
+ int lvl;
+ unsigned int parent_idx;
+ plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+ psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
+
+ /*
+ * Need to flush as local_state will be accessed with Data Cache
+ * disabled during power on
+ */
+ flush_cpu_data(psci_svc_cpu_data.local_state);
+
+ parent_idx = psci_cpu_pd_nodes[platform_my_core_pos()].parent_node;
+
+ /* Copy the local_state from state_info */
+ for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
+ psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
+#if !USE_COHERENT_MEM
+ flush_dcache_range(
+ (uint64_t)&psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ parent_idx = psci_cpu_pd_nodes[parent_idx].parent_node;
+ }
+}
+
+
/*******************************************************************************
* PSCI helper function to get the parent nodes corresponding to a cpu_index.
******************************************************************************/
@@ -146,23 +297,196 @@ void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
}
}
-/*******************************************************************************
- * This function is passed a cpu_index and the highest level in the topology
- * tree and the state which each node should transition to. It updates the
- * state of each node between the specified power levels.
- ******************************************************************************/
-void psci_do_state_coordination(int end_pwrlvl,
- unsigned int cpu_idx,
- uint32_t state)
+/******************************************************************************
+ * This function is invoked post CPU power up and initialization. It sets the
+ * affinity info state, target power state and requested power state for the
+ * current CPU and all its ancestor power domains to RUN.
+ *****************************************************************************/
+void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl)
{
- int level;
- unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
- psci_set_state(cpu_idx, state, PSCI_CPU_PWR_LVL);
+ int lvl;
+ unsigned int parent_idx, cpu_idx = platform_my_core_pos();
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+ /* Reset the local_state to RUN for the non cpu power domains. */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ psci_non_cpu_pd_nodes[parent_idx].local_state =
+ PSCI_LOCAL_STATE_RUN;
+#if !USE_COHERENT_MEM
+ flush_dcache_range(
+ (uint64_t)&psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ psci_set_req_local_pwr_state(lvl,
+ cpu_idx,
+ PSCI_LOCAL_STATE_RUN);
+ parent_idx = psci_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+ /* Set the affinity info state to ON */
+ psci_set_aff_info_state(AFF_STATE_ON);
+
+ psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+ flush_cpu_data(psci_svc_cpu_data);
+}
+
+/******************************************************************************
+ * This function is passed the local power states requested for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl). It updates the array of requested power
+ * states with this information.
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * retrieves the states requested by all the cpus of which the power domain at
+ * that level is an ancestor. It passes this information to the platform to
+ * coordinate and return the target power state. If the target state for a level
+ * is RUN then subsequent levels are not considered. At the CPU level, state
+ * coordination is not required. Hence, the requested and the target states are
+ * the same.
+ *
+ * The 'state_info' is updated with the target state for each level between the
+ * CPU and the 'end_pwrlvl' and returned to the caller.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
+ *****************************************************************************/
+void psci_do_state_coordination(int end_pwrlvl, psci_power_state_t *state_info)
+{
+ unsigned int lvl, parent_idx, cpu_idx = platform_my_core_pos();
+ unsigned int start_idx, ncpus;
+ plat_local_state_t target_state, *req_states;
+
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+ /* For level 0, the requested state will be equivalent
+ to target state */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+ /* First update the requested power state */
+ psci_set_req_local_pwr_state(lvl, cpu_idx,
+ state_info->pwr_domain_state[lvl]);
+
+ /* Get the requested power states for this power level */
+ start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
+ req_states = psci_get_req_local_pwr_states(lvl, start_idx);
+
+ /*
+ * Let the platform coordinate amongst the requested states at
+ * this power level and return the target local power state.
+ */
+ ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
+ target_state = plat_get_target_pwr_state(lvl,
+ req_states,
+ ncpus);
+
+ state_info->pwr_domain_state[lvl] = target_state;
+
+ /* Break early if the negotiated target power state is RUN */
+ if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+ break;
- for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
- psci_set_state(parent_idx, state, level);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
+
+ /*
+ * This is for cases when we break out of the above loop early because
+ * the target power state is RUN at a power level < end_pwlvl.
+ * We update the requested power state from state_info and then
+ * set the target state as RUN.
+ */
+ for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
+ psci_set_req_local_pwr_state(lvl, cpu_idx,
+ state_info->pwr_domain_state[lvl]);
+ state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+
+ }
+
+ /* Update the target state in the power domain nodes */
+ psci_set_target_local_pwr_states(end_pwrlvl, state_info);
+}
+
+/******************************************************************************
+ * This function validates a suspend request by making sure that if a standby
+ * state is requested then no power level is turned off and the highest power
+ * level is placed in a standby/retention state.
+ *
+ * It also ensures that the state which level X will enter is not shallower
+ * than the state which level X + 1 will enter.
+ *
+ * This validation will be enabled only for DEBUG builds as the platform is
+ * expected to perform these validations as well.
+ *****************************************************************************/
+int psci_validate_suspend_req(psci_power_state_t *state_info,
+ unsigned int is_power_down_state)
+{
+ unsigned int max_off_lvl, target_lvl, max_retn_lvl;
+ plat_local_state_type_t type, prev_type;
+ int i;
+
+ /* Find the target suspend power level */
+ target_lvl = psci_find_target_suspend_lvl(state_info);
+ if (target_lvl == PSCI_INVALID_DATA)
+ return PSCI_E_INVALID_PARAMS;
+
+ prev_type = STATE_TYPE_RUN;
+ for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
+ type = find_local_state_type(state_info->pwr_domain_state[i]);
+ if (type < prev_type)
+ return PSCI_E_INVALID_PARAMS;
+
+ prev_type = type;
+ }
+
+ /* Find the highest off power level */
+ max_off_lvl = psci_find_max_off_lvl(state_info);
+
+ /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
+ max_retn_lvl = PSCI_INVALID_DATA;
+ if (target_lvl != max_off_lvl)
+ max_retn_lvl = target_lvl;
+
+ /*
+ * If this is not a request for a power down state then max off level
+ * has to be invalid and max retention level has to be a valid power
+ * level.
+ */
+ if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_DATA ||
+ max_retn_lvl == PSCI_INVALID_DATA))
+ return PSCI_E_INVALID_PARAMS;
+
+ return PSCI_E_SUCCESS;
+}
+
+/******************************************************************************
+ * This function finds the highest power level which will be powered down
+ * amongst all the power levels specified in the 'state_info' structure
+ *****************************************************************************/
+unsigned int psci_find_max_off_lvl(psci_power_state_t *state_info)
+{
+ int i;
+
+ for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+ if (is_local_state_off(state_info->pwr_domain_state[i]))
+ return i;
+ }
+
+ return PSCI_INVALID_DATA;
+}
+
+/******************************************************************************
+ * This function finds the level of the highest power domain which will be
+ * placed in a low power state during a suspend operation.
+ *****************************************************************************/
+unsigned int psci_find_target_suspend_lvl(psci_power_state_t *state_info)
+{
+ int i;
+
+ for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+ if (!is_local_state_run(state_info->pwr_domain_state[i]))
+ return i;
+ }
+
+ return PSCI_INVALID_DATA;
}
/*******************************************************************************
@@ -271,97 +595,6 @@ int psci_get_ns_ep_info(entry_point_info_t *ep,
}
/*******************************************************************************
- * This function takes an index and level of a power domain node in the topology
- * tree and returns its state. State of a non-leaf node needs to be calculated.
- ******************************************************************************/
-unsigned short psci_get_state(unsigned int idx,
- int level)
-{
- /* A cpu node just contains the state which can be directly returned */
- if (level == PSCI_CPU_PWR_LVL) {
- flush_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state);
- return get_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state);
- }
-
-#if !USE_COHERENT_MEM
- flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes[idx],
- sizeof(psci_non_cpu_pd_nodes[idx]));
-#endif
- /*
- * For a power level higher than a cpu, the state has to be
- * calculated. It depends upon the value of the reference count
- * which is managed by each node at the next lower power level
- * e.g. for a cluster, each cpu increments/decrements the reference
- * count. If the reference count is 0 then the power level is
- * OFF else ON.
- */
- if (psci_non_cpu_pd_nodes[idx].ref_count)
- return PSCI_STATE_ON;
- else
- return PSCI_STATE_OFF;
-}
-
-/*******************************************************************************
- * This function takes an index and level of a power domain node in the topology
- * tree and a target state. State of a non-leaf node needs to be converted to
- * a reference count. State of a leaf node can be set directly.
- ******************************************************************************/
-void psci_set_state(unsigned int idx,
- unsigned short state,
- int level)
-{
- /*
- * For a power level higher than a cpu, the state is used
- * to decide whether the reference count is incremented or
- * decremented. Entry into the ON_PENDING state does not have
- * effect.
- */
- if (level > PSCI_CPU_PWR_LVL) {
- switch (state) {
- case PSCI_STATE_ON:
- psci_non_cpu_pd_nodes[idx].ref_count++;
- break;
- case PSCI_STATE_OFF:
- case PSCI_STATE_SUSPEND:
- psci_non_cpu_pd_nodes[idx].ref_count--;
- break;
- case PSCI_STATE_ON_PENDING:
- /*
- * A power level higher than a cpu will not undergo
- * a state change when it is about to be turned on
- */
- return;
- default:
- assert(0);
-
-#if !USE_COHERENT_MEM
- flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes[idx],
- sizeof(psci_non_cpu_pd_nodes[idx]));
-#endif
- }
- } else {
- set_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state, state);
- flush_cpu_data_by_index(idx, psci_svc_cpu_data.psci_state);
- }
-}
-
-/*******************************************************************************
- * A power domain could be on, on_pending, suspended or off. These are the
- * logical states it can be in. Physically either it is off or on. When it is in
- * the state on_pending then it is about to be turned on. It is not possible to
- * tell whether that's actually happened or not. So we err on the side of
- * caution & treat the power domain as being turned off.
- ******************************************************************************/
-unsigned short psci_get_phys_state(unsigned int idx,
- int level)
-{
- unsigned int state;
-
- state = psci_get_state(idx, level);
- return get_phys_state(state);
-}
-
-/*******************************************************************************
* Generic handler which is called when a cpu is physically powered on. It
* traverses the node information and finds the highest power level powered
* off and performs generic, architectural, platform setup and state management
@@ -371,34 +604,32 @@ unsigned short psci_get_phys_state(unsigned int idx,
* coherency at the interconnect level in addition to gic cpu interface.
******************************************************************************/
void psci_power_up_finish(int end_pwrlvl,
- pwrlvl_power_on_finisher_t pon_handler)
+ pwrlvl_power_on_finisher_t power_on_handler)
{
unsigned int cpu_idx = platform_my_core_pos();
- unsigned int max_phys_off_pwrlvl;
+ psci_power_state_t state_info;
/*
- * This function acquires the lock corresponding to each power
- * level so that by the time all locks are taken, the system topology
- * is snapshot and state management can be done safely.
+ * This function acquires the lock corresponding to each power level so
+ * that by the time all locks are taken, the system topology is snapshot
+ * and state management can be done safely.
*/
psci_acquire_pwr_domain_locks(end_pwrlvl,
cpu_idx);
- max_phys_off_pwrlvl = psci_find_max_phys_off_pwrlvl(end_pwrlvl,
- cpu_idx);
- assert(max_phys_off_pwrlvl != PSCI_INVALID_DATA);
+ memset(&state_info, 0, sizeof(state_info));
+ psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
- /* Perform generic, architecture and platform specific handling */
- pon_handler(cpu_idx, max_phys_off_pwrlvl);
+ /*
+ * Perform generic, architecture and platform specific handling.
+ */
+ power_on_handler(cpu_idx, &state_info);
/*
- * This function updates the state of each power instance
- * corresponding to the cpu index in the range of power levels
- * specified.
+ * Set the requested and target state of this CPU and all the higher
+ * power domains which are ancestors of this CPU to run.
*/
- psci_do_state_coordination(end_pwrlvl,
- cpu_idx,
- PSCI_STATE_ON);
+ psci_set_pwr_domains_to_run(end_pwrlvl);
/*
* This loop releases the lock corresponding to each power level
@@ -456,31 +687,36 @@ int psci_spd_migrate_info(uint64_t *mpidr)
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
- unsigned int idx, state;
+ unsigned int idx, type;
/* This array maps to the PSCI_STATE_X definitions in psci.h */
- static const char *psci_state_str[] = {
+ static const char *psci_state_type_str[] = {
"ON",
+ "RETENTION",
"OFF",
- "ON_PENDING",
- "SUSPEND"
};
INFO("PSCI Power Domain Map:\n");
- for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT); idx++) {
- state = psci_get_state(idx, psci_non_cpu_pd_nodes[idx].level);
- INFO(" Domain Node : Level %u, parent_node %d, State %s\n",
+ for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
+ idx++) {
+ type = find_local_state_type(
+ psci_non_cpu_pd_nodes[idx].local_state);
+ INFO(" Domain Node : Level %u, parent_node %d,"
+ " State %s (0x%x)\n",
psci_non_cpu_pd_nodes[idx].level,
psci_non_cpu_pd_nodes[idx].parent_node,
- psci_state_str[state]);
+ psci_state_type_str[type],
+ psci_non_cpu_pd_nodes[idx].local_state);
}
for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
- state = psci_get_state(idx, PSCI_CPU_PWR_LVL);
- INFO(" CPU Node : MPID 0x%lx, parent_node %d, State %s\n",
+ type = find_local_state_type(psci_get_cpu_local_state());
+ INFO(" CPU Node : MPID 0x%lx, parent_node %d,"
+ " State %s (0x%x)\n",
psci_cpu_pd_nodes[idx].mpidr,
psci_cpu_pd_nodes[idx].parent_node,
- psci_state_str[state]);
+ psci_state_type_str[type],
+ psci_get_cpu_local_state());
}
#endif
}
diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c
index 64d27e169..7ab92a74a 100644
--- a/services/std_svc/psci/psci_main.c
+++ b/services/std_svc/psci/psci_main.c
@@ -31,9 +31,10 @@
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
+#include <debug.h>
#include <runtime_svc.h>
#include <std_svc.h>
-#include <debug.h>
+#include <string.h>
#include <platform.h>
#include "psci_private.h"
@@ -93,72 +94,80 @@ int psci_cpu_suspend(unsigned int power_state,
unsigned long context_id)
{
int rc;
- unsigned int target_pwrlvl, pstate_type;
+ unsigned int target_pwrlvl, is_power_down_state;
entry_point_info_t ep;
+ psci_power_state_t state_info;
+ plat_local_state_t cpu_pd_state;
+
+ /* Validate the power_state parameter */
+ memset(&state_info, 0, sizeof(state_info));
+ rc = psci_validate_power_state(power_state, &state_info);
+ if (rc != PSCI_E_SUCCESS) {
+ assert(rc == PSCI_E_INVALID_PARAMS);
+ return rc;
+ }
- /* Check SBZ bits in power state are zero */
- if (psci_validate_power_state(power_state))
- return PSCI_E_INVALID_PARAMS;
+ /*
+ * Get the value of the state type bit from the power state parameter.
+ */
+ is_power_down_state = psci_get_pstate_type(power_state);
- /* Sanity check the requested state */
- target_pwrlvl = psci_get_pstate_pwrlvl(power_state);
- if (target_pwrlvl > PLAT_MAX_PWR_LVL)
- return PSCI_E_INVALID_PARAMS;
+ /* Sanity check the requested suspend levels */
+ assert (psci_validate_suspend_req(&state_info, is_power_down_state)
+ == PSCI_E_SUCCESS);
- /* Validate the power_state using platform pm_ops */
- if (psci_plat_pm_ops->validate_power_state) {
- rc = psci_plat_pm_ops->validate_power_state(power_state);
- if (rc != PSCI_E_SUCCESS) {
- assert(rc == PSCI_E_INVALID_PARAMS);
- return PSCI_E_INVALID_PARAMS;
- }
- }
+ target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
- /* Validate the entrypoint using platform pm_ops */
- if (psci_plat_pm_ops->validate_ns_entrypoint) {
- rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
- if (rc != PSCI_E_SUCCESS) {
- assert(rc == PSCI_E_INVALID_PARAMS);
+ /* Fast path for CPU standby.*/
+ if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
+ if (!psci_plat_pm_ops->cpu_standby)
return PSCI_E_INVALID_PARAMS;
- }
- }
- /* Determine the 'state type' in the 'power_state' parameter */
- pstate_type = psci_get_pstate_type(power_state);
-
- /*
- * Ensure that we have a platform specific handler for entering
- * a standby state.
- */
- if (pstate_type == PSTATE_TYPE_STANDBY) {
- if (!psci_plat_pm_ops->pwr_domain_standby)
- return PSCI_E_INVALID_PARAMS;
+ /*
+ * Set the state of the CPU power domain to the platform
+ * specific retention state and enter the standby state. Upon
+ * exit from standby, set the state back to RUN.
+ */
+ cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
+ psci_set_cpu_local_state(cpu_pd_state);
+ psci_plat_pm_ops->cpu_standby(cpu_pd_state);
+ psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
- psci_plat_pm_ops->pwr_domain_standby(power_state);
return PSCI_E_SUCCESS;
}
/*
- * Verify and derive the re-entry information for
- * the non-secure world from the non-secure state from
- * where this call originated.
+ * If a power down state has been requested, we need to verify entry
+ * point and program entry information.
*/
- rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
- if (rc != PSCI_E_SUCCESS)
- return rc;
+ if (is_power_down_state) {
+ if (psci_plat_pm_ops->validate_ns_entrypoint) {
+ rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
+ if (rc != PSCI_E_SUCCESS) {
+ assert(rc == PSCI_E_INVALID_PARAMS);
+ return PSCI_E_INVALID_PARAMS;
+ }
+ }
- /* Save PSCI power state parameter for the core in suspend context */
- psci_set_suspend_power_state(power_state);
+ /*
+ * Verify and derive the re-entry information for
+ * the non-secure world from the non-secure state from
+ * where this call originated.
+ */
+ rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
+ if (rc != PSCI_E_SUCCESS)
+ return rc;
+ }
/*
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this CPU.
*/
psci_cpu_suspend_start(&ep,
- target_pwrlvl);
+ target_pwrlvl,
+ &state_info,
+ is_power_down_state);
- /* Reset PSCI power state parameter for the core. */
- psci_set_suspend_power_state(PSCI_INVALID_DATA);
return PSCI_E_SUCCESS;
}
@@ -186,26 +195,18 @@ int psci_cpu_off(void)
int psci_affinity_info(unsigned long target_affinity,
unsigned int lowest_affinity_level)
{
- unsigned int cpu_idx;
- unsigned char cpu_pwr_domain_state;
+ unsigned int target_idx;
/* We dont support level higher than PSCI_CPU_PWR_LVL */
if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Calculate the cpu index of the target */
- cpu_idx = platform_get_core_pos(target_affinity);
- if (cpu_idx == -1)
+ target_idx = platform_get_core_pos(target_affinity);
+ if (target_idx == -1)
return PSCI_E_INVALID_PARAMS;
- cpu_pwr_domain_state = psci_get_state(cpu_idx, PSCI_CPU_PWR_LVL);
-
- /* A suspended cpu is available & on for the OS */
- if (cpu_pwr_domain_state == PSCI_STATE_SUSPEND) {
- cpu_pwr_domain_state = PSCI_STATE_ON;
- }
-
- return cpu_pwr_domain_state;
+ return psci_get_aff_info_state_by_idx(target_idx);
}
int psci_migrate(unsigned long target_cpu)
@@ -283,10 +284,9 @@ int psci_features(unsigned int psci_fid)
if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
/*
- * The trusted firmware uses the original power state format
- * and does not support OS Initiated Mode.
+ * The trusted firmware does not support OS Initiated Mode.
*/
- return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
+ return (FF_PSTATE << FF_PSTATE_SHIFT) |
((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
}
diff --git a/services/std_svc/psci/psci_off.c b/services/std_svc/psci/psci_off.c
index 6a1be43ec..36c0ce5a1 100644
--- a/services/std_svc/psci/psci_off.c
+++ b/services/std_svc/psci/psci_off.c
@@ -37,6 +37,18 @@
#include "psci_private.h"
/******************************************************************************
+ * Construct the psci_power_state for CPU_OFF. Request the deepest OFF state
+ * for all the power levels.
+ ******************************************************************************/
+static void psci_create_deepest_off_state_req(psci_power_state_t *state_info)
+{
+ int lvl;
+
+ for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+ state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
+}
+
+/******************************************************************************
* Top level handler which is called when a cpu wants to power itself down.
* It's assumed that along with turning the cpu power domain off, power
* domains at higher levels will be turned off as far as possible. It finds
@@ -52,7 +64,7 @@
int psci_cpu_off_start(int end_pwrlvl)
{
int rc, idx = platform_my_core_pos();
- unsigned int max_phys_off_pwrlvl;
+ psci_power_state_t state_info;
/*
* This function must only be called on platforms where the
@@ -79,29 +91,30 @@ int psci_cpu_off_start(int end_pwrlvl)
goto exit;
}
+ /* Construct the psci_power_state for CPU_OFF */
+ psci_create_deepest_off_state_req(&state_info);
+
/*
- * This function updates the state of each power domain instance
- * corresponding to the cpu index in the range of power levels
- * specified.
+ * This function is passed the requested state info and
+ * it returns the negotiated state info for each power level up to
+ * the end level specified.
*/
- psci_do_state_coordination(end_pwrlvl,
- idx,
- PSCI_STATE_OFF);
+ psci_do_state_coordination(end_pwrlvl, &state_info);
- max_phys_off_pwrlvl = psci_find_max_phys_off_pwrlvl(end_pwrlvl, idx);
- assert(max_phys_off_pwrlvl != PSCI_INVALID_DATA);
+ /* Set the PSCI state to OFF */
+ psci_set_aff_info_state(AFF_STATE_OFF);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
- psci_do_pwrdown_cache_maintenance(max_phys_off_pwrlvl);
+ psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
- psci_plat_pm_ops->pwr_domain_off(max_phys_off_pwrlvl);
+ psci_plat_pm_ops->pwr_domain_off(&state_info);
exit:
/*
diff --git a/services/std_svc/psci/psci_on.c b/services/std_svc/psci/psci_on.c
index f7ae95fe4..6ec880ece 100644
--- a/services/std_svc/psci/psci_on.c
+++ b/services/std_svc/psci/psci_on.c
@@ -44,19 +44,41 @@
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
******************************************************************************/
-static int cpu_on_validate_state(unsigned int psci_state)
+static int cpu_on_validate_state(aff_info_state_t aff_state)
{
- if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
+ if (aff_state == AFF_STATE_ON)
return PSCI_E_ALREADY_ON;
- if (psci_state == PSCI_STATE_ON_PENDING)
+ if (aff_state == AFF_STATE_ON_PENDING)
return PSCI_E_ON_PENDING;
- assert(psci_state == PSCI_STATE_OFF);
+ assert(aff_state == AFF_STATE_OFF);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
+ * This function sets the aff_info_state in the per-cpu data of the CPU
+ * specified by cpu_idx
+ ******************************************************************************/
+static void psci_set_aff_info_state_by_idx(unsigned int cpu_idx,
+ aff_info_state_t aff_state)
+{
+
+ set_cpu_data_by_index(cpu_idx,
+ psci_svc_cpu_data.aff_info_state,
+ aff_state);
+
+ /*
+ * The aff_info_state will always be accessed with caches on and share a
+ * cache line with other per-cpu data. This cache line might get
+ * invalidated due to maintenance operations on other per-cpu
+ * data. Hence, it is safer to update main memory with the latest state
+ * information.
+ */
+ flush_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.aff_info_state);
+}
+
+/*******************************************************************************
* Generic handler which is called to physically power on a cpu identified by
* its mpidr. It performs the generic, architectural, platform setup and state
* management to power on the target cpu e.g. it will ensure that
@@ -67,8 +89,8 @@ static int cpu_on_validate_state(unsigned int psci_state)
* platform handler as it can return error.
******************************************************************************/
int psci_cpu_on_start(unsigned long target_cpu,
- entry_point_info_t *ep,
- int end_pwrlvl)
+ entry_point_info_t *ep,
+ int end_pwrlvl)
{
int rc;
unsigned long psci_entrypoint;
@@ -88,7 +110,7 @@ int psci_cpu_on_start(unsigned long target_cpu,
* Generic management: Ensure that the cpu is off to be
* turned on.
*/
- rc = cpu_on_validate_state(psci_get_state(target_idx, PSCI_CPU_PWR_LVL));
+ rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
if (rc != PSCI_E_SUCCESS)
goto exit;
@@ -112,19 +134,15 @@ int psci_cpu_on_start(unsigned long target_cpu,
* steps to power on.
*/
rc = psci_plat_pm_ops->pwr_domain_on(target_cpu,
- psci_entrypoint,
- MPIDR_AFFLVL0);
+ psci_entrypoint);
assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
/*
- * This function updates the state of each power domain instance
- * corresponding to the target cpu index in the range of power
- * levels specified.
+ * Set the PSCI state of the target cpu to ON_PENDING.
*/
if (rc == PSCI_E_SUCCESS) {
- psci_do_state_coordination(end_pwrlvl,
- target_idx,
- PSCI_STATE_ON_PENDING);
+ psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+
/*
* Store the re-entry information for the non-secure world.
*/
@@ -141,25 +159,24 @@ exit:
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx,
- int max_off_pwrlvl)
+ psci_power_state_t *state_info)
{
- /* Ensure we have been explicitly woken up by another cpu */
- assert(psci_get_state(cpu_idx, PSCI_CPU_PWR_LVL)
- == PSCI_STATE_ON_PENDING);
-
/*
* Plat. management: Perform the platform specific actions
* for this cpu e.g. enabling the gic or zeroing the mailbox
* register. The actual state of this cpu has already been
* changed.
*/
- psci_plat_pm_ops->pwr_domain_on_finish(max_off_pwrlvl);
+ psci_plat_pm_ops->pwr_domain_on_finish(state_info);
/*
* Arch. management: Enable data cache and manage stack memory
*/
psci_do_pwrup_cache_maintenance();
+ /* Ensure we have been explicitly woken up by another cpu */
+ assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
+
/*
* All the platform specific actions for turning this cpu
* on have completed. Perform enough arch.initialization
diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h
index c2c8523fd..dea78cc88 100644
--- a/services/std_svc/psci/psci_private.h
+++ b/services/std_svc/psci/psci_private.h
@@ -80,11 +80,56 @@
define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64))
/*
+ * Helper macros to get/set the fields of PSCI per-cpu data.
+ */
+#define psci_set_aff_info_state(aff_state) \
+ set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
+#define psci_get_aff_info_state() \
+ get_cpu_data(psci_svc_cpu_data.aff_info_state)
+#define psci_get_aff_info_state_by_idx(idx) \
+ get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
+#define psci_get_suspend_pwrlvl() \
+ get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
+#define psci_set_suspend_pwrlvl(target_lvl) \
+ set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
+#define psci_set_cpu_local_state(state) \
+ set_cpu_data(psci_svc_cpu_data.local_state, state)
+#define psci_get_cpu_local_state() \
+ get_cpu_data(psci_svc_cpu_data.local_state)
+
+/*
* Helper macros for the CPU level spinlocks
*/
#define psci_spin_lock_cpu(idx) spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
+/* Helper macro to identify a CPU standby request in PSCI Suspend call */
+#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
+ (((!is_power_down_state) && (retn_lvl == 0)) ? 1 : 0)
+
+/* The local state macro used to represent RUN state. */
+#define PSCI_LOCAL_STATE_RUN 0
+
+/*
+ * Macro to test whether the plat_local_state is RUN state
+ */
+#define is_local_state_run(plat_local_state) \
+ (plat_local_state == PSCI_LOCAL_STATE_RUN)
+
+/*
+ * Macro to test whether the plat_local_state is RETENTION state
+ */
+#define is_local_state_retn(plat_local_state) \
+ ((plat_local_state > PSCI_LOCAL_STATE_RUN) && \
+ (plat_local_state <= PLAT_MAX_RET_STATE))
+
+/*
+ * Macro to test whether the plat_local_state is OFF state
+ */
+#define is_local_state_off(plat_local_state) \
+ ((plat_local_state > PLAT_MAX_RET_STATE) && \
+ (plat_local_state <= PLAT_MAX_OFF_STATE))
+
/*******************************************************************************
* The following two data structures implement the power domain tree. The tree
* is used to track the state of all the nodes i.e. power domain instances
@@ -109,7 +154,8 @@ typedef struct non_cpu_pwr_domain_node {
/* Index of the parent power domain node */
unsigned int parent_node;
- unsigned char ref_count;
+ plat_local_state_t local_state;
+
unsigned char level;
#if USE_COHERENT_MEM
bakery_lock_t lock;
@@ -135,7 +181,7 @@ typedef struct cpu_pwr_domain_node {
} cpu_pd_node_t;
typedef void (*pwrlvl_power_on_finisher_t)(unsigned int cpu_idx,
- int max_off_pwrlvl);
+ psci_power_state_t *state_info);
/*******************************************************************************
* Data prototypes
@@ -154,28 +200,30 @@ extern const spd_pm_ops_t *psci_spd_pm;
* Function prototypes
******************************************************************************/
/* Private exported functions from psci_common.c */
-unsigned short psci_get_state(unsigned int idx, int level);
-unsigned short psci_get_phys_state(unsigned int idx, int level);
-void psci_set_state(unsigned int idx, unsigned short state, int level);
+int psci_validate_power_state(unsigned int power_state,
+ psci_power_state_t *state_info);
int psci_validate_mpidr(unsigned long mpidr);
int get_power_on_target_pwrlvl(void);
+void psci_init_req_local_pwr_states(void);
void psci_power_up_finish(int end_pwrlvl,
- pwrlvl_power_on_finisher_t pon_handler);
+ pwrlvl_power_on_finisher_t power_on_handler);
int psci_get_ns_ep_info(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
int end_lvl,
unsigned int node_index[]);
void psci_do_state_coordination(int end_pwrlvl,
- unsigned int cpu_idx,
- uint32_t state);
+ psci_power_state_t *state_info);
void psci_acquire_pwr_domain_locks(int end_pwrlvl,
unsigned int cpu_idx);
void psci_release_pwr_domain_locks(int end_pwrlvl,
unsigned int cpu_idx);
+int psci_validate_suspend_req(psci_power_state_t *state_info,
+ unsigned int is_power_down_state_req);
+unsigned int psci_find_max_off_lvl(psci_power_state_t *state_info);
+unsigned int psci_find_target_suspend_lvl(psci_power_state_t *state_info);
+void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl);
void psci_print_power_domain_map(void);
-uint32_t psci_find_max_phys_off_pwrlvl(uint32_t end_pwrlvl,
- unsigned int cpu_idx);
int psci_spd_migrate_info(uint64_t *mpidr);
/* Private exported functions from psci_on.c */
@@ -184,18 +232,19 @@ int psci_cpu_on_start(unsigned long target_cpu,
int end_pwrlvl);
void psci_cpu_on_finish(unsigned int cpu_idx,
- int max_off_pwrlvl);
+ psci_power_state_t *state_info);
/* Private exported functions from psci_off.c */
int psci_cpu_off_start(int end_pwrlvl);
-/* Private exported functions from psci_suspend.c */
+/* Private exported functions from psci_pwrlvl_suspend.c */
void psci_cpu_suspend_start(entry_point_info_t *ep,
- int end_pwrlvl);
-void psci_cpu_suspend_finish(unsigned int cpu_idx,
- int max_off_pwrlvl);
+ int end_pwrlvl,
+ psci_power_state_t *state_info,
+ unsigned int is_power_down_state_req);
-void psci_set_suspend_power_state(unsigned int power_state);
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+ psci_power_state_t *state_info);
/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(uint32_t pwr_level);
diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c
index 2754237dc..01ebca55f 100644
--- a/services/std_svc/psci/psci_setup.c
+++ b/services/std_svc/psci/psci_setup.c
@@ -62,32 +62,36 @@ static void psci_init_pwr_domain_node(int array_idx, int parent_idx, int level)
psci_non_cpu_pd_nodes[array_idx].level = level;
psci_lock_init(psci_non_cpu_pd_nodes, array_idx);
psci_non_cpu_pd_nodes[array_idx].parent_node = parent_idx;
+ psci_non_cpu_pd_nodes[array_idx].local_state =
+ PLAT_MAX_OFF_STATE;
#if !USE_COHERENT_MEM
- flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes[array_idx],
- sizeof(psci_non_cpu_pd_nodes[array_idx]));
+ flush_dcache_range(
+ (uint64_t) &psci_non_cpu_pd_nodes[array_idx],
+ sizeof(psci_non_cpu_pd_nodes[array_idx]));
#endif
} else {
+ psci_cpu_data_t *svc_cpu_data;
psci_cpu_pd_nodes[array_idx].parent_node = parent_idx;
/* Initialize with an invalid mpidr */
psci_cpu_pd_nodes[array_idx].mpidr = -1;
- /*
- * Mark the cpu as OFF. Higher power level reference counts
- * have already been memset to 0
- */
- set_cpu_data_by_index(array_idx,
- psci_svc_cpu_data.psci_state,
- PSCI_STATE_OFF);
+ svc_cpu_data =
+ &(_cpu_data_by_index(array_idx)->psci_svc_cpu_data);
+
+ /* Set the Affinity Info for the cores as OFF */
+ svc_cpu_data->aff_info_state = AFF_STATE_OFF;
- /* Invalidate the suspend context for the node */
- set_cpu_data_by_index(array_idx,
- psci_svc_cpu_data.power_state,
- PSCI_INVALID_DATA);
+ /* Invalidate the suspend level for the cpu */
+ svc_cpu_data->target_pwrlvl = PSCI_INVALID_DATA;
- flush_cpu_data_by_index(array_idx, psci_svc_cpu_data);
+ /* Set the power state to OFF state */
+ svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
+
+ flush_dcache_range((uint64_t)svc_cpu_data,
+ sizeof(*svc_cpu_data));
cm_set_context_by_index(array_idx,
(void *) &psci_ns_context[array_idx],
@@ -146,7 +150,7 @@ void populate_power_domain_tree(unsigned char *plat_array,
* - Index of first free entry in psci_non_cpu_pd_nodes[] or
* psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
*/
- while (num_levels >= 0) {
+ while (num_levels >= PSCI_CPU_PWR_LVL) {
num_nodes_at_next_lvl = 0;
/*
* For each entry (parent node) at this level in the plat_array:
@@ -176,7 +180,7 @@ void populate_power_domain_tree(unsigned char *plat_array,
num_levels--;
/* Reset the index for the cpu power domain array */
- if (num_levels == 0)
+ if (num_levels == PSCI_CPU_PWR_LVL)
node_index = 0;
}
@@ -235,12 +239,13 @@ int32_t psci_setup(void)
flush_dcache_range((uint64_t) &psci_cpu_pd_nodes,
sizeof(psci_cpu_pd_nodes));
+ psci_init_req_local_pwr_states();
+
/*
- * Mark the current CPU and its parent power domains as ON. No need to lock
- * as this is the primary cpu.
+ * Set the requested and target state of this CPU and all the higher
+ * power domain levels for this CPU to run.
*/
- psci_do_state_coordination(PLAT_MAX_PWR_LVL, platform_my_core_pos(),
- PSCI_STATE_ON);
+ psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
platform_setup_pm(&psci_plat_pm_ops);
assert(psci_plat_pm_ops);
diff --git a/services/std_svc/psci/psci_suspend.c b/services/std_svc/psci/psci_suspend.c
index 53a2a303e..bce78cf76 100644
--- a/services/std_svc/psci/psci_suspend.c
+++ b/services/std_svc/psci/psci_suspend.c
@@ -42,82 +42,97 @@
#include "psci_private.h"
/*******************************************************************************
- * This function saves the power state parameter passed in the current PSCI
- * cpu_suspend call in the per-cpu data array.
+ * This function does generic and platform specific operations after a wake-up
+ * from standby/retention states at multiple power levels.
******************************************************************************/
-void psci_set_suspend_power_state(unsigned int power_state)
+static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
+ psci_power_state_t *state_info,
+ unsigned int end_pwrlvl)
{
- set_cpu_data(psci_svc_cpu_data.power_state, power_state);
- flush_cpu_data(psci_svc_cpu_data.power_state);
-}
+ psci_acquire_pwr_domain_locks(end_pwrlvl,
+ cpu_idx);
-/*******************************************************************************
- * This function gets the power level till which the current cpu could be
- * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
- * power state is invalid.
- ******************************************************************************/
-int psci_get_suspend_pwrlvl(void)
-{
- unsigned int power_state;
+ /*
+ * Plat. management: Allow the platform to do operations
+ * on waking up from retention.
+ */
+ psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
- power_state = get_cpu_data(psci_svc_cpu_data.power_state);
+ /*
+ * Set the requested and target state of this CPU and all the higher
+ * power domain levels for this CPU to run.
+ */
+ psci_set_pwr_domains_to_run(end_pwrlvl);
- return ((power_state == PSCI_INVALID_DATA) ?
- power_state : psci_get_pstate_pwrlvl(power_state));
+ psci_release_pwr_domain_locks(end_pwrlvl,
+ cpu_idx);
}
/*******************************************************************************
- * This function gets the state id of the current cpu from the power state
- * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
- * power state saved is invalid.
+ * This function does generic and platform specific suspend to power down
+ * operations.
******************************************************************************/
-int psci_get_suspend_stateid(void)
+static void psci_suspend_to_pwrdown_start(int end_pwrlvl,
+ entry_point_info_t *ep,
+ psci_power_state_t *state_info)
{
- unsigned int power_state;
-
- power_state = get_cpu_data(psci_svc_cpu_data.power_state);
+ /* Save PSCI target power level for the suspend finisher handler */
+ psci_set_suspend_pwrlvl(end_pwrlvl);
- return ((power_state == PSCI_INVALID_DATA) ?
- power_state : psci_get_pstate_id(power_state));
-}
+ /*
+ * Flush the target power level as it will be accessed on power up with
+ * Data cache disabled.
+ */
+ flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
-/*******************************************************************************
- * This function gets the state id of the cpu specified by the cpu index
- * from the power state parameter saved in the per-cpu data array. Returns
- * PSCI_INVALID_DATA if the power state saved is invalid.
- ******************************************************************************/
-int psci_get_suspend_stateid_by_mpidr(unsigned long cpu_idx)
-{
- unsigned int power_state;
+ /*
+ * Call the cpu suspend handler registered by the Secure Payload
+ * Dispatcher to let it do any book-keeping. If the handler encounters an
+ * error, it's expected to assert within
+ */
+ if (psci_spd_pm && psci_spd_pm->svc_suspend)
+ psci_spd_pm->svc_suspend(0);
- power_state = get_cpu_data_by_index(cpu_idx,
- psci_svc_cpu_data.power_state);
+ /*
+ * Store the re-entry information for the non-secure world.
+ */
+ cm_init_context(platform_my_core_pos(), ep);
- return ((power_state == PSCI_INVALID_DATA) ?
- power_state : psci_get_pstate_id(power_state));
+ /*
+ * Arch. management. Perform the necessary steps to flush all
+ * cpu caches. Currently we assume that the power level corresponds
+ * to the cache level.
+ * TODO : Introduce a mechanism to query the cache level to flush
+ * and the cpu-ops power down to perform from the platform.
+ */
+ psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(state_info));
}
/*******************************************************************************
* Top level handler which is called when a cpu wants to suspend its execution.
* It is assumed that along with suspending the cpu power domain, power domains
- * at higher levels until the target power level will be suspended as well.
- * It finds the highest level where a domain has to be suspended by traversing
- * the node information and then performs generic, architectural, platform
- * setup and state management required to suspend that power domain and domains
- * below it. * e.g. For a cpu that's to be suspended, it could mean programming
- * the power controller whereas for a cluster that's to be suspended, it will
- * call the platform specific code which will disable coherency at the
- * interconnect level if the cpu is the last in the cluster and also the
- * program the power controller.
+ * at higher levels until the target power level will be suspended as well. It
+ * coordinates with the platform to negotiate the target state for each of
+ * the power domain level till the target power domain level. It then performs
+ * generic, architectural, platform setup and state management required to
+ * suspend that power domain level and power domain levels below it.
+ * e.g. For a cpu that's to be suspended, it could mean programming the
+ * power controller whereas for a cluster that's to be suspended, it will call
+ * the platform specific code which will disable coherency at the interconnect
+ * level if the cpu is the last in the cluster and also the program the power
+ * controller.
*
* All the required parameter checks are performed at the beginning and after
* the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
-void psci_cpu_suspend_start(entry_point_info_t *ep, int end_pwrlvl)
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+ int end_pwrlvl,
+ psci_power_state_t *state_info,
+ unsigned int is_power_down_state)
{
int skip_wfi = 0;
- unsigned int max_phys_off_pwrlvl, idx = platform_my_core_pos();
+ unsigned int idx = platform_my_core_pos();
unsigned long psci_entrypoint;
/*
@@ -146,39 +161,20 @@ void psci_cpu_suspend_start(entry_point_info_t *ep, int end_pwrlvl)
}
/*
- * Call the cpu suspend handler registered by the Secure Payload
- * Dispatcher to let it do any bookeeping. If the handler encounters an
- * error, it's expected to assert within
- */
- if (psci_spd_pm && psci_spd_pm->svc_suspend)
- psci_spd_pm->svc_suspend(0);
-
- /*
- * This function updates the state of each power domain instance
- * corresponding to the cpu index in the range of power levels
- * specified.
- */
- psci_do_state_coordination(end_pwrlvl,
- idx,
- PSCI_STATE_SUSPEND);
-
- max_phys_off_pwrlvl = psci_find_max_phys_off_pwrlvl(end_pwrlvl,
- idx);
- assert(max_phys_off_pwrlvl != PSCI_INVALID_DATA);
-
- /*
- * Store the re-entry information for the non-secure world.
+ * This function is passed the requested state info and
+ * it returns the negotiated state info for each power level up to
+ * the end level specified.
*/
- cm_init_context(platform_my_core_pos(), ep);
+ psci_do_state_coordination(end_pwrlvl, state_info);
- /* Set the secure world (EL3) re-entry point after BL1 */
- psci_entrypoint = (unsigned long) psci_cpu_suspend_finish_entry;
+ psci_entrypoint = 0;
+ if (is_power_down_state) {
+ psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
- /*
- * Arch. management. Perform the necessary steps to flush all
- * cpu caches.
- */
- psci_do_pwrdown_cache_maintenance(max_phys_off_pwrlvl);
+ /* Set the secure world (EL3) re-entry point after BL1. */
+ psci_entrypoint =
+ (unsigned long) psci_cpu_suspend_finish_entry;
+ }
/*
* Plat. management: Allow the platform to perform the
@@ -186,8 +182,7 @@ void psci_cpu_suspend_start(entry_point_info_t *ep, int end_pwrlvl)
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
- psci_plat_pm_ops->pwr_domain_suspend(psci_entrypoint,
- max_phys_off_pwrlvl);
+ psci_plat_pm_ops->pwr_domain_suspend(psci_entrypoint, state_info);
exit:
/*
@@ -195,23 +190,40 @@ exit:
* reverse order to which they were acquired.
*/
psci_release_pwr_domain_locks(end_pwrlvl,
- idx);
- if (!skip_wfi)
+ idx);
+ if (skip_wfi)
+ return;
+
+ if (is_power_down_state)
psci_power_down_wfi();
+
+ /*
+ * We will reach here if only retention/standby states have been
+ * requested at multiple power levels. This means that the cpu
+ * context will be preserved.
+ */
+ wfi();
+
+ /*
+ * After we wake up from context retaining suspend, call the
+ * context retaining suspend finisher.
+ */
+ psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
}
/*******************************************************************************
* The following functions finish an earlier suspend request. They
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
-void psci_cpu_suspend_finish(unsigned int cpu_idx, int max_off_pwrlvl)
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+ psci_power_state_t *state_info)
{
int32_t suspend_level;
uint64_t counter_freq;
/* Ensure we have been woken up from a suspended state */
- assert(psci_get_state(cpu_idx, PSCI_CPU_PWR_LVL)
- == PSCI_STATE_SUSPEND);
+ assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\
+ state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
/*
* Plat. management: Perform the platform specific actions
@@ -220,7 +232,7 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx, int max_off_pwrlvl)
* wrong then assert as there is no way to recover from this
* situation.
*/
- psci_plat_pm_ops->pwr_domain_suspend_finish(max_off_pwrlvl);
+ psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
/*
* Arch. management: Enable the data cache, manage stack memory and
@@ -244,8 +256,8 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx, int max_off_pwrlvl)
psci_spd_pm->svc_suspend_finish(suspend_level);
}
- /* Invalidate the suspend context for the node */
- psci_set_suspend_power_state(PSCI_INVALID_DATA);
+ /* Invalidate the suspend level for the cpu */
+ psci_set_suspend_pwrlvl(PSCI_INVALID_DATA);
/*
* Generic management: Now we just need to retrieve the