author     Jacky Bai <ping.bai@nxp.com>    2020-01-08 16:56:01 +0800
committer  Jacky Bai <ping.bai@nxp.com>    2023-03-01 10:18:03 +0800
commit     88a264657fad2f71369fec4b53478e8a595d10e9 (patch)
tree       e0c9629581263a3dfead7132c61e0a98a565db4c /plat/imx/imx8m
parent     dd108c3c1fe3f958a38ae255e57b41e5453d077f (diff)
feat(imx8mq): add workaround code for ERR11171 on imx8mq

This new workaround takes advantage of the per-core IMR registers in the
GPC to unmask IRQ0, which is still generated by bit 12 in the IOMUX_GPR
register (now left permanently set), so that only one core can be woken
up at a time. The entire workaround has also been moved into TF-A,
allowing the kernel side to stay minimal. Another advantage is the
removal of the 50us delay that was previously needed in
gic_raise_softirq in the kernel: the core that is waking up now masks
its own IRQ0 in the suspend finish callback.

One important change is the way the cores are woken up in
dram_dvfs_handler. The wake-up mechanism has changed from asserting bit
12 in IOMUX_GPR while leaving the first IMR1 bit on for each core to
exactly the reverse: bit 12 in IOMUX_GPR stays set and the first IMR1
bit is masked/unmasked for each individual core, so imx_gpc_core_wake
must now be used to wake up the cores.

The 50us udelay is also moved from the kernel (gic_raise_softirq) into
TF-A (inside imx_pwr_domain_off), since the new cpuidle workaround no
longer needs it to clear bit 12 in IOMUX_GPR. For now, the udelay still
appears to be needed to delay the affinity info OFF for the dying core;
this needs further investigation.

Signed-off-by: Abel Vesa <abel.vesa@nxp.com>
Signed-off-by: Jacky Bai <ping.bai@nxp.com>
Change-Id: I9f17ff6fc3452b8225a50b232964712aafeab78a
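
Below is a minimal, illustrative sketch of the non-secure side of this
interface (the "kernel side" mentioned above). The IMX_SIP_GPC function
ID value, the arm_smccc_smc() helper from <linux/arm-smccc.h> and the
imx_gpc_sip_core_wake() wrapper name are assumptions of this sketch and
not part of the patch; only the FSL_SIP_CONFIG_GPC_CORE_WAKE sub-command
value is taken from the gpc.c hunk below.

    /*
     * Illustrative sketch only: the SIP function ID is an assumed value
     * and this wrapper is not part of this patch.
     */
    #include <linux/arm-smccc.h>

    #define IMX_SIP_GPC                     0xc2000000UL    /* assumed */
    #define FSL_SIP_CONFIG_GPC_CORE_WAKE    0x05UL          /* from gpc.c below */

    /* Ask TF-A to unmask IRQ0 in the GPC IMR1 of each core set in cpumask. */
    static void imx_gpc_sip_core_wake(unsigned long cpumask)
    {
            struct arm_smccc_res res;

            arm_smccc_smc(IMX_SIP_GPC, FSL_SIP_CONFIG_GPC_CORE_WAKE,
                          cpumask, 0, 0, 0, 0, 0, &res);
    }

On the secure side, such a call is dispatched to imx_gpc_handler() with
x1 = FSL_SIP_CONFIG_GPC_CORE_WAKE and x2 = cpumask, which in turn calls
imx_gpc_core_wake() to clear bit 0 of each selected core's IMR1 so the
pending IRQ0 can wake it.
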
Diffstat (limited to 'plat/imx/imx8m')
-rw-r--r--  plat/imx/imx8m/gpc_common.c          |   6
-rw-r--r--  plat/imx/imx8m/imx8m_psci_common.c   |   3
-rw-r--r--  plat/imx/imx8m/imx8mq/gpc.c          | 252
-rw-r--r--  plat/imx/imx8m/imx8mq/imx8mq_psci.c  |  18
-rw-r--r--  plat/imx/imx8m/imx8mq/platform.mk    |   1
-rw-r--r--  plat/imx/imx8m/include/gpc.h         |   6
6 files changed, 269 insertions, 17 deletions
diff --git a/plat/imx/imx8m/gpc_common.c b/plat/imx/imx8m/gpc_common.c
index e674d7a31..1cc8f6e15 100644
--- a/plat/imx/imx8m/gpc_common.c
+++ b/plat/imx/imx8m/gpc_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -29,6 +29,9 @@ DEFINE_BAKERY_LOCK(gpc_lock);
#pragma weak imx_set_cpu_pwr_on
#pragma weak imx_set_cpu_lpm
#pragma weak imx_set_cluster_powerdown
+#pragma weak imx_set_sys_wakeup
+#pragma weak imx_noc_slot_config
+#pragma weak imx_gpc_handler
void imx_set_cpu_secure_entry(unsigned int core_id, uintptr_t sec_entrypoint)
{
@@ -211,7 +214,6 @@ void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
}
}
-#pragma weak imx_noc_slot_config
/*
* this function only need to be override by platform
* that support noc power down, for example: imx8mm.
diff --git a/plat/imx/imx8m/imx8m_psci_common.c b/plat/imx/imx8m/imx8m_psci_common.c
index 8f545d652..48eb8a661 100644
--- a/plat/imx/imx8m/imx8m_psci_common.c
+++ b/plat/imx/imx8m/imx8m_psci_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -24,6 +24,7 @@
* reuse below ones.
*/
#pragma weak imx_validate_power_state
+#pragma weak imx_pwr_domain_off
#pragma weak imx_domain_suspend
#pragma weak imx_domain_suspend_finish
#pragma weak imx_get_sys_suspend_power_state
diff --git a/plat/imx/imx8m/imx8mq/gpc.c b/plat/imx/imx8m/imx8mq/gpc.c
index fa83324e1..329841c91 100644
--- a/plat/imx/imx8m/imx8mq/gpc.c
+++ b/plat/imx/imx8m/imx8mq/gpc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,14 +8,210 @@
#include <stdint.h>
#include <stdbool.h>
+#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
-#include <platform_def.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
#include <services/std_svc.h>
#include <gpc.h>
+#include <platform_def.h>
+
+#define FSL_SIP_CONFIG_GPC_MASK U(0x00)
+#define FSL_SIP_CONFIG_GPC_UNMASK U(0x01)
+#define FSL_SIP_CONFIG_GPC_SET_WAKE U(0x02)
+#define FSL_SIP_CONFIG_GPC_PM_DOMAIN U(0x03)
+#define FSL_SIP_CONFIG_GPC_SET_AFF U(0x04)
+#define FSL_SIP_CONFIG_GPC_CORE_WAKE U(0x05)
+
+#define MAX_HW_IRQ_NUM U(128)
+#define MAX_IMR_NUM U(4)
+
+static uint32_t gpc_saved_imrs[16];
+static uint32_t gpc_wake_irqs[4];
+static uint32_t gpc_imr_offset[] = {
+ IMX_GPC_BASE + IMR1_CORE0_A53,
+ IMX_GPC_BASE + IMR1_CORE1_A53,
+ IMX_GPC_BASE + IMR1_CORE2_A53,
+ IMX_GPC_BASE + IMR1_CORE3_A53,
+ IMX_GPC_BASE + IMR1_CORE0_M4,
+};
+
+spinlock_t gpc_imr_lock[4];
+
+static void gpc_imr_core_spin_lock(unsigned int core_id)
+{
+ spin_lock(&gpc_imr_lock[core_id]);
+}
+
+static void gpc_imr_core_spin_unlock(unsigned int core_id)
+{
+ spin_unlock(&gpc_imr_lock[core_id]);
+}
+
+static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx)
+{
+ uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
+
+ gpc_imr_core_spin_lock(core_id);
+
+ gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg);
+ mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]);
+
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx)
+{
+ uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
+ uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4];
+
+ gpc_imr_core_spin_lock(core_id);
+
+ mmio_write_32(reg, val);
+
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+/*
+ * On i.MX8MQ, the A53 cluster can only enter LPM mode and shut down
+ * the A53 PLAT power domain in system suspend, so LPM wakeup is only
+ * used for system suspend.
+ * When the system enters suspend, any A53 core can be the last core
+ * to suspend the system, but LPM wakeup can only use core0's IMRs to
+ * wake the A53 cluster from LPM, so save core0's IMRs before suspend
+ * and restore them after resume.
+ */
+void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
+{
+ unsigned int imr, core;
+
+ if (pdn) {
+ for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
+ for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
+ gpc_save_imr_lpm(core, imr);
+ }
+ }
+ } else {
+ for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
+ for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
+ gpc_restore_imr_lpm(core, imr);
+ }
+ }
+ }
+}
+
+static void imx_gpc_hwirq_mask(unsigned int hwirq)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ if (hwirq >= MAX_HW_IRQ_NUM) {
+ return;
+ }
+
+ gpc_imr_core_spin_lock(0);
+ reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val |= 1 << hwirq % 32;
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(0);
+}
+
+static void imx_gpc_hwirq_unmask(unsigned int hwirq)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ if (hwirq >= MAX_HW_IRQ_NUM) {
+ return;
+ }
+
+ gpc_imr_core_spin_lock(0);
+ reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val &= ~(1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(0);
+}
+
+static void imx_gpc_set_wake(uint32_t hwirq, bool on)
+{
+ uint32_t mask, idx;
+
+ if (hwirq >= MAX_HW_IRQ_NUM) {
+ return;
+ }
+
+ mask = 1 << hwirq % 32;
+ idx = hwirq / 32;
+ gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
+ gpc_wake_irqs[idx] & ~mask;
+}
+
+static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask)
+{
+ gpc_imr_core_spin_lock(core_id);
+ if (mask) {
+ mmio_setbits_32(gpc_imr_offset[core_id], 1);
+ } else {
+ mmio_clrbits_32(gpc_imr_offset[core_id], 1);
+ }
+
+ dsb();
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+void imx_gpc_core_wake(uint32_t cpumask)
+{
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (cpumask & (1 << i)) {
+ imx_gpc_mask_irq0(i, false);
+ }
+ }
+}
+
+void imx_gpc_set_a53_core_awake(uint32_t core_id)
+{
+ imx_gpc_mask_irq0(core_id, true);
+}
+
+static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ if (hwirq >= MAX_HW_IRQ_NUM || cpu_idx >= 4) {
+ return;
+ }
+
+ /*
+ * Use the mask/unmask bit as the affinity function: unmask the
+ * IMR bit to enable IRQ wakeup for this core.
+ */
+ gpc_imr_core_spin_lock(cpu_idx);
+ reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val &= ~(1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(cpu_idx);
+
+ /* clear affinity of other core */
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (cpu_idx != i) {
+ gpc_imr_core_spin_lock(i);
+ reg = gpc_imr_offset[i] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val |= (1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(i);
+ }
+ }
+}
/* use wfi power down the core */
void imx_set_cpu_pwr_off(unsigned int core_id)
@@ -124,26 +320,56 @@ void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
}
}
+int imx_gpc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3)
+{
+ switch (x1) {
+ case FSL_SIP_CONFIG_GPC_CORE_WAKE:
+ imx_gpc_core_wake(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_SET_WAKE:
+ imx_gpc_set_wake(x2, x3);
+ break;
+ case FSL_SIP_CONFIG_GPC_MASK:
+ imx_gpc_hwirq_mask(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_UNMASK:
+ imx_gpc_hwirq_unmask(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_SET_AFF:
+ imx_gpc_set_affinity(x2, x3);
+ break;
+ default:
+ return SMC_UNK;
+ }
+
+ return 0;
+}
+
void imx_gpc_init(void)
{
uint32_t val;
- int i;
+ unsigned int i, j;
+
/* mask all the interrupt by default */
- for (i = 0; i < 4; i++) {
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
+ for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+ for (j = 0U; j < ARRAY_SIZE(gpc_imr_offset); j++) {
+ mmio_write_32(gpc_imr_offset[j] + i * 4, ~0x0);
+ }
}
+
/* Due to the hardware design requirement, need to make
* sure GPR interrupt(#32) is unmasked during RUN mode to
* avoid entering DSM mode by mistake.
*/
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53, 0xFFFFFFFE);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53, 0xFFFFFFFE);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53, 0xFFFFFFFE);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53, 0xFFFFFFFE);
+ for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+ mmio_write_32(gpc_imr_offset[i], ~0x1);
+ }
+
+ /* leave the IOMUX_GPR bit 12 on for core wakeup */
+ mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12);
/* use external IRQs to wakeup C0~C3 from LPM */
val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
diff --git a/plat/imx/imx8m/imx8mq/imx8mq_psci.c b/plat/imx/imx8m/imx8mq/imx8mq_psci.c
index 6209fea89..8b4a6d573 100644
--- a/plat/imx/imx8m/imx8mq/imx8mq_psci.c
+++ b/plat/imx/imx8m/imx8mq/imx8mq_psci.c
@@ -9,6 +9,7 @@
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
+#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
@@ -40,6 +41,21 @@ int imx_validate_power_state(unsigned int power_state,
return PSCI_E_SUCCESS;
}
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+ plat_gic_cpuif_disable();
+ imx_set_cpu_pwr_off(core_id);
+
+ /*
+ * TODO: Find out why this is still
+ * needed in order not to break suspend
+ */
+ udelay(50);
+}
+
void imx_domain_suspend(const psci_power_state_t *target_state)
{
uint64_t base_addr = BL31_START;
@@ -88,6 +104,8 @@ void imx_domain_suspend_finish(const psci_power_state_t *target_state)
/* check the core level power status */
if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+ /* mark this core as awake by masking IRQ0 */
+ imx_gpc_set_a53_core_awake(core_id);
/* clear the core lpm setting */
imx_set_cpu_lpm(core_id, false);
/* enable the gic cpu interface */
diff --git a/plat/imx/imx8m/imx8mq/platform.mk b/plat/imx/imx8m/imx8mq/platform.mk
index 284ccee9d..b1c189fa0 100644
--- a/plat/imx/imx8m/imx8mq/platform.mk
+++ b/plat/imx/imx8m/imx8mq/platform.mk
@@ -49,6 +49,7 @@ ENABLE_PIE := 1
USE_COHERENT_MEM := 1
RESET_TO_BL31 := 1
A53_DISABLE_NON_TEMPORAL_HINT := 0
+WARMBOOT_ENABLE_DCACHE_EARLY := 1
ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
diff --git a/plat/imx/imx8m/include/gpc.h b/plat/imx/imx8m/include/gpc.h
index a41030e5f..2cbcdf953 100644
--- a/plat/imx/imx8m/include/gpc.h
+++ b/plat/imx/imx8m/include/gpc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -72,4 +72,8 @@ void imx_clear_rbc_count(void);
void imx_anamix_override(bool enter);
void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on);
+#if defined(PLAT_imx8mq)
+void imx_gpc_set_a53_core_awake(uint32_t core_id);
+#endif
+
#endif /*IMX8M_GPC_H */