Diffstat (limited to 'drivers/memory')
-rw-r--r--   drivers/memory/atmel-ebi.c        |   4
-rw-r--r--   drivers/memory/emif.c             | 678
-rw-r--r--   drivers/memory/fsl_ifc.c          |   8
-rw-r--r--   drivers/memory/pl353-smc.c        | 315
-rw-r--r--   drivers/memory/stm32-fmc2-ebi.c   |   4
5 files changed, 22 insertions, 987 deletions
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index 14386d0b5f57..c267283b01fd 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -600,8 +600,10 @@ static int atmel_ebi_probe(struct platform_device *pdev) child); ret = atmel_ebi_dev_disable(ebi, child); - if (ret) + if (ret) { + of_node_put(child); return ret; + } } } diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index f7825eef5894..762d0c0f0716 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -41,7 +41,6 @@ * @node: node in the device list * @base: base address of memory-mapped IO registers. * @dev: device pointer. - * @addressing table with addressing information from the spec * @regs_cache: An array of 'struct emif_regs' that stores * calculated register values for different * frequencies, to avoid re-calculating them on @@ -61,7 +60,6 @@ struct emif_data { unsigned long irq_state; void __iomem *base; struct device *dev; - const struct lpddr2_addressing *addressing; struct emif_regs *regs_cache[EMIF_MAX_NUM_FREQUENCIES]; struct emif_regs *curr_regs; struct emif_platform_data *plat_data; @@ -72,7 +70,6 @@ struct emif_data { static struct emif_data *emif1; static DEFINE_SPINLOCK(emif_lock); static unsigned long irq_state; -static u32 t_ck; /* DDR clock period in ps */ static LIST_HEAD(device_list); #ifdef CONFIG_DEBUG_FS @@ -170,15 +167,6 @@ static inline void __exit emif_debugfs_exit(struct emif_data *emif) #endif /* - * Calculate the period of DDR clock from frequency value - */ -static void set_ddr_clk_period(u32 freq) -{ - /* Divide 10^12 by frequency to get period in ps */ - t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq); -} - -/* * Get bus width used by EMIF. Note that this may be different from the * bus width of the DDR devices used. For instance two 16-bit DDR devices * may be connected to a given CS of EMIF. In this case bus width as far @@ -196,19 +184,6 @@ static u32 get_emif_bus_width(struct emif_data *emif) return width; } -/* - * Get the CL from SDRAM_CONFIG register - */ -static u32 get_cl(struct emif_data *emif) -{ - u32 cl; - void __iomem *base = emif->base; - - cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT; - - return cl; -} - static void set_lpmode(struct emif_data *emif, u8 lpmode) { u32 temp; @@ -328,203 +303,6 @@ static const struct lpddr2_addressing *get_addressing_table( return &lpddr2_jedec_addressing_table[index]; } -/* - * Find the the right timing table from the array of timing - * tables of the device using DDR clock frequency - */ -static const struct lpddr2_timings *get_timings_table(struct emif_data *emif, - u32 freq) -{ - u32 i, min, max, freq_nearest; - const struct lpddr2_timings *timings = NULL; - const struct lpddr2_timings *timings_arr = emif->plat_data->timings; - struct device *dev = emif->dev; - - /* Start with a very high frequency - 1GHz */ - freq_nearest = 1000000000; - - /* - * Find the timings table such that: - * 1. the frequency range covers the required frequency(safe) AND - * 2. 
the max_freq is closest to the required frequency(optimal) - */ - for (i = 0; i < emif->plat_data->timings_arr_size; i++) { - max = timings_arr[i].max_freq; - min = timings_arr[i].min_freq; - if ((freq >= min) && (freq <= max) && (max < freq_nearest)) { - freq_nearest = max; - timings = &timings_arr[i]; - } - } - - if (!timings) - dev_err(dev, "%s: couldn't find timings for - %dHz\n", - __func__, freq); - - dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n", - __func__, freq, freq_nearest); - - return timings; -} - -static u32 get_sdram_ref_ctrl_shdw(u32 freq, - const struct lpddr2_addressing *addressing) -{ - u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi; - - /* Scale down frequency and t_refi to avoid overflow */ - freq_khz = freq / 1000; - t_refi = addressing->tREFI_ns / 100; - - /* - * refresh rate to be set is 'tREFI(in us) * freq in MHz - * division by 10000 to account for change in units - */ - val = t_refi * freq_khz / 10000; - ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT; - - return ref_ctrl_shdw; -} - -static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings, - const struct lpddr2_min_tck *min_tck, - const struct lpddr2_addressing *addressing) -{ - u32 tim1 = 0, val = 0; - - val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1; - tim1 |= val << T_WTR_SHIFT; - - if (addressing->num_banks == B8) - val = DIV_ROUND_UP(timings->tFAW, t_ck*4); - else - val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck)); - tim1 |= (val - 1) << T_RRD_SHIFT; - - val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1; - tim1 |= val << T_RC_SHIFT; - - val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck)); - tim1 |= (val - 1) << T_RAS_SHIFT; - - val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1; - tim1 |= val << T_WR_SHIFT; - - val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1; - tim1 |= val << T_RCD_SHIFT; - - val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1; - tim1 |= val << T_RP_SHIFT; - - return tim1; -} - -static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings, - const struct lpddr2_min_tck *min_tck, - const struct lpddr2_addressing *addressing) -{ - u32 tim1 = 0, val = 0; - - val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1; - tim1 = val << T_WTR_SHIFT; - - /* - * tFAW is approximately 4 times tRRD. 
So add 1875*4 = 7500ps - * to tFAW for de-rating - */ - if (addressing->num_banks == B8) { - val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1; - } else { - val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck); - val = max(min_tck->tRRD, val) - 1; - } - tim1 |= val << T_RRD_SHIFT; - - val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck); - tim1 |= (val - 1) << T_RC_SHIFT; - - val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck); - val = max(min_tck->tRASmin, val) - 1; - tim1 |= val << T_RAS_SHIFT; - - val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1; - tim1 |= val << T_WR_SHIFT; - - val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck)); - tim1 |= (val - 1) << T_RCD_SHIFT; - - val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck)); - tim1 |= (val - 1) << T_RP_SHIFT; - - return tim1; -} - -static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings, - const struct lpddr2_min_tck *min_tck, - const struct lpddr2_addressing *addressing, - u32 type) -{ - u32 tim2 = 0, val = 0; - - val = min_tck->tCKE - 1; - tim2 |= val << T_CKE_SHIFT; - - val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1; - tim2 |= val << T_RTP_SHIFT; - - /* tXSNR = tRFCab_ps + 10 ns(tRFCab_ps for LPDDR2). */ - val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1; - tim2 |= val << T_XSNR_SHIFT; - - /* XSRD same as XSNR for LPDDR2 */ - tim2 |= val << T_XSRD_SHIFT; - - val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1; - tim2 |= val << T_XP_SHIFT; - - return tim2; -} - -static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings, - const struct lpddr2_min_tck *min_tck, - const struct lpddr2_addressing *addressing, - u32 type, u32 ip_rev, u32 derated) -{ - u32 tim3 = 0, val = 0, t_dqsck; - - val = timings->tRAS_max_ns / addressing->tREFI_ns - 1; - val = val > 0xF ? 0xF : val; - tim3 |= val << T_RAS_MAX_SHIFT; - - val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1; - tim3 |= val << T_RFC_SHIFT; - - t_dqsck = (derated == EMIF_DERATED_TIMINGS) ? 
- timings->tDQSCK_max_derated : timings->tDQSCK_max; - if (ip_rev == EMIF_4D5) - val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1; - else - val = DIV_ROUND_UP(t_dqsck, t_ck) - 1; - - tim3 |= val << T_TDQSCKMAX_SHIFT; - - val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1; - tim3 |= val << ZQ_ZQCS_SHIFT; - - val = DIV_ROUND_UP(timings->tCKESR, t_ck); - val = max(min_tck->tCKESR, val) - 1; - tim3 |= val << T_CKESR_SHIFT; - - if (ip_rev == EMIF_4D5) { - tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT; - - val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1; - tim3 |= val << T_PDLL_UL_SHIFT; - } - - return tim3; -} - static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing, bool cs1_used, bool cal_resistors_per_cs) { @@ -589,117 +367,6 @@ static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing, return alert; } -static u32 get_read_idle_ctrl_shdw(u8 volt_ramp) -{ - u32 idle = 0, val = 0; - - /* - * Maximum value in normal conditions and increased frequency - * when voltage is ramping - */ - if (volt_ramp) - val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1; - else - val = 0x1FF; - - /* - * READ_IDLE_CTRL register in EMIF4D has same offset and fields - * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts - */ - idle |= val << DLL_CALIB_INTERVAL_SHIFT; - idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT; - - return idle; -} - -static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp) -{ - u32 calib = 0, val = 0; - - if (volt_ramp == DDR_VOLTAGE_RAMPING) - val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1; - else - val = 0; /* Disabled when voltage is stable */ - - calib |= val << DLL_CALIB_INTERVAL_SHIFT; - calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT; - - return calib; -} - -static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings, - u32 freq, u8 RL) -{ - u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0; - - val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1; - phy |= val << READ_LATENCY_SHIFT_4D; - - if (freq <= 100000000) - val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY; - else if (freq <= 200000000) - val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY; - else - val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY; - - phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D; - - return phy; -} - -static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl) -{ - u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay; - - /* - * DLL operates at 266 MHz. 
If DDR frequency is near 266 MHz, - * half-delay is not needed else set half-delay - */ - if (freq >= 265000000 && freq < 267000000) - half_delay = 0; - else - half_delay = 1; - - phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5; - phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS, - t_ck) - 1) << READ_LATENCY_SHIFT_4D5); - - return phy; -} - -static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void) -{ - u32 fifo_we_slave_ratio; - - fifo_we_slave_ratio = DIV_ROUND_CLOSEST( - EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck); - - return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 | - fifo_we_slave_ratio << 22; -} - -static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void) -{ - u32 fifo_we_slave_ratio; - - fifo_we_slave_ratio = DIV_ROUND_CLOSEST( - EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck); - - return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 | - fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23; -} - -static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void) -{ - u32 fifo_we_slave_ratio; - - fifo_we_slave_ratio = DIV_ROUND_CLOSEST( - EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck); - - return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 | - fifo_we_slave_ratio << 13; -} - static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev) { u32 pwr_mgmt_ctrl = 0, timeout; @@ -822,51 +489,6 @@ static void get_temperature_level(struct emif_data *emif) } /* - * Program EMIF shadow registers that are not dependent on temperature - * or voltage - */ -static void setup_registers(struct emif_data *emif, struct emif_regs *regs) -{ - void __iomem *base = emif->base; - - writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW); - writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW); - writel(regs->pwr_mgmt_ctrl_shdw, - base + EMIF_POWER_MANAGEMENT_CTRL_SHDW); - - /* Settings specific for EMIF4D5 */ - if (emif->plat_data->ip_rev != EMIF_4D5) - return; - writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW); - writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW); - writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW); -} - -/* - * When voltage ramps dll calibration and forced read idle should - * happen more often - */ -static void setup_volt_sensitive_regs(struct emif_data *emif, - struct emif_regs *regs, u32 volt_state) -{ - u32 calib_ctrl; - void __iomem *base = emif->base; - - /* - * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as - * EMIF_DLL_CALIB_CTRL in EMIF4D5 and dll_calib_ctrl_shadow_* - * is an alias of the respective read_idle_ctrl_shdw_* (members of - * a union). So, the below code takes care of both cases - */ - if (volt_state == DDR_VOLTAGE_RAMPING) - calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp; - else - calib_ctrl = regs->dll_calib_ctrl_shdw_normal; - - writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW); -} - -/* * setup_temperature_sensitive_regs() - set the timings for temperature * sensitive registers. 
This happens once at initialisation time based * on the temperature at boot time and subsequently based on the temperature @@ -1508,7 +1130,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev) } list_add(&emif->node, &device_list); - emif->addressing = get_addressing_table(emif->plat_data->device_info); /* Save pointers to each other in emif and device structures */ emif->dev = &pdev->dev; @@ -1563,305 +1184,6 @@ static void emif_shutdown(struct platform_device *pdev) disable_and_clear_all_interrupts(emif); } -static int get_emif_reg_values(struct emif_data *emif, u32 freq, - struct emif_regs *regs) -{ - u32 ip_rev, phy_type; - u32 cl, type; - const struct lpddr2_timings *timings; - const struct lpddr2_min_tck *min_tck; - const struct ddr_device_info *device_info; - const struct lpddr2_addressing *addressing; - struct emif_data *emif_for_calc; - struct device *dev; - - dev = emif->dev; - /* - * If the devices on this EMIF instance is duplicate of EMIF1, - * use EMIF1 details for the calculation - */ - emif_for_calc = emif->duplicate ? emif1 : emif; - timings = get_timings_table(emif_for_calc, freq); - addressing = emif_for_calc->addressing; - if (!timings || !addressing) { - dev_err(dev, "%s: not enough data available for %dHz", - __func__, freq); - return -1; - } - - device_info = emif_for_calc->plat_data->device_info; - type = device_info->type; - ip_rev = emif_for_calc->plat_data->ip_rev; - phy_type = emif_for_calc->plat_data->phy_type; - - min_tck = emif_for_calc->plat_data->min_tck; - - set_ddr_clk_period(freq); - - regs->ref_ctrl_shdw = get_sdram_ref_ctrl_shdw(freq, addressing); - regs->sdram_tim1_shdw = get_sdram_tim_1_shdw(timings, min_tck, - addressing); - regs->sdram_tim2_shdw = get_sdram_tim_2_shdw(timings, min_tck, - addressing, type); - regs->sdram_tim3_shdw = get_sdram_tim_3_shdw(timings, min_tck, - addressing, type, ip_rev, EMIF_NORMAL_TIMINGS); - - cl = get_cl(emif); - - if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) { - regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d( - timings, freq, cl); - } else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) { - regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl); - regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5(); - regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5(); - regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5(); - } else { - return -1; - } - - /* Only timeout values in pwr_mgmt_ctrl_shdw register */ - regs->pwr_mgmt_ctrl_shdw = - get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) & - (CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK); - - if (ip_rev & EMIF_4D) { - regs->read_idle_ctrl_shdw_normal = - get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE); - - regs->read_idle_ctrl_shdw_volt_ramp = - get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING); - } else if (ip_rev & EMIF_4D5) { - regs->dll_calib_ctrl_shdw_normal = - get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE); - - regs->dll_calib_ctrl_shdw_volt_ramp = - get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING); - } - - if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) { - regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4, - addressing); - - regs->sdram_tim1_shdw_derated = - get_sdram_tim_1_shdw_derated(timings, min_tck, - addressing); - - regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings, - min_tck, addressing, type, ip_rev, - EMIF_DERATED_TIMINGS); - } - - regs->freq = freq; - - return 0; -} - -/* - * get_regs() - gets the cached emif_regs structure for a given EMIF 
instance - * given frequency(freq): - * - * As an optimisation, every EMIF instance other than EMIF1 shares the - * register cache with EMIF1 if the devices connected on this instance - * are same as that on EMIF1(indicated by the duplicate flag) - * - * If we do not have an entry corresponding to the frequency given, we - * allocate a new entry and calculate the values - * - * Upon finding the right reg dump, save it in curr_regs. It can be - * directly used for thermal de-rating and voltage ramping changes. - */ -static struct emif_regs *get_regs(struct emif_data *emif, u32 freq) -{ - int i; - struct emif_regs **regs_cache; - struct emif_regs *regs = NULL; - struct device *dev; - - dev = emif->dev; - if (emif->curr_regs && emif->curr_regs->freq == freq) { - dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq); - return emif->curr_regs; - } - - if (emif->duplicate) - regs_cache = emif1->regs_cache; - else - regs_cache = emif->regs_cache; - - for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) { - if (regs_cache[i]->freq == freq) { - regs = regs_cache[i]; - dev_dbg(dev, - "%s: reg dump found in reg cache for %u Hz\n", - __func__, freq); - break; - } - } - - /* - * If we don't have an entry for this frequency in the cache create one - * and calculate the values - */ - if (!regs) { - regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC); - if (!regs) - return NULL; - - if (get_emif_reg_values(emif, freq, regs)) { - devm_kfree(emif->dev, regs); - return NULL; - } - - /* - * Now look for an un-used entry in the cache and save the - * newly created struct. If there are no free entries - * over-write the last entry - */ - for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) - ; - - if (i >= EMIF_MAX_NUM_FREQUENCIES) { - dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n", - __func__); - i = EMIF_MAX_NUM_FREQUENCIES - 1; - devm_kfree(emif->dev, regs_cache[i]); - } - regs_cache[i] = regs; - } - - return regs; -} - -static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state) -{ - dev_dbg(emif->dev, "%s: voltage notification : %d", __func__, - volt_state); - - if (!emif->curr_regs) { - dev_err(emif->dev, - "%s: volt-notify before registers are ready: %d\n", - __func__, volt_state); - return; - } - - setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state); -} - -/* - * TODO: voltage notify handling should be hooked up to - * regulator framework as soon as the necessary support - * is available in mainline kernel. This function is un-used - * right now. - */ -static void __attribute__((unused)) volt_notify_handling(u32 volt_state) -{ - struct emif_data *emif; - - spin_lock_irqsave(&emif_lock, irq_state); - - list_for_each_entry(emif, &device_list, node) - do_volt_notify_handling(emif, volt_state); - do_freq_update(); - - spin_unlock_irqrestore(&emif_lock, irq_state); -} - -static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq) -{ - struct emif_regs *regs; - - regs = get_regs(emif, new_freq); - if (!regs) - return; - - emif->curr_regs = regs; - - /* - * Update the shadow registers: - * Temperature and voltage-ramp sensitive settings are also configured - * in terms of DDR cycles. So, we need to update them too when there - * is a freq change - */ - dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz", - __func__, new_freq); - setup_registers(emif, regs); - setup_temperature_sensitive_regs(emif, regs); - setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE); - - /* - * Part of workaround for errata i728. 
See do_freq_update() - * for more details - */ - if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) - set_lpmode(emif, EMIF_LP_MODE_DISABLE); -} - -/* - * TODO: frequency notify handling should be hooked up to - * clock framework as soon as the necessary support is - * available in mainline kernel. This function is un-used - * right now. - */ -static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq) -{ - struct emif_data *emif; - - /* - * NOTE: we are taking the spin-lock here and releases it - * only in post-notifier. This doesn't look good and - * Sparse complains about it, but this seems to be - * un-avoidable. We need to lock a sequence of events - * that is split between EMIF and clock framework. - * - * 1. EMIF driver updates EMIF timings in shadow registers in the - * frequency pre-notify callback from clock framework - * 2. clock framework sets up the registers for the new frequency - * 3. clock framework initiates a hw-sequence that updates - * the frequency EMIF timings synchronously. - * - * All these 3 steps should be performed as an atomic operation - * vis-a-vis similar sequence in the EMIF interrupt handler - * for temperature events. Otherwise, there could be race - * conditions that could result in incorrect EMIF timings for - * a given frequency - */ - spin_lock_irqsave(&emif_lock, irq_state); - - list_for_each_entry(emif, &device_list, node) - do_freq_pre_notify_handling(emif, new_freq); -} - -static void do_freq_post_notify_handling(struct emif_data *emif) -{ - /* - * Part of workaround for errata i728. See do_freq_update() - * for more details - */ - if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) - set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH); -} - -/* - * TODO: frequency notify handling should be hooked up to - * clock framework as soon as the necessary support is - * available in mainline kernel. This function is un-used - * right now. - */ -static void __attribute__((unused)) freq_post_notify_handling(void) -{ - struct emif_data *emif; - - list_for_each_entry(emif, &device_list, node) - do_freq_post_notify_handling(emif); - - /* - * Lock is done in pre-notify handler. 
See freq_pre_notify_handling() - * for more details - */ - spin_unlock_irqrestore(&emif_lock, irq_state); -} - #if defined(CONFIG_OF) static const struct of_device_id emif_of_match[] = { { .compatible = "ti,emif-4d" }, diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index 89f99b5b6450..d062c2f8250f 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -97,7 +97,6 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) iounmap(ctrl->gregs); dev_set_drvdata(&dev->dev, NULL); - kfree(ctrl); return 0; } @@ -209,7 +208,8 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); - fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); + fsl_ifc_ctrl_dev = devm_kzalloc(&dev->dev, sizeof(*fsl_ifc_ctrl_dev), + GFP_KERNEL); if (!fsl_ifc_ctrl_dev) return -ENOMEM; @@ -219,8 +219,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); if (!fsl_ifc_ctrl_dev->gregs) { dev_err(&dev->dev, "failed to get memory region\n"); - ret = -ENODEV; - goto err; + return -ENODEV; } if (of_property_read_bool(dev->dev.of_node, "little-endian")) { @@ -295,6 +294,7 @@ err_irq: free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); err: + iounmap(fsl_ifc_ctrl_dev->gregs); return ret; } diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index 9c0a28416777..f84b98278745 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -8,263 +8,22 @@ */ #include <linux/clk.h> -#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/pl353-smc.h> #include <linux/amba/bus.h> -/* Register definitions */ -#define PL353_SMC_MEMC_STATUS_OFFS 0 /* Controller status reg, RO */ -#define PL353_SMC_CFG_CLR_OFFS 0xC /* Clear config reg, WO */ -#define PL353_SMC_DIRECT_CMD_OFFS 0x10 /* Direct command reg, WO */ -#define PL353_SMC_SET_CYCLES_OFFS 0x14 /* Set cycles register, WO */ -#define PL353_SMC_SET_OPMODE_OFFS 0x18 /* Set opmode register, WO */ -#define PL353_SMC_ECC_STATUS_OFFS 0x400 /* ECC status register */ -#define PL353_SMC_ECC_MEMCFG_OFFS 0x404 /* ECC mem config reg */ -#define PL353_SMC_ECC_MEMCMD1_OFFS 0x408 /* ECC mem cmd1 reg */ -#define PL353_SMC_ECC_MEMCMD2_OFFS 0x40C /* ECC mem cmd2 reg */ -#define PL353_SMC_ECC_VALUE0_OFFS 0x418 /* ECC value 0 reg */ - -/* Controller status register specific constants */ -#define PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT 6 - -/* Clear configuration register specific constants */ -#define PL353_SMC_CFG_CLR_INT_CLR_1 0x10 -#define PL353_SMC_CFG_CLR_ECC_INT_DIS_1 0x40 -#define PL353_SMC_CFG_CLR_INT_DIS_1 0x2 -#define PL353_SMC_CFG_CLR_DEFAULT_MASK (PL353_SMC_CFG_CLR_INT_CLR_1 | \ - PL353_SMC_CFG_CLR_ECC_INT_DIS_1 | \ - PL353_SMC_CFG_CLR_INT_DIS_1) - -/* Set cycles register specific constants */ -#define PL353_SMC_SET_CYCLES_T0_MASK 0xF -#define PL353_SMC_SET_CYCLES_T0_SHIFT 0 -#define PL353_SMC_SET_CYCLES_T1_MASK 0xF -#define PL353_SMC_SET_CYCLES_T1_SHIFT 4 -#define PL353_SMC_SET_CYCLES_T2_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T2_SHIFT 8 -#define PL353_SMC_SET_CYCLES_T3_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T3_SHIFT 11 -#define PL353_SMC_SET_CYCLES_T4_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T4_SHIFT 14 -#define PL353_SMC_SET_CYCLES_T5_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T5_SHIFT 17 -#define 
PL353_SMC_SET_CYCLES_T6_MASK 0xF -#define PL353_SMC_SET_CYCLES_T6_SHIFT 20 - -/* ECC status register specific constants */ -#define PL353_SMC_ECC_STATUS_BUSY BIT(6) -#define PL353_SMC_ECC_REG_SIZE_OFFS 4 - -/* ECC memory config register specific constants */ -#define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC -#define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2 -#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0x3 - -#define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \ - (2 << 21)) /* UpdateRegs operation */ - -#define PL353_NAND_ECC_CMD1 ((0x80) | /* Write command */ \ - (0 << 8) | /* Read command */ \ - (0x30 << 16) | /* Read End command */ \ - (1 << 24)) /* Read End command calid */ - -#define PL353_NAND_ECC_CMD2 ((0x85) | /* Write col change cmd */ \ - (5 << 8) | /* Read col change cmd */ \ - (0xE0 << 16) | /* Read col change end cmd */ \ - (1 << 24)) /* Read col change end cmd valid */ -#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ) /** * struct pl353_smc_data - Private smc driver structure * @memclk: Pointer to the peripheral clock - * @aclk: Pointer to the APER clock + * @aclk: Pointer to the AXI peripheral clock */ struct pl353_smc_data { struct clk *memclk; struct clk *aclk; }; -/* SMC virtual register base */ -static void __iomem *pl353_smc_base; - -/** - * pl353_smc_set_buswidth - Set memory buswidth - * @bw: Memory buswidth (8 | 16) - * Return: 0 on success or negative errno. - */ -int pl353_smc_set_buswidth(unsigned int bw) -{ - if (bw != PL353_SMC_MEM_WIDTH_8 && bw != PL353_SMC_MEM_WIDTH_16) - return -EINVAL; - - writel(bw, pl353_smc_base + PL353_SMC_SET_OPMODE_OFFS); - writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + - PL353_SMC_DIRECT_CMD_OFFS); - - return 0; -} -EXPORT_SYMBOL_GPL(pl353_smc_set_buswidth); - -/** - * pl353_smc_set_cycles - Set memory timing parameters - * @timings: NAND controller timing parameters - * - * Sets NAND chip specific timing parameters. - */ -void pl353_smc_set_cycles(u32 timings[]) -{ - /* - * Set write pulse timing. This one is easy to extract: - * - * NWE_PULSE = tWP - */ - timings[0] &= PL353_SMC_SET_CYCLES_T0_MASK; - timings[1] = (timings[1] & PL353_SMC_SET_CYCLES_T1_MASK) << - PL353_SMC_SET_CYCLES_T1_SHIFT; - timings[2] = (timings[2] & PL353_SMC_SET_CYCLES_T2_MASK) << - PL353_SMC_SET_CYCLES_T2_SHIFT; - timings[3] = (timings[3] & PL353_SMC_SET_CYCLES_T3_MASK) << - PL353_SMC_SET_CYCLES_T3_SHIFT; - timings[4] = (timings[4] & PL353_SMC_SET_CYCLES_T4_MASK) << - PL353_SMC_SET_CYCLES_T4_SHIFT; - timings[5] = (timings[5] & PL353_SMC_SET_CYCLES_T5_MASK) << - PL353_SMC_SET_CYCLES_T5_SHIFT; - timings[6] = (timings[6] & PL353_SMC_SET_CYCLES_T6_MASK) << - PL353_SMC_SET_CYCLES_T6_SHIFT; - timings[0] |= timings[1] | timings[2] | timings[3] | - timings[4] | timings[5] | timings[6]; - - writel(timings[0], pl353_smc_base + PL353_SMC_SET_CYCLES_OFFS); - writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + - PL353_SMC_DIRECT_CMD_OFFS); -} -EXPORT_SYMBOL_GPL(pl353_smc_set_cycles); - -/** - * pl353_smc_ecc_is_busy - Read ecc busy flag - * Return: the ecc_status bit from the ecc_status register. 1 = busy, 0 = idle - */ -bool pl353_smc_ecc_is_busy(void) -{ - return ((readl(pl353_smc_base + PL353_SMC_ECC_STATUS_OFFS) & - PL353_SMC_ECC_STATUS_BUSY) == PL353_SMC_ECC_STATUS_BUSY); -} -EXPORT_SYMBOL_GPL(pl353_smc_ecc_is_busy); - -/** - * pl353_smc_get_ecc_val - Read ecc_valueN registers - * @ecc_reg: Index of the ecc_value reg (0..3) - * Return: the content of the requested ecc_value register. - * - * There are four valid ecc_value registers. 
The argument is truncated to stay - * within this valid boundary. - */ -u32 pl353_smc_get_ecc_val(int ecc_reg) -{ - u32 addr, reg; - - addr = PL353_SMC_ECC_VALUE0_OFFS + - (ecc_reg * PL353_SMC_ECC_REG_SIZE_OFFS); - reg = readl(pl353_smc_base + addr); - - return reg; -} -EXPORT_SYMBOL_GPL(pl353_smc_get_ecc_val); - -/** - * pl353_smc_get_nand_int_status_raw - Get NAND interrupt status bit - * Return: the raw_int_status1 bit from the memc_status register - */ -int pl353_smc_get_nand_int_status_raw(void) -{ - u32 reg; - - reg = readl(pl353_smc_base + PL353_SMC_MEMC_STATUS_OFFS); - reg >>= PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT; - reg &= 1; - - return reg; -} -EXPORT_SYMBOL_GPL(pl353_smc_get_nand_int_status_raw); - -/** - * pl353_smc_clr_nand_int - Clear NAND interrupt - */ -void pl353_smc_clr_nand_int(void) -{ - writel(PL353_SMC_CFG_CLR_INT_CLR_1, - pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); -} -EXPORT_SYMBOL_GPL(pl353_smc_clr_nand_int); - -/** - * pl353_smc_set_ecc_mode - Set SMC ECC mode - * @mode: ECC mode (BYPASS, APB, MEM) - * Return: 0 on success or negative errno. - */ -int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode) -{ - u32 reg; - int ret = 0; - - switch (mode) { - case PL353_SMC_ECCMODE_BYPASS: - case PL353_SMC_ECCMODE_APB: - case PL353_SMC_ECCMODE_MEM: - - reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - reg &= ~PL353_SMC_ECC_MEMCFG_MODE_MASK; - reg |= mode << PL353_SMC_ECC_MEMCFG_MODE_SHIFT; - writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - - break; - default: - ret = -EINVAL; - } - - return ret; -} -EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_mode); - -/** - * pl353_smc_set_ecc_pg_size - Set SMC ECC page size - * @pg_sz: ECC page size - * Return: 0 on success or negative errno. - */ -int pl353_smc_set_ecc_pg_size(unsigned int pg_sz) -{ - u32 reg, sz; - - switch (pg_sz) { - case 0: - sz = 0; - break; - case SZ_512: - sz = 1; - break; - case SZ_1K: - sz = 2; - break; - case SZ_2K: - sz = 3; - break; - default: - return -EINVAL; - } - - reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - reg &= ~PL353_SMC_ECC_MEMCFG_PGSIZE_MASK; - reg |= sz; - writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - - return 0; -} -EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_pg_size); - static int __maybe_unused pl353_smc_suspend(struct device *dev) { struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev); @@ -277,8 +36,8 @@ static int __maybe_unused pl353_smc_suspend(struct device *dev) static int __maybe_unused pl353_smc_resume(struct device *dev) { - int ret; struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev); + int ret; ret = clk_enable(pl353_smc->aclk); if (ret) { @@ -296,77 +55,31 @@ static int __maybe_unused pl353_smc_resume(struct device *dev) return ret; } -static struct amba_driver pl353_smc_driver; - static SIMPLE_DEV_PM_OPS(pl353_smc_dev_pm_ops, pl353_smc_suspend, pl353_smc_resume); -/** - * pl353_smc_init_nand_interface - Initialize the NAND interface - * @adev: Pointer to the amba_device struct - * @nand_node: Pointer to the pl353_nand device_node struct - */ -static void pl353_smc_init_nand_interface(struct amba_device *adev, - struct device_node *nand_node) -{ - unsigned long timeout; - - pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8); - writel(PL353_SMC_CFG_CLR_INT_CLR_1, - pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); - writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + - PL353_SMC_DIRECT_CMD_OFFS); - - timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT; - /* Wait till the ECC operation is complete */ - do { - if (pl353_smc_ecc_is_busy()) - cpu_relax(); - else - 
break; - } while (!time_after_eq(jiffies, timeout)); - - if (time_after_eq(jiffies, timeout)) - return; - - writel(PL353_NAND_ECC_CMD1, - pl353_smc_base + PL353_SMC_ECC_MEMCMD1_OFFS); - writel(PL353_NAND_ECC_CMD2, - pl353_smc_base + PL353_SMC_ECC_MEMCMD2_OFFS); -} - static const struct of_device_id pl353_smc_supported_children[] = { { .compatible = "cfi-flash" }, { .compatible = "arm,pl353-nand-r2p1", - .data = pl353_smc_init_nand_interface }, {} }; static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) { + struct device_node *of_node = adev->dev.of_node; + const struct of_device_id *match = NULL; struct pl353_smc_data *pl353_smc; struct device_node *child; - struct resource *res; int err; - struct device_node *of_node = adev->dev.of_node; - static void (*init)(struct amba_device *adev, - struct device_node *nand_node); - const struct of_device_id *match = NULL; pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL); if (!pl353_smc) return -ENOMEM; - /* Get the NAND controller virtual address */ - res = &adev->res; - pl353_smc_base = devm_ioremap_resource(&adev->dev, res); - if (IS_ERR(pl353_smc_base)) - return PTR_ERR(pl353_smc_base); - pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk"); if (IS_ERR(pl353_smc->aclk)) { dev_err(&adev->dev, "aclk clock not found.\n"); @@ -388,15 +101,11 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) err = clk_prepare_enable(pl353_smc->memclk); if (err) { dev_err(&adev->dev, "Unable to enable memory clock.\n"); - goto out_clk_dis_aper; + goto disable_axi_clk; } amba_set_drvdata(adev, pl353_smc); - /* clear interrupts */ - writel(PL353_SMC_CFG_CLR_DEFAULT_MASK, - pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); - /* Find compatible children. Only a single child is supported */ for_each_available_child_of_node(of_node, child) { match = of_match_node(pl353_smc_supported_children, child); @@ -407,20 +116,18 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) break; } if (!match) { + err = -ENODEV; dev_err(&adev->dev, "no matching children\n"); - goto out_clk_disable; + goto disable_mem_clk; } - init = match->data; - if (init) - init(adev, child); of_platform_device_create(child, NULL, &adev->dev); return 0; -out_clk_disable: +disable_mem_clk: clk_disable_unprepare(pl353_smc->memclk); -out_clk_dis_aper: +disable_axi_clk: clk_disable_unprepare(pl353_smc->aclk); return err; @@ -436,8 +143,8 @@ static void pl353_smc_remove(struct amba_device *adev) static const struct amba_id pl353_ids[] = { { - .id = 0x00041353, - .mask = 0x000fffff, + .id = 0x00041353, + .mask = 0x000fffff, }, { 0, 0 }, }; diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c index 4d5758c419c5..ffec26a99313 100644 --- a/drivers/memory/stm32-fmc2-ebi.c +++ b/drivers/memory/stm32-fmc2-ebi.c @@ -1048,16 +1048,19 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) if (ret) { dev_err(dev, "could not retrieve reg property: %d\n", ret); + of_node_put(child); return ret; } if (bank >= FMC2_MAX_BANKS) { dev_err(dev, "invalid reg value: %d\n", bank); + of_node_put(child); return -EINVAL; } if (ebi->bank_assigned & BIT(bank)) { dev_err(dev, "bank already assigned: %d\n", bank); + of_node_put(child); return -EINVAL; } @@ -1066,6 +1069,7 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) if (ret) { dev_err(dev, "setup chip select %d failed: %d\n", bank, ret); + of_node_put(child); return ret; } } |
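For reference, the recurring fix in the atmel-ebi.c and stm32-fmc2-ebi.c hunks above is releasing the child node reference on every early exit from a for_each_available_child_of_node() loop, since the iterator holds a reference on the node it hands out. A minimal sketch of that pattern follows; example_parse_children() and example_setup_child() are hypothetical stand-ins for the driver code, not functions from this diff.

#include <linux/of.h>

/* Hypothetical per-child setup helper used only for illustration. */
static int example_setup_child(struct device_node *child);

static int example_parse_children(struct device_node *np)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(np, child) {
		ret = example_setup_child(child);
		if (ret) {
			/* The iterator took a reference on 'child'; drop it
			 * before bailing out, as the hunks above now do.
			 */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}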