diff options
Diffstat (limited to 'drivers')
62 files changed, 21243 insertions, 54 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 43d6ba83a191..a714292630ad 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -665,6 +665,7 @@ config IBM_BSR between several cores on a system source "drivers/char/ipmi/Kconfig" +source "drivers/char/aspeed/Kconfig" config DS1620 tristate "NetWinder thermometer support" diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 438f71317c5c..ee197cbce74c 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -85,6 +85,7 @@ obj-$(CONFIG_TOSHIBA) += toshiba.o obj-$(CONFIG_I8K) += i8k.o obj-$(CONFIG_DS1620) += ds1620.o obj-$(CONFIG_HW_RANDOM) += hw_random/ +obj-$(CONFIG_AST_MISC) += aspeed/ obj-$(CONFIG_PPDEV) += ppdev.o obj-$(CONFIG_NWBUTTON) += nwbutton.o obj-$(CONFIG_NWFLASH) += nwflash.o diff --git a/drivers/char/aspeed/Kconfig b/drivers/char/aspeed/Kconfig new file mode 100644 index 000000000000..7aee8d3ddcb2 --- /dev/null +++ b/drivers/char/aspeed/Kconfig @@ -0,0 +1,52 @@ +# +# MISC configuration for ASPEED SOCs +# + +if ARCH_ASPEED +menuconfig AST_MISC + tristate 'MISC drivers for ASPEED SOCs' + help + We can select misc drivers for ASPEED SOC in this sub-function. + +if AST_MISC +config AST_VIDEO + tristate "ASPEED Video Engine driver" + default n + help + Driver for AST Video Engine + +config ADC_CAT9883 + tristate "CAT 9883 ADC driver" + default n + help + Driver for CAT 9883 + +config AST_SPI_BIOS + tristate "ASPEED SPI BIOS flash register" + default n + help + Driver for SPI BIOS flash register + +config AST_PECI + tristate "ASPEED PECI Controller" + default n + help + Driver for PECI Controller + +config AST_KCS + tristate 'ASPEED KCS support' + help + Support for the KCS channels on the ASPEED chips, + providing /dev/kcs0, 1 and 2 (note, some machines may not + provide all of these ports, depending on how the serial port + pins are configured. 
+ +config AST_GPIO + tristate "ASPEED GPIO Controller" + default n + help + Driver for GPIO Controller included in ASPEED SOCs. + +endif # CONFIG_AST_MISC +endif # CONFIG_AST + diff --git a/drivers/char/aspeed/Makefile b/drivers/char/aspeed/Makefile new file mode 100644 index 000000000000..517b2b7f84b9 --- /dev/null +++ b/drivers/char/aspeed/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the ASPEED drivers. +# + +obj-$(CONFIG_AST_VIDEO) += ast_video.o +obj-$(CONFIG_ADC_CAT9883) += adc_cat9883.o +obj-$(CONFIG_AST_KCS) += ast_kcs.o +obj-$(CONFIG_AST_GPIO) += ast_gpio.o +obj-$(CONFIG_AST_PECI) += ast_peci.o diff --git a/drivers/char/aspeed/ast_peci.c b/drivers/char/aspeed/ast_peci.c new file mode 100644 index 000000000000..1f7cae3c1aab --- /dev/null +++ b/drivers/char/aspeed/ast_peci.c @@ -0,0 +1,508 @@ +/******************************************************************************** +* File Name : ast_peci.c +* Author : Ryan Chen +* Description : AST PECI Controller +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +* +* Version : 1.0 +* History : +* 1. 
2013/01/30 Ryan Chen create this file +* +********************************************************************************/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <asm/uaccess.h> + +#include <asm/io.h> +#include <linux/delay.h> +#include <linux/miscdevice.h> +#ifdef CONFIG_COLDFIRE +#include <asm/arch/regs-peci.h> +#else +#include <plat/regs-peci.h> +#endif + +//#define CONFIG_AST_PECI_DEBUG + +#ifdef CONFIG_AST_PECI_DEBUG + #define PECI_DBG(fmt, args...) printk("%s(): " fmt, __FUNCTION__, ## args) +#else + #define PECI_DBG(fmt, args...) +#endif + +/***********************************************************************/ +struct timing_negotiation { + u8 msg_timing; + u8 addr_timing; +}; + +struct xfer_msg { + u8 client_addr; + u8 tx_len; + u8 rx_len; + u8 tx_fcs; + u8 rx_fcs; + u8 fcs_en; + u8 sw_fcs; + u8 *tx_buf; + u8 *rx_buf; + u32 sts; +}; + +#define PECI_DEVICE "/dev/ast-peci" + +//IOCTL .. 
+#define PECIIOC_BASE 'P' + +#define AST_PECI_IOCRTIMING _IOR(PECIIOC_BASE, 0, struct timing_negotiation*) +#define AST_PECI_IOCWTIMING _IOW(PECIIOC_BASE, 1, struct timing_negotiation*) +#define AST_PECI_IOCXFER _IOWR(PECIIOC_BASE, 2, struct xfer_msg*) + + +/***********************************************************************/ + +static struct ast_peci_data { + struct device *misc_dev; + void __iomem *reg_base; /* virtual */ + int irq; //PECI IRQ number + int open_count; + struct completion xfer_complete; + u32 sts; + struct mutex lock; +} ast_peci; + +static inline void +ast_peci_write(u32 val, u32 reg) +{ + PECI_DBG("write offset: %x, val: %x \n",reg,val); + writel(val, ast_peci.reg_base + reg); +} + +static inline u32 +ast_peci_read(u32 reg) +{ + u32 val = readl(ast_peci.reg_base + reg); + PECI_DBG("read offset: %x, val: %x \n",reg,val); + return val; +} + +static long ast_peci_ioctl(struct file *fp, + unsigned int cmd, unsigned long arg) +{ + long ret = 0; + void __user *argp = (void __user *)arg; + struct xfer_msg msg; + struct timing_negotiation tim_ng; + u32 peci_head; + int i=0; + u32 *tx_buf0 = (u32 *) (ast_peci.reg_base + AST_PECI_W_DATA0); + u32 *tx_buf1 = (u32 *) (ast_peci.reg_base + AST_PECI_W_DATA4); + u32 *rx_buf0 = (u32 *) (ast_peci.reg_base + AST_PECI_R_DATA0); + u32 *rx_buf1 = (u32 *) (ast_peci.reg_base + AST_PECI_R_DATA4); + u32 rx_data; + + PECI_DBG("ast_peci_ioctl cmd %x \n", cmd); + + switch(cmd) { + case AST_PECI_IOCRTIMING: + tim_ng.msg_timing = PECI_TIMING_MESSAGE_GET(ast_peci_read(AST_PECI_TIMING)); + tim_ng.addr_timing = PECI_TIMING_ADDRESS_GET(ast_peci_read(AST_PECI_TIMING)); + if (copy_to_user(argp, &tim_ng, sizeof(struct timing_negotiation))) + ret = -EFAULT; + break; + + case AST_PECI_IOCWTIMING: + if (copy_from_user(&tim_ng, argp, sizeof(struct timing_negotiation))) { + ret = -EFAULT; + } else { + ast_peci_write(PECI_TIMING_MESSAGE(tim_ng.msg_timing) | + PECI_TIMING_ADDRESS(tim_ng.addr_timing), AST_PECI_TIMING); + } + break; + + 
case AST_PECI_IOCXFER: + //Check cmd operation sts + while(ast_peci_read(AST_PECI_CMD) & PECI_CMD_FIRE) { + printk("wait for free \n"); + }; + + if (copy_from_user(&msg, argp, sizeof(struct xfer_msg))) { + ret = -EFAULT; + break; + } + +#ifdef CONFIG_AST_PECI_DEBUG + printk("fcs_en %d, client_addr %x, tx_len %d, rx_len %d",msg.fcs_en ,msg.client_addr, msg.tx_len, msg.rx_len); + printk("\ntx_buf : "); + for(i = 0;i< msg.tx_len; i++) + printk(" %x ",msg.tx_buf[i]); + printk("\n"); +#endif + + if(msg.fcs_en) + peci_head = PECI_TAGET_ADDR(msg.client_addr) | + PECI_WRITE_LEN(msg.tx_len) | + PECI_READ_LEN(msg.rx_len) | PECI_AW_FCS_EN; + else + peci_head = PECI_TAGET_ADDR(msg.client_addr) | + PECI_WRITE_LEN(msg.tx_len) | + PECI_READ_LEN(msg.rx_len); + + + ast_peci_write(peci_head, AST_PECI_CMD_CTRL); + + for(i = 0; i < msg.tx_len; i++) { + if(i < 16) { + if(i%4 == 0) + tx_buf0[i/4] = 0; + tx_buf0[i/4] |= (msg.tx_buf[i] << ((i%4)*8)) ; + } else { + if(i%4 == 0) + tx_buf1[i/4] = 0; + tx_buf1[i/4] |= (msg.tx_buf[i] << ((i%4)*8)) ; + } + } + +#ifdef CONFIG_AST_PECI_DEBUG + printk("\nWD \n "); + ast_peci_read(AST_PECI_W_DATA0); + ast_peci_read(AST_PECI_W_DATA1); + ast_peci_read(AST_PECI_W_DATA2); + ast_peci_read(AST_PECI_W_DATA3); + ast_peci_read(AST_PECI_W_DATA4); + ast_peci_read(AST_PECI_W_DATA5); + ast_peci_read(AST_PECI_W_DATA6); + ast_peci_read(AST_PECI_W_DATA7); +#endif + init_completion(&ast_peci.xfer_complete); + //Fire Command + ast_peci_write(PECI_CMD_FIRE, AST_PECI_CMD); + + + ret = wait_for_completion_interruptible_timeout(&ast_peci.xfer_complete, 30*HZ); + + if (ret == 0) + printk("peci controller timed out\n"); + + for(i = 0; i < msg.rx_len; i++) { + if(i < 16) { + switch(i%4) { + case 0: + rx_data = rx_buf0[i/4]; + + msg.rx_buf[i] = rx_data & 0xff; + break; + case 1: + msg.rx_buf[i] = (rx_data & 0xff00) >> 8; + break; + case 2: + msg.rx_buf[i] = (rx_data & 0xff0000) >> 16; + break; + case 3: + msg.rx_buf[i] = (rx_data & 0xff000000) >> 24; + break; + + } + } else 
{ + switch(i%4) { + case 0: + rx_data = rx_buf1[i/4]; + msg.rx_buf[i] = rx_data & 0xff; + break; + case 1: + msg.rx_buf[i] = (rx_data & 0xff00) >> 8; + break; + case 2: + msg.rx_buf[i] = (rx_data & 0xff0000) >> 16; + break; + case 3: + msg.rx_buf[i] = (rx_data & 0xff000000) >> 24; + break; + + } + } + } +#ifdef CONFIG_AST_PECI_DEBUG + printk("\nRD \n"); + ast_peci_read(AST_PECI_R_DATA0); + ast_peci_read(AST_PECI_R_DATA1); + ast_peci_read(AST_PECI_R_DATA2); + ast_peci_read(AST_PECI_R_DATA3); + ast_peci_read(AST_PECI_R_DATA4); + ast_peci_read(AST_PECI_R_DATA5); + ast_peci_read(AST_PECI_R_DATA6); + ast_peci_read(AST_PECI_R_DATA7); + + printk("rx_buf : "); + for(i = 0;i< msg.rx_len; i++) + printk("%x ",msg.rx_buf[i]); + printk("\n"); +#endif + msg.sts = ast_peci.sts; + msg.rx_fcs = PECI_CAPTURE_READ_FCS(ast_peci_read(AST_PECI_CAP_FCS)); + if (copy_to_user(argp, &msg, sizeof(struct xfer_msg))) + ret = -EFAULT; + + break; + default: + printk("ast_peci_ioctl command fail\n"); + ret = -ENOTTY; + break; + } + + return ret; +} + +static int ast_peci_open(struct inode *inode, struct file *file) +{ + PECI_DBG("ast_peci_open\n"); + + + /* Flush input queue on first open */ + if (ast_peci.open_count) + return -1; + + ast_peci.open_count++; + + + return 0; +} + +static int ast_peci_release(struct inode *inode, struct file *file) +{ + PECI_DBG("ast_peci_release\n"); + ast_peci.open_count--; + + return 0; +} + +static irqreturn_t ast_peci_handler(int this_irq, void *dev_id) +{ + ast_peci.sts = (0x1f & ast_peci_read(AST_PECI_INT_STS)); + + switch(ast_peci.sts) { + case PECI_INT_TIMEOUT: + printk("PECI_INT_TIMEOUT \n"); + ast_peci_write(PECI_INT_TIMEOUT, AST_PECI_INT_STS); + break; + case PECI_INT_CONNECT: + printk("PECI_INT_CONNECT \n"); + ast_peci_write(PECI_INT_CONNECT, AST_PECI_INT_STS); + break; + case PECI_INT_W_FCS_BAD: + printk("PECI_INT_W_FCS_BAD \n"); + ast_peci_write(PECI_INT_W_FCS_BAD, AST_PECI_INT_STS); + break; + case PECI_INT_W_FCS_ABORT: + printk("PECI_INT_W_FCS_ABORT 
\n"); + ast_peci_write(PECI_INT_W_FCS_ABORT, AST_PECI_INT_STS); + break; + case PECI_INT_CMD_DONE: + printk("PECI_INT_CMD_DONE \n"); + ast_peci_write(PECI_INT_CMD_DONE, AST_PECI_INT_STS); + ast_peci_write(0, AST_PECI_CMD); + break; + default: + printk("no one handle .... \n"); + break; + + } + + complete(&ast_peci.xfer_complete); + + return IRQ_HANDLED; + +} + +static void ast_peci_ctrl_init(void) +{ + //PECI Timing Setting : should 4 times of peci clk period 64 = 16 * 4 ?? + ast_peci_write(PECI_TIMING_MESSAGE(64) | PECI_TIMING_ADDRESS(64), AST_PECI_TIMING); + + + //PECI Programmable AWFCS + //ast_peci_write(ast_peci, PECI_PROGRAM_AW_FCS, AST_PECI_EXP_FCS); + + //TODO ..... + //Clear Interrupt + ast_peci_write(PECI_INT_TIMEOUT | PECI_INT_CONNECT | + PECI_INT_W_FCS_BAD | PECI_INT_W_FCS_ABORT | + PECI_INT_CMD_DONE, AST_PECI_INT_STS); + + //PECI Negotiation Selection , interrupt enable + //Set nego mode : 1st bit of addr negotiation + ast_peci_write(PECI_INT_TIMEOUT | PECI_INT_CONNECT | + PECI_INT_W_FCS_BAD | PECI_INT_W_FCS_ABORT | + PECI_INT_CMD_DONE, AST_PECI_INT_CTRL); + + //PECI Spec wide speed rangs [2kbps~2Mbps] + //Sampling 8/16, READ mode : Point Sampling , CLK source : 24Mhz , DIV by 8 : 3 --> CLK is 3Mhz + //PECI CTRL Enable + + ast_peci_write(PECI_CTRL_SAMPLING(8) | PECI_CTRL_CLK_DIV(3) | + PECI_CTRL_PECI_EN | + PECI_CTRL_PECI_CLK_EN, AST_PECI_CTRL); +} + +static const struct file_operations ast_peci_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = ast_peci_ioctl, + .open = ast_peci_open, + .release = ast_peci_release, +}; + +struct miscdevice ast_peci_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ast-peci", + .fops = &ast_peci_fops, +}; + +static int ast_peci_probe(struct platform_device *pdev) +{ + struct resource *res; + int ret=0; + + + PECI_DBG("ast_peci_probe\n"); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (NULL == res) { + dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n"); + ret = -ENOENT; + goto 
out; + } + + if (!request_mem_region(res->start, resource_size(res), res->name)) { + dev_err(&pdev->dev, "cannot reserved region\n"); + ret = -ENXIO; + goto out; + } + + ast_peci.reg_base = ioremap(res->start, resource_size(res)); + if (!ast_peci.reg_base) { + ret = -EIO; + goto out_region; + } + + ast_peci.irq = platform_get_irq(pdev, 0); + if (ast_peci.irq < 0) { + dev_err(&pdev->dev, "no irq specified\n"); + ret = -ENOENT; + goto out_region; + } + + ret = request_irq(ast_peci.irq, ast_peci_handler, IRQF_SHARED, + "ast-peci", &ast_peci); + + if (ret) { + printk(KERN_INFO "PECI: Failed request irq %d\n", ast_peci.irq); + goto out_region; + } + + ret = misc_register(&ast_peci_misc); + if (ret){ + printk(KERN_ERR "PECI : failed to request interrupt\n"); + goto out_irq; + } + + ast_peci_ctrl_init(); + + printk(KERN_INFO "ast_peci: driver successfully loaded.\n"); + + return 0; + + +out_irq: + free_irq(ast_peci.irq, NULL); +out_region: + release_mem_region(res->start, res->end - res->start + 1); +out: + printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret); + return ret; +} + +static int ast_peci_remove(struct platform_device *pdev) +{ + struct resource *res; + + PECI_DBG("ast_peci_remove\n"); + + misc_deregister(&ast_peci_misc); + + free_irq(ast_peci.irq, &ast_peci); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + iounmap(ast_peci.reg_base); + + release_mem_region(res->start, res->end - res->start + 1); + + return 0; +} + +#ifdef CONFIG_PM +static int +ast_peci_suspend(struct platform_device *pdev, pm_message_t state) +{ + printk("ast_peci_suspend : TODO \n"); + return 0; +} + +static int +ast_peci_resume(struct platform_device *pdev) +{ + ast_peci_ctrl_init(); + return 0; +} + +#else +#define ast_peci_suspend NULL +#define ast_peci_resume NULL +#endif + +static struct platform_driver ast_peci_driver = { + .probe = ast_peci_probe, + .remove = __devexit_p(ast_peci_remove), + .suspend = ast_peci_suspend, + .resume = ast_peci_resume, + 
.driver = { + .name = "ast_peci", + .owner = THIS_MODULE, + }, +}; + +static int __init +ast_peci_init(void) +{ + return platform_driver_register(&ast_peci_driver); +} + +static void __exit +ast_peci_exit(void) +{ + platform_driver_unregister(&ast_peci_driver); +} + +module_init(ast_peci_init); +module_exit(ast_peci_exit); + +MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>"); +MODULE_DESCRIPTION("PECI driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a4424c8b9085..1963ba6336cf 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -265,9 +265,13 @@ static ssize_t gpio_value_store(struct device *dev, if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; - else if (!test_bit(FLAG_IS_OUT, &desc->flags)) +#if 0 //Ryan Modify for AST GPIO Feature + else if (!test_bit(FLAG_IS_OUT, &desc->flags)) { status = -EPERM; + } else { +#else else { +#endif long value; status = strict_strtol(buf, 0, &value); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index c709e821f04b..681782be1b90 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -854,6 +854,22 @@ config SENSORS_LIS3LV02D This driver can also be built as a module. If so, the module will be called lis3lv02d. +config SENSORS_AST_ADC + tristate "ASPEED ADC Controller Driver" + depends on ARCH_ASPEED + default n + help + This driver provides support for the ASPEED ADC + Controller, which provides an Voltage Sensor. + +config SENSORS_AST_PWM_FAN + tristate "ASPEED PWM & FAN Tacho Controller Driver" + depends on ARCH_ASPEED + default n + help + This driver provides support for the ASPEED PWM & FAN Tacho + Controller, which provides an Sensor, fan control. 
+ config SENSORS_APPLESMC tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)" depends on INPUT && X86 diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 58fc5be5355d..562972747cce 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o obj-$(CONFIG_SENSORS_ADT7473) += adt7473.o obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o +obj-$(CONFIG_SENSORS_AST_ADC) += ast_adc.o +obj-$(CONFIG_SENSORS_AST_PWM_FAN) += ast_pwm_fan.o obj-$(CONFIG_SENSORS_AMS) += ams/ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o diff --git a/drivers/hwmon/ast_adc.c b/drivers/hwmon/ast_adc.c new file mode 100644 index 000000000000..0969e398a8c8 --- /dev/null +++ b/drivers/hwmon/ast_adc.c @@ -0,0 +1,734 @@ +/* + * ast_adc.c + * + * ASPEED ADC controller driver + * + * Copyright (C) 2012-2020 ASPEED Technology Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * History: + * 2012.11.26: Initial version [Ryan Chen] + */ + +/* attr ADC sysfs 0~max adc channel +* 0 - show/store enable +* 3 - show value +* 1 - show/store alarm_en set enable +* 2 - show alarm get statuse +* 4 - show/store upper +* 5 - show/store lower */ + + +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/mutex.h> +#include <linux/hwmon-sysfs.h> +#include <linux/hwmon.h> +#include <linux/workqueue.h> +#include <linux/sysfs.h> +#include <linux/err.h> + +#include <mach/hardware.h> +#include <asm/irq.h> +#include <asm/io.h> + +#include <plat/regs-adc.h> +#include <plat/ast-scu.h> + + +#define REST_DESIGN 0 + +struct adc_vcc_ref_data { + int v2; + int r1; + int r2; +}; + +static struct adc_vcc_ref_data adc_vcc_ref[5] = { + [0] = { + .v2 = 0, + .r1 = 5600, + .r2 = 1000, + }, + [1] = { + .v2 = -12, + .r1 = 1000, + .r2 = 10000, + }, + [2] = { + .v2 = 0, + .r1 = 1800, + .r2 = 1000, + }, + [3] = { + .v2 = -5, + .r1 = 2200, + .r2 = 10000, + }, + [4] = { + .v2 = 0, + .r1 = 56000, + .r2 = 1000, + }, +}; + +struct ast_adc_data { + struct device *hwmon_dev; + void __iomem *reg_base; /* virtual */ + int irq; //ADC IRQ number + int compen_value; //Compensating value +}; + +struct ast_adc_data *ast_adc; + +static u8 ast_get_adc_en(struct ast_adc_data *ast_adc, u8 adc_ch); + + +static inline void +ast_adc_write(struct ast_adc_data *ast_adc, u32 val, u32 reg) +{ +// printk("write offset: %x, val: %x \n",reg,val); + writel(val, ast_adc->reg_base+ reg); +} + +static inline u32 +ast_adc_read(struct ast_adc_data *ast_adc, u32 reg) +{ + u32 val = readl(ast_adc->reg_base + reg); +// printk("read offset: %x, val: %x \n",reg,val); + return val; +} + +static void ast_adc_ctrl_init(void) +{ + u32 pclk; + ast_adc_write(ast_adc, AST_ADC_CTRL_COMPEN | AST_ADC_CTRL_NORMAL | AST_ADC_CTRL_EN, AST_ADC_CTRL); + + //Set wait a sensing cycle t (s) = 1000 * 12 * (1/PCLK) * 2 * 
(ADC0c[31:17] + 1) * (ADC0c[9:0] +1) + //ex : pclk = 48Mhz , ADC0c[31:17] = 0, ADC0c[9:0] = 0x40 : 64, ADC0c[31:17] = 0x3e7 : 999 + // --> 0.0325s = 12 * 2 * (0x3e7 + 1) *(64+1) / 48000000 + // --> 0.0005s = 12 * 2 * (0x3e7 + 1) / 48000000 + + pclk = ast_get_pclk(); + +#if defined(CONFIG_ARCH_AST2300) + ast_adc_write(ast_adc, 0x3e7, AST_ADC_CLK); + + ast_adc_write(ast_adc, AST_ADC_CTRL_CH12_EN | AST_ADC_CTRL_COMPEN_CLR| ast_adc_read(ast_adc, AST_ADC_CTRL), AST_ADC_CTRL); + + mdelay(50); + + //compensating value = 0x200 - ADC10[9:0] + if(ast_adc_read(ast_adc, AST_ADC_CH12_13) & (0x1 << 8)) + ast_adc->compen_value = 0x200 - (ast_adc_read(ast_adc, AST_ADC_CH12_13) & AST_ADC_L_CH_MASK); + else + ast_adc->compen_value = 0 - (ast_adc_read(ast_adc, AST_ADC_CH12_13) & AST_ADC_L_CH_MASK); + + printk("compensating value %d \n",ast_adc->compen_value); + +#elif defined(CONFIG_ARCH_AST2400) + + //For AST2400 A0 workaround ... ADC0c = 1 ; +// ast_adc_write(ast_adc, 1, AST_ADC_CLK); +// ast_adc_write(ast_adc, (0x3e7<< 17) | 0x40, AST_ADC_CLK); + ast_adc_write(ast_adc, 0x40, AST_ADC_CLK); + + ast_adc_write(ast_adc, AST_ADC_CTRL_CH0_EN | AST_ADC_CTRL_COMPEN | AST_ADC_CTRL_NORMAL | AST_ADC_CTRL_EN, AST_ADC_CTRL); + + ast_adc_read(ast_adc, AST_ADC_CTRL); + + mdelay(1); + + //compensating value = 0x200 - ADC10[9:0] + ast_adc->compen_value = 0x200 - (ast_adc_read(ast_adc, AST_ADC_CH0_1) & AST_ADC_L_CH_MASK); + printk("compensating value %d \n",ast_adc->compen_value); + +#elif defined(CONFIG_ARCH_AST2500) +// TODO ... 
+// scu read trim +// write trim 0xc4 [3:0] + + ast_adc_write(ast_adc, 0x40, AST_ADC_CLK); + + ast_adc_write(ast_adc, AST_ADC_CTRL_NORMAL | AST_ADC_CTRL_EN, AST_ADC_CTRL); + + while(!ast_adc_read(ast_adc, AST_ADC_CTRL) & 0x100); + + ast_adc_write(ast_adc, AST_ADC_CTRL_COMPEN | AST_ADC_CTRL_NORMAL | AST_ADC_CTRL_EN, AST_ADC_CTRL); + + while(ast_adc_read(ast_adc, AST_ADC_CTRL) & AST_ADC_CTRL_COMPEN); + + //compensating value = 0x200 - ADC10[9:0] + ast_adc->compen_value = 0x200 - ((ast_adc_read(ast_adc, AST_ADC_TRIM) >> 16) & 0x3ff); + printk("compensating value %d \n",ast_adc->compen_value); + +#else +#err "No define for ADC " +#endif + + ast_adc_write(ast_adc, AST_ADC_CTRL_NORMAL | AST_ADC_CTRL_EN, AST_ADC_CTRL); + +} + +static u16 +ast_get_adc_hyster_lower(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + u16 tmp=0; + tmp = ast_adc_read(ast_adc, AST_ADC_HYSTER0 + (adc_ch *4)) & AST_ADC_L_BOUND; + +// printk("read val = %d \n",tmp); + + return tmp; + +} + +static void +ast_set_adc_hyster_lower(struct ast_adc_data *ast_adc, u8 adc_ch, u16 value) +{ + ast_adc_write(ast_adc, + (ast_adc_read(ast_adc, AST_ADC_HYSTER0 + (adc_ch *4)) & ~AST_ADC_L_BOUND) | + value, + AST_ADC_HYSTER0 + (adc_ch *4)); + +} + +static u16 +ast_get_adc_hyster_upper(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + u16 tmp=0; + tmp = ((ast_adc_read(ast_adc, AST_ADC_HYSTER0 + (adc_ch *4)) & AST_ADC_H_BOUND) >> 16); + +// printk("read val = %d \n",tmp); + + return tmp; +} + +static void +ast_set_adc_hyster_upper(struct ast_adc_data *ast_adc, u8 adc_ch, u32 value) +{ + ast_adc_write(ast_adc, + (ast_adc_read(ast_adc, AST_ADC_HYSTER0 + (adc_ch *4)) & ~AST_ADC_H_BOUND) | + (value << 16), + AST_ADC_HYSTER0 + (adc_ch *4)); + +} + +static u8 +ast_get_adc_hyster_en(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + //tacho source + if(ast_adc_read(ast_adc, AST_ADC_HYSTER0 + (adc_ch *4)) & AST_ADC_HYSTER_EN) + return 1; + else + return 0; +} + +static void +ast_set_adc_hyster_en(struct ast_adc_data *ast_adc, u8 adc_ch, 
u8 enable) +{ + //tacho source + if(enable == 1) + ast_adc_write(ast_adc, + ast_adc_read(ast_adc, AST_ADC_HYSTER0 + (adc_ch *4)) | AST_ADC_HYSTER_EN, + AST_ADC_HYSTER0 + (adc_ch *4)); + else + ast_adc_write(ast_adc, + ast_adc_read(ast_adc, AST_ADC_HYSTER0 + (adc_ch *4)) & ~AST_ADC_HYSTER_EN, + AST_ADC_HYSTER0 + (adc_ch *4)); +} + +static u16 +ast_get_adc_lower(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + u16 tmp=0; + tmp = ast_adc_read(ast_adc, AST_ADC_BOUND0 + (adc_ch *4)) & AST_ADC_L_BOUND; + +// printk("read val = %d \n",tmp); + + return tmp; + +} + +static void +ast_set_adc_lower(struct ast_adc_data *ast_adc, u8 adc_ch, u16 value) +{ + ast_adc_write(ast_adc, + (ast_adc_read(ast_adc, AST_ADC_BOUND0 + (adc_ch *4)) & ~AST_ADC_L_BOUND) | + value, + AST_ADC_BOUND0 + (adc_ch *4)); + +} + +static u16 +ast_get_adc_upper(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + u16 tmp=0; + tmp = ((ast_adc_read(ast_adc, AST_ADC_BOUND0 + (adc_ch *4)) & AST_ADC_H_BOUND) >> 16); + + printk("read val = %d \n",tmp); + + return tmp; + + +} + +static void +ast_set_adc_upper(struct ast_adc_data *ast_adc, u8 adc_ch, u32 value) +{ + ast_adc_write(ast_adc, + (ast_adc_read(ast_adc, AST_ADC_BOUND0 + (adc_ch *4)) & ~AST_ADC_H_BOUND) | + (value << 16), + AST_ADC_BOUND0 + (adc_ch *4)); + +} + + +static u8 +ast_get_adc_alarm(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + //adc ch source + if(ast_adc_read(ast_adc, AST_ADC_IER) & (0x1 << adc_ch)) + return 1; + else + return 0; +} + +static u16 +ast_get_adc_value(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + int tmp; + + switch(adc_ch) { + case 0: + tmp = ast_adc_read(ast_adc, AST_ADC_CH0_1) & AST_ADC_L_CH_MASK; + break; + case 1: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH0_1) & AST_ADC_H_CH_MASK) >> 16; + break; + case 2: + tmp = ast_adc_read(ast_adc, AST_ADC_CH2_3) & AST_ADC_L_CH_MASK; + break; + case 3: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH2_3) & AST_ADC_H_CH_MASK) >> 16; + break; + case 4: + tmp = ast_adc_read(ast_adc, AST_ADC_CH4_5) & 
AST_ADC_L_CH_MASK; + break; + case 5: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH4_5) & AST_ADC_H_CH_MASK) >> 16; + break; + case 6: + tmp = ast_adc_read(ast_adc, AST_ADC_CH6_7) & AST_ADC_L_CH_MASK; + break; + case 7: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH6_7) & AST_ADC_H_CH_MASK) >> 16; + break; + case 8: + tmp = ast_adc_read(ast_adc, AST_ADC_CH8_9) & AST_ADC_L_CH_MASK; + break; + case 9: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH8_9) & AST_ADC_H_CH_MASK) >> 16; + break; + case 10: + tmp = ast_adc_read(ast_adc, AST_ADC_CH10_11) & AST_ADC_L_CH_MASK; + break; + case 11: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH10_11) & AST_ADC_H_CH_MASK) >> 16; + break; + case 12: + tmp = ast_adc_read(ast_adc, AST_ADC_CH12_13) & AST_ADC_L_CH_MASK; + break; + case 13: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH12_13) & AST_ADC_H_CH_MASK) >> 16; + break; + case 14: + tmp = ast_adc_read(ast_adc, AST_ADC_CH14_15) & AST_ADC_L_CH_MASK; + break; + case 15: + tmp = (ast_adc_read(ast_adc, AST_ADC_CH14_15) & AST_ADC_H_CH_MASK) >> 16; + break; + + } + + tmp += ast_adc->compen_value; + +// printk("voltage = %d \n",tmp); + + return tmp; + +} + +static u8 +ast_get_adc_en(struct ast_adc_data *ast_adc, u8 adc_ch) +{ + u8 tmp=0; + + if(ast_adc_read(ast_adc, AST_ADC_CTRL) & (0x1 << (16+adc_ch))) + tmp = 1; + else + tmp = 0; + + return tmp; + +} + +static void +ast_set_adc_en(struct ast_adc_data *ast_adc, u8 adc_ch, u8 enable) +{ + if(enable) + ast_adc_write(ast_adc, ast_adc_read(ast_adc, AST_ADC_CTRL) | (0x1 << (16+adc_ch)), AST_ADC_CTRL); + else + ast_adc_write(ast_adc, ast_adc_read(ast_adc, AST_ADC_CTRL) & ~(0x1 << (16+adc_ch)), AST_ADC_CTRL); +} + + +/* attr ADC sysfs 0~max adc channel +* 0 - show/store channel enable +* 1 - show value +* 2 - show alarm get statuse +* 3 - show/store upper +* 4 - show/store lower +* 5 - show/store hystersis enable +* 6 - show/store hystersis upper +* 7 - show/store hystersis low +*/ + +static ssize_t +ast_show_adc(struct device *dev, struct device_attribute *attr, 
char *sysfsbuf) +{ + struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); + u16 tmp; + u32 voltage,tmp1, tmp2,tmp3; + + //sensor_attr->index : pwm_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //channel enable, disable + return sprintf(sysfsbuf, "%d : %s\n", ast_get_adc_en(ast_adc,sensor_attr->index),ast_get_adc_en(ast_adc,sensor_attr->index) ? "Enable":"Disable"); + break; + case 1: //value + tmp = ast_get_adc_value(ast_adc, sensor_attr->index); + //Voltage Sense Method + tmp1 = (adc_vcc_ref[REST_DESIGN].r1 + adc_vcc_ref[REST_DESIGN].r2) * tmp * 25 * 10; + tmp2 = adc_vcc_ref[REST_DESIGN].r2 * 1023 ; + + tmp3 = (adc_vcc_ref[REST_DESIGN].r1 * adc_vcc_ref[REST_DESIGN].v2) / adc_vcc_ref[REST_DESIGN].r2; + // printk("tmp3 = %d \n",tmp3); + voltage = (tmp1/tmp2) - tmp3; + + return sprintf(sysfsbuf, "%d.%d (V)\n",voltage/100, voltage%100); + break; + case 2: //alarm + return sprintf(sysfsbuf, "%d \n", ast_get_adc_alarm(ast_adc,sensor_attr->index)); + break; + case 3: //upper + return sprintf(sysfsbuf, "%d \n", ast_get_adc_upper(ast_adc,sensor_attr->index)); + break; + case 4: //lower + return sprintf(sysfsbuf, "%d \n", ast_get_adc_lower(ast_adc,sensor_attr->index)); + break; + case 5: //hystersis enable + return sprintf(sysfsbuf, "%d : %s\n", ast_get_adc_hyster_en(ast_adc,sensor_attr->index),ast_get_adc_hyster_en(ast_adc,sensor_attr->index) ? 
"Enable":"Disable"); + break; + case 6: //hystersis upper + return sprintf(sysfsbuf, "%d \n", ast_get_adc_hyster_upper(ast_adc,sensor_attr->index)); + break; + case 7: //hystersis lower + return sprintf(sysfsbuf, "%d \n", ast_get_adc_hyster_lower(ast_adc,sensor_attr->index)); + break; + + default: + return -EINVAL; + break; + } +} + +static ssize_t +ast_store_adc(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) +{ + u32 input_val; + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + input_val = simple_strtoul(sysfsbuf, NULL, 10); + + //sensor_attr->index : pwm_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + ast_set_adc_en(ast_adc, sensor_attr->index, input_val); + break; + case 1: //value + + break; + case 2: //alarm + break; + case 3: + ast_set_adc_upper(ast_adc, sensor_attr->index, input_val); + break; + case 4: + ast_set_adc_lower(ast_adc, sensor_attr->index, input_val); + break; + case 5: //hystersis + ast_set_adc_hyster_en(ast_adc, sensor_attr->index, input_val); + break; + case 6: + ast_set_adc_hyster_upper(ast_adc, sensor_attr->index, input_val); + break; + case 7: + ast_set_adc_hyster_lower(ast_adc, sensor_attr->index, input_val); + break; + + default: + return -EINVAL; + break; + } + + return count; +} + +/* attr ADC sysfs 0~max adc channel +* 0 - show/store channel enable +* 1 - show value +* 2 - show alarm get statuse +* 3 - show/store upper +* 4 - show/store lower +* 5 - show/store hystersis enable +* 6 - show/store hystersis upper +* 7 - show/store hystersis low +*/ + +#define sysfs_adc_ch(index) \ +static SENSOR_DEVICE_ATTR_2(adc##index##_en, S_IRUGO | S_IWUSR, \ + ast_show_adc, ast_store_adc, 0, index); \ +\ +static SENSOR_DEVICE_ATTR_2(adc##index##_value, S_IRUGO | S_IWUSR, \ + ast_show_adc, NULL, 1, index); \ +\ +static SENSOR_DEVICE_ATTR_2(adc##index##_alarm, S_IRUGO | S_IWUSR, \ + ast_show_adc, NULL, 2, index); \ +\ +static 
SENSOR_DEVICE_ATTR_2(adc##index##_upper, S_IRUGO | S_IWUSR, \ + ast_show_adc, ast_store_adc, 3, index); \ +\ +static SENSOR_DEVICE_ATTR_2(adc##index##_lower, S_IRUGO | S_IWUSR, \ + ast_show_adc, ast_store_adc, 4, index); \ +\ +static SENSOR_DEVICE_ATTR_2(adc##index##_hyster_en, S_IRUGO | S_IWUSR, \ + ast_show_adc, ast_store_adc, 5, index); \ +\ +static SENSOR_DEVICE_ATTR_2(adc##index##_hyster_upper, S_IRUGO | S_IWUSR, \ + ast_show_adc, ast_store_adc, 6, index); \ +\ +static SENSOR_DEVICE_ATTR_2(adc##index##_hyster_lower, S_IRUGO | S_IWUSR, \ + ast_show_adc, ast_store_adc, 7, index); \ +\ +static struct attribute *adc##index##_attributes[] = { \ + &sensor_dev_attr_adc##index##_en.dev_attr.attr, \ + &sensor_dev_attr_adc##index##_value.dev_attr.attr, \ + &sensor_dev_attr_adc##index##_alarm.dev_attr.attr, \ + &sensor_dev_attr_adc##index##_upper.dev_attr.attr, \ + &sensor_dev_attr_adc##index##_lower.dev_attr.attr, \ + &sensor_dev_attr_adc##index##_hyster_en.dev_attr.attr, \ + &sensor_dev_attr_adc##index##_hyster_upper.dev_attr.attr, \ + &sensor_dev_attr_adc##index##_hyster_lower.dev_attr.attr, \ + NULL \ +}; + +/* + * Create the needed functions for each pwm using the macro defined above + * (4 pwms are supported) + */ +sysfs_adc_ch(0); +sysfs_adc_ch(1); +sysfs_adc_ch(2); +sysfs_adc_ch(3); +sysfs_adc_ch(4); +sysfs_adc_ch(5); +sysfs_adc_ch(6); +sysfs_adc_ch(7); +sysfs_adc_ch(8); +sysfs_adc_ch(9); +sysfs_adc_ch(10); +sysfs_adc_ch(11); +#if defined(CONFIG_ARCH_AST2400) || defined(CONFIG_ARCH_AST2500) +sysfs_adc_ch(12); +sysfs_adc_ch(13); +sysfs_adc_ch(14); +sysfs_adc_ch(15); +#endif + +static const struct attribute_group adc_attribute_groups[] = { + { .attrs = adc0_attributes }, + { .attrs = adc1_attributes }, + { .attrs = adc2_attributes }, + { .attrs = adc3_attributes }, + { .attrs = adc4_attributes }, + { .attrs = adc5_attributes }, + { .attrs = adc6_attributes }, + { .attrs = adc7_attributes }, + { .attrs = adc8_attributes }, + { .attrs = adc9_attributes }, + { .attrs 
= adc10_attributes }, + { .attrs = adc11_attributes }, +#if defined(CONFIG_ARCH_AST2400) || defined(CONFIG_ARCH_AST2500) + { .attrs = adc12_attributes }, + { .attrs = adc13_attributes }, + { .attrs = adc14_attributes }, + { .attrs = adc15_attributes }, +#endif +}; + + +static int +ast_adc_probe(struct platform_device *pdev) +{ + struct resource *res; + int err; + int ret=0; + int i; + + dev_dbg(&pdev->dev, "ast_adc_probe \n"); + + ast_adc = kzalloc(sizeof(struct ast_adc_data), GFP_KERNEL); + if (!ast_adc) { + ret = -ENOMEM; + goto out; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (NULL == res) { + dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n"); + ret = -ENOENT; + goto out_mem; + } + + if (!request_mem_region(res->start, resource_size(res), res->name)) { + dev_err(&pdev->dev, "cannot reserved region\n"); + ret = -ENXIO; + goto out_mem; + } + + ast_adc->reg_base = ioremap(res->start, resource_size(res)); + if (!ast_adc->reg_base) { + ret = -EIO; + goto out_region; + } + + ast_adc->irq = platform_get_irq(pdev, 0); + if (ast_adc->irq < 0) { + dev_err(&pdev->dev, "no irq specified\n"); + ret = -ENOENT; + goto out_region; + } + + /* Register sysfs hooks */ + ast_adc->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(ast_adc->hwmon_dev)) { + ret = PTR_ERR(ast_adc->hwmon_dev); + goto out_region; + } + + for(i=0; i<MAX_CH_NO; i++) { + err = sysfs_create_group(&pdev->dev.kobj, &adc_attribute_groups[i]); + if (err) + goto out_region; + } + + ast_adc_ctrl_init(); + + printk(KERN_INFO "ast_adc: driver successfully loaded.\n"); + + return 0; + + +//out_irq: +// free_irq(ast_adc->irq, NULL); +out_region: + release_mem_region(res->start, res->end - res->start + 1); +out_mem: + kfree(ast_adc); +out: + printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret); + return ret; +} + +static int +ast_adc_remove(struct platform_device *pdev) +{ + int i=0; + struct ast_adc_data *ast_adc = platform_get_drvdata(pdev); + struct resource *res; + 
printk(KERN_INFO "ast_adc: driver unloaded.\n"); + + hwmon_device_unregister(ast_adc->hwmon_dev); + + for(i=0; i<5; i++) + sysfs_remove_group(&pdev->dev.kobj, &adc_attribute_groups[i]); + + platform_set_drvdata(pdev, NULL); +// free_irq(ast_adc->irq, ast_adc); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iounmap(ast_adc->reg_base); + release_mem_region(res->start, res->end - res->start + 1); + kfree(ast_adc); + return 0; +} + +#ifdef CONFIG_PM +static int +ast_adc_suspend(struct platform_device *pdev, pm_message_t state) +{ + printk("ast_adc_suspend : TODO \n"); + return 0; +} + +static int +ast_adc_resume(struct platform_device *pdev) +{ + ast_adc_ctrl_init(); + return 0; +} + +#else +#define ast_adc_suspend NULL +#define ast_adc_resume NULL +#endif + +static struct platform_driver ast_adc_driver = { + .probe = ast_adc_probe, + .remove = __devexit_p(ast_adc_remove), + .suspend = ast_adc_suspend, + .resume = ast_adc_resume, + .driver = { + .name = "ast_adc", + .owner = THIS_MODULE, + }, +}; + +static int __init +ast_adc_init(void) +{ + return platform_driver_register(&ast_adc_driver); +} + +static void __exit +ast_adc_exit(void) +{ + platform_driver_unregister(&ast_adc_driver); +} + +module_init(ast_adc_init); +module_exit(ast_adc_exit); + +MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>"); +MODULE_DESCRIPTION("ADC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/ast_lcp_80h.c b/drivers/hwmon/ast_lcp_80h.c new file mode 100755 index 000000000000..681d2d6cac1f --- /dev/null +++ b/drivers/hwmon/ast_lcp_80h.c @@ -0,0 +1,312 @@ +/* + * ast_lpc_snoop.c + * + * ASPEED LPC Snoop controller driver + * + * Copyright (C) 2012-2020 ASPEED Technology Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * History: + * 2012.11.26: Initial version [Ryan Chen] + */ + +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/mutex.h> +#include <linux/hwmon-sysfs.h> +#include <linux/hwmon.h> +#include <linux/workqueue.h> +#include <linux/sysfs.h> +#include <linux/err.h> + +#include <mach/hardware.h> +#include <asm/irq.h> +#include <asm/io.h> + +#include <plat/regs-1070_lpc.h> + +struct ast_clpc_data { + struct device *hwmon_dev; + void __iomem *reg_base; /* virtual */ + int irq; //ADC IRQ number + u8 80h_data; //80h_data +}; + +static inline void +ast_clpc_write(struct ast_clpc_data *ast_clpc, u32 val, u32 reg) +{ +// printk("write offset: %x, val: %x \n",reg,val); + writel(val, ast_clpc->reg_base+ reg); +} + +static inline u32 +ast_clpc_read(struct ast_adc_data *ast_clpc, u32 reg) +{ + u32 val = readl(ast_clpc->reg_base + reg); +// printk("read offset: %x, val: %x \n",reg,val); + return val; +} + +static irqreturn_t ast_lpc_80h_handler(int irq, void *dev_id) +{ + struct ast_clpc_data *ast_clpc = dev_id; + u32 sts = ast_clpc_read(ast_clpc, AST1070_LPC_80H_CTRL); + + if(isr_sts & AST1070_LPC_80H_CLR) { + ast_clpc->80h_data = ast_clpc_read(ast_clpc, AST1070_LPC_80H_DATA); + ast_clpc_write(ast_clpc, AST1070_LPC_80H_CLR, AST1070_LPC_80H_DATA); + } else + printk("IRQ ISSUE bug \n"); + + return IRQ_HANDLED; + +} + +static void ast_clpc_80h_init(struct ast_clpc_data *ast_clpc, u16 addr) +{ + ast_clpc_write(ast_clpc, AST1070_LPC_80H_CLR, AST1070_LPC_80H_CTRL); + + //Snoop Port + ast_clpc_write(ast_clpc, addr & 0xff, AST1070_LPC_L_80H_ADDR); + ast_clpc_write(ast_clpc, (addr & 0xff) >> 8 , AST1070_LPC_H_80H_ADDR); + //Clear Interrupt and Enable + //AST1070 BUG :===: D[4] W1C + ast_clpc_write(ast_clpc, AST1070_LPC_80H_CLR, AST1070_LPC_80H_DATA); + ast_clpc_write(ast_clpc, AST1070_LPC_80H_CLR | AST1070_LPC_80H_EN, AST1070_LPC_80H_CTRL); +} + +/* attr 80H sysfs 0~max adc 
channel +* 0 - show/store 80h addr +* 1 - show 80h data +*/ + +static ssize_t +ast_show_clpc(struct device *dev, struct device_attribute *attr, char *sysfsbuf) +{ + struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); + u16 tmp; + u32 voltage,tmp1, tmp2,tmp3; + + //sensor_attr->index : pwm_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //channel enable, disable + return sprintf(sysfsbuf, "%d \n", ast_clpc->80h_data); + break; + + default: + return -EINVAL; + break; + } +} + +static ssize_t +ast_store_clpc(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) +{ + u32 input_val; + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + input_val = simple_strtoul(sysfsbuf, NULL, 10); + + //sensor_attr->index : pwm_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + + break; + default: + return -EINVAL; + break; + } + + return count; +} + +/* attr ADC sysfs 0~max adc channel +* 0 - show 80h data +*/ + +#define sysfs_clpc(index) \ +static SENSOR_DEVICE_ATTR_2(clpc##index##_en, S_IRUGO | S_IWUSR, \ + ast_show_clpc, NULL, 0, index); \ +\ +static struct attribute *clpc##index##_attributes[] = { \ + &sensor_dev_attr_clpc##index##_80h.dev_attr.attr, \ + NULL \ +}; + +/* + * Create the needed functions for each pwm using the macro defined above + * (4 pwms are supported) + */ +sysfs_clpc(0); + +static const struct attribute_group clpc_attribute_groups[] = { + { .attrs = clpc0_attributes }, +}; + + +static int +ast_clpc_probe(struct platform_device *pdev) +{ + struct ast_clpc_data *ast_clpc; + struct resource *res; + int err; + int ret=0; + int i; + + dev_dbg(&pdev->dev, "ast_clpc_probe \n"); + + ast_clpc = kzalloc(sizeof(struct ast_clpc_data), GFP_KERNEL); + if (!ast_adc) { + ret = -ENOMEM; + goto out; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (NULL == res) { + dev_err(&pdev->dev, "cannot get 
IORESOURCE_MEM\n"); + ret = -ENOENT; + goto out_mem; + } + + if (!request_mem_region(res->start, resource_size(res), res->name)) { + dev_err(&pdev->dev, "cannot reserved region\n"); + ret = -ENXIO; + goto out_mem; + } + + ast_clpc->reg_base = ioremap(res->start, resource_size(res)); + if (!ast_clpc->reg_base) { + ret = -EIO; + goto out_region; + } + + ast_clpc->irq = platform_get_irq(pdev, 3); + if (ast_clpc->irq < 0) { + dev_err(&pdev->dev, "no irq specified\n"); + ret = -ENOENT; + goto out_region; + } + + + /* Register sysfs hooks */ + ast_clpc->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(ast_clpc->hwmon_dev)) { + ret = PTR_ERR(ast_clpc->hwmon_dev); + goto out_region; + } + + for(i=0; i< MAX_CH_NO; i++) { + err = sysfs_create_group(&pdev->dev.kobj, &clpc_attribute_groups[i]); + if (err) + goto out_region; + } + + ast_clpc_80h_init(); + + ret = request_irq(ast_clpc->irq, ast_lpc_handler, IRQF_SHARED, + i2c_dev->adap.name, i2c_dev); + if (ret) { + printk(KERN_INFO "I2C: Failed request irq %d\n", i2c_dev->irq); + goto out_region; + } + + platform_set_drvdata(pdev, ast_clpc); + + printk(KERN_INFO "ast_adc: driver successfully loaded.\n"); + + return 0; + + +//out_irq: +// free_irq(ast_clpc->irq, NULL); +out_region: + release_mem_region(res->start, res->end - res->start + 1); +out_mem: + kfree(ast_clpc); +out: + printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret); + return ret; +} + +static int +ast_adc_remove(struct platform_device *pdev) +{ + int i=0; + struct ast_adc_data *ast_clpc = platform_get_drvdata(pdev); + struct resource *res; + printk(KERN_INFO "ast_adc: driver unloaded.\n"); + + hwmon_device_unregister(ast_clpc->hwmon_dev); + + for(i=0; i<5; i++) + sysfs_remove_group(&pdev->dev.kobj, &clpc_attribute_groups[i]); + + platform_set_drvdata(pdev, NULL); +// free_irq(ast_adc->irq, ast_adc); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iounmap(ast_clpc->reg_base); + release_mem_region(res->start, res->end - 
res->start + 1); + kfree(ast_clpc); + return 0; +} + +#ifdef CONFIG_PM +static int +ast_adc_suspend(struct platform_device *pdev, pm_message_t state) +{ + printk("ast_adc_suspend : TODO \n"); + return 0; +} + +static int +ast_adc_resume(struct platform_device *pdev) +{ + ast_adc_ctrl_init(); + return 0; +} + +#else +#define ast_adc_suspend NULL +#define ast_adc_resume NULL +#endif + +static struct platform_driver ast_adc_driver = { + .probe = ast_adc_probe, + .remove = __devexit_p(ast_adc_remove), + .suspend = ast_adc_suspend, + .resume = ast_adc_resume, + .driver = { + .name = "ast_adc", + .owner = THIS_MODULE, + }, +}; + +static int __init +ast_adc_init(void) +{ + return platform_driver_register(&ast_adc_driver); +} + +static void __exit +ast_adc_exit(void) +{ + platform_driver_unregister(&ast_adc_driver); +} + +module_init(ast_adc_init); +module_exit(ast_adc_exit); + +MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>"); +MODULE_DESCRIPTION("ADC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/ast_pwm_fan.c b/drivers/hwmon/ast_pwm_fan.c new file mode 100644 index 000000000000..02784c5b9e1e --- /dev/null +++ b/drivers/hwmon/ast_pwm_fan.c @@ -0,0 +1,2129 @@ +/* + * ast_pwm_fan.c + * + * ASPEED PWM & Fan Tacho controller driver + * + * Copyright (C) 2012-2020 ASPEED Technology Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * History: + * 2012.08.06: Initial version [Ryan Chen] + */ +/* CLK sysfs +* 0 : enable +* 1 : clk_source */ + +/* PWM sysfs A~H (0~7) +* 0 - show/store enable +* 1 - show/store type +* 2 - show/store falling +* 3 - show/store rising */ + +/*PWM M/N/O Type sysfs +* 0 - show/store unit +* 1 - show/store division_l +* 2 - show/store division_h */ + +/* FAN sysfs (0~15) +* - show/store enable +* - show/store source +* - show/store rpm +* - show/store alarm +* - show/store alarm_en */ + +/* Fan M/N/O Type sysfs +* 0 - show/store enable +* 1 - show/store mode +* 2 - show/store unit +* 3 - show/store division +* 4 - show/store limit */ + +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/mutex.h> +#include <linux/hwmon-sysfs.h> +#include <linux/hwmon.h> +#include <linux/workqueue.h> +#include <linux/sysfs.h> +#include <linux/err.h> +#include <linux/slab.h> + +#include <asm/irq.h> +#include <asm/io.h> + +#ifdef CONFIG_COLDFIRE +#include <asm/arch/regs-pwm_fan.h> +#include <asm/arch/ast_pwm_techo.h> +#else +#include <plat/regs-pwm_fan.h> +#include <mach/ast_pwm_techo.h> +#endif + +//#define MCLK 1 + +struct ast_pwm_tacho_data { + struct device *hwmon_dev; + void __iomem *reg_base; /* virtual */ + int irq; + struct ast_pwm_driver_data *ast_pwm_data; +}; + +struct ast_pwm_tacho_data *ast_pwm_tacho; + +static u8 ast_get_pwm_type(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch); +static u8 ast_get_pwm_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch); +static u8 ast_get_tacho_type_division(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type); +static u16 ast_get_tacho_type_unit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type); +static u8 ast_get_pwm_clock_division_h(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type); +static u8 ast_get_pwm_clock_division_l(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type); +static u8 
ast_get_pwm_clock_unit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type); + +static inline void +ast_pwm_tacho_write(struct ast_pwm_tacho_data *ast_pwm_tacho, u32 val, u32 reg) +{ +// printk("write offset: %x, val: %x \n",reg,val); + writel(val, ast_pwm_tacho->reg_base+ reg); +} + +static inline u32 +ast_pwm_tacho_read(struct ast_pwm_tacho_data *ast_pwm_tacho, u32 reg) +{ + u32 val = readl(ast_pwm_tacho->reg_base + reg); +// printk("read offset: %x, val: %x \n",reg,val); + return val; +} + +///////////////////////////////////////// +/* +//1. The PWM base clock = 24Mhz / (Clock_Division_H D[7:4] in PTCR04 * Clock_Division_L D[3:0] in PTCR04) +//2. The frequency of PWM = The PWM base clock / (PWM period D[15:8] in PTCR04 + 1) +//3. If you plan to output 25Khz PWM frequency and 10% step of duty cycle, we suggest to set 0x943 in PTCR04 register. +// The PWM frequency = 24Mhz / (16 * 6 * (9 + 1)) = 25Khz +// duty cycle settings in the PTCR08 register: +// 0x1e786008 D[15:0] = 0x0900, duty = 90% +// 0x1e786008 D[15:0] = 0x0902, duty = 70% +// . +// . +// . 
+// 0x1e786008 D[15:0] = 0x0908, duty = 10% +// 0x1e786008 D[15:0] = 0x0909, duty = 100% +// 0x1e786008 D[15:0] = 0x0000, duty = 100% + (falling) - (rising+1) /unit +*/ + +static void ast_pwm_taco_init(void) +{ + //Enable PWM TACH CLK ************************************************** + // Set M/N/O out is 25Khz + //The PWM frequency = 24Mhz / (16 * 6 * (9 + 1)) = 25Khz + ast_pwm_tacho_write(ast_pwm_tacho, 0x09430943, AST_PTCR_CLK_CTRL); + ast_pwm_tacho_write(ast_pwm_tacho, 0x0943, AST_PTCR_CLK_EXT_CTRL); + + //FULL SPEED at initialize 100% pwm A~H + ast_pwm_tacho_write(ast_pwm_tacho, 0x0, AST_PTCR_DUTY0_CTRL); + ast_pwm_tacho_write(ast_pwm_tacho, 0x0, AST_PTCR_DUTY1_CTRL); + ast_pwm_tacho_write(ast_pwm_tacho, 0x0, AST_PTCR_DUTY2_CTRL); + ast_pwm_tacho_write(ast_pwm_tacho, 0x0, AST_PTCR_DUTY3_CTRL); + + //Set TACO M/N/O initial unit 0x1000, falling , divide 4 , Enable + ast_pwm_tacho_write(ast_pwm_tacho, 0x10000001, AST_PTCR_TYPEM_CTRL0); + ast_pwm_tacho_write(ast_pwm_tacho, 0x10000001, AST_PTCR_TYPEN_CTRL0); +#ifdef PWM_TYPE_O + ast_pwm_tacho_write(ast_pwm_tacho, 0x10000001, AST_PTCR_TYPEO_CTRL0); +#endif + + // TACO measure period = 24000000 / 2 / 2 / 256 / 4096 / 1 (only enable 1 TACHO) = 5.72Hz, it means that software needs to + // wait at least 0.2 sec to get refreshed TACO value. 
If you will enable more TACO or require faster response, you have to + // control the clock divisor and the period to be smaller + + //Full Range to do measure unit 0x1000 + ast_pwm_tacho_write(ast_pwm_tacho, 0x10000000, AST_PTCR_TYPEM_CTRL1); + ast_pwm_tacho_write(ast_pwm_tacho, 0x10000000, AST_PTCR_TYPEN_CTRL1); +#ifdef PWM_TYPE_O + ast_pwm_tacho_write(ast_pwm_tacho, 0x10000000, AST_PTCR_TYPEO_CTRL1); +#endif + + //TACO Source Selection, PWMA for fan0~15 + ast_pwm_tacho_write(ast_pwm_tacho, 0x0, AST_PTCR_TACH_SOURCE); + ast_pwm_tacho_write(ast_pwm_tacho, 0x0, AST_PTCR_TACH_SOURCE_EXT); + + //PWM A~D -> Disable , type M, + //Tacho 0~15 Disable + //CLK source 24Mhz +#ifdef MCLK + ast_pwm_tacho_write(ast_pwm_tacho, AST_PTCR_CTRL_CLK_MCLK | AST_PTCR_CTRL_CLK_EN, AST_PTCR_CTRL); +#else + ast_pwm_tacho_write(ast_pwm_tacho, AST_PTCR_CTRL_CLK_EN, AST_PTCR_CTRL); +#endif + +} + +/*index 0 : clk_en , 1: clk_source*/ +static ssize_t +ast_store_clk(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) +{ + u32 input_val; + struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); + + input_val = simple_strtoul(sysfsbuf, NULL, 10); + + if ((input_val > 1) || (input_val < 0)) + return -EINVAL; + + //sensor_attr->index : tacho# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //clk_en + if(input_val) + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) | AST_PTCR_CTRL_CLK_EN, + AST_PTCR_CTRL); + else + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & ~AST_PTCR_CTRL_CLK_EN, + AST_PTCR_CTRL); + break; + case 1: //clk_source + if(input_val) { + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) | AST_PTCR_CTRL_CLK_MCLK, + AST_PTCR_CTRL); + } else { + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & ~AST_PTCR_CTRL_CLK_MCLK, + AST_PTCR_CTRL); + } + break; + default: + 
return -EINVAL; + break; + } + + return count; + +} + + +static ssize_t +ast_show_clk(struct device *dev, struct device_attribute *attr, char *sysfsbuf) +{ + struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); + + //sensor_attr->index : fan# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //clk_en + if(AST_PTCR_CTRL_CLK_EN & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)) + return sprintf(sysfsbuf, "1: Enable\n"); + else + return sprintf(sysfsbuf, "0: Disable\n"); + break; + case 1: //clk_source + if(AST_PTCR_CTRL_CLK_MCLK & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)) + return sprintf(sysfsbuf, "1: MCLK \n"); + else + return sprintf(sysfsbuf, "0: 24Mhz\n"); + + break; + default: + return sprintf(sysfsbuf, "ERROR CLK Index\n"); + break; + } +} + +static u32 +ast_get_tacho_measure_period(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + u32 clk,clk_unit,div_h,div_l,tacho_unit,tacho_div; + //TODO ... 266 + if(AST_PTCR_CTRL_CLK_MCLK & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)) { + //TODO ..... 
+ clk = ast_pwm_tacho->ast_pwm_data->get_pwm_clock(); + } else + clk = 24*1000*1000; + + clk_unit = ast_get_pwm_clock_unit(ast_pwm_tacho,pwm_type); + div_h = ast_get_pwm_clock_division_h(ast_pwm_tacho,pwm_type); + div_h = 0x1 << div_h; + div_l = ast_get_pwm_clock_division_l(ast_pwm_tacho,pwm_type); +// div_l = (div_l) << 1; + if(div_l == 0) + div_l = 1; + else + div_l = div_l * 2; + + tacho_unit = ast_get_tacho_type_unit(ast_pwm_tacho,pwm_type); + tacho_div = ast_get_tacho_type_division(ast_pwm_tacho,pwm_type); + + tacho_div = 0x4 << (tacho_div*2); +// printk("clk %d,clk_unit %d, div_h %d, div_l %d, tacho_unit %d, tacho_div %d\n",clk,clk_unit, div_h, div_l, tacho_unit, tacho_div); + return clk/(clk_unit*div_h*div_l*tacho_div*tacho_unit); +} + +static u8 +ast_get_tacho_type_division(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + u32 tmp = 0; + switch(pwm_type) { + case PWM_TYPE_M: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("error type !! \n"); + break; + + } + + return ((tmp & TYPE_CTRL0_CLK_DIVISION_MASK) >> TYPE_CTRL0_CLK_DIVISION); +} + +static void +ast_set_tacho_type_division(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type, u32 division) +{ + u32 tmp = 0; + if(division > 0x7) + return; + + switch(pwm_type) { + case PWM_TYPE_M: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! 
\n"); + break; + } + + tmp &= ~TYPE_CTRL0_CLK_DIVISION_MASK; + tmp |= (division << TYPE_CTRL0_CLK_DIVISION); + + switch(pwm_type) { + case PWM_TYPE_M: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! \n"); + break; + } + +} + +static u16 +ast_get_tacho_type_unit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + u32 tmp = 0; + + switch(pwm_type) { + case PWM_TYPE_M: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! \n"); + break; + } + + return ((tmp & TYPE_CTRL0_FAN_PERIOD_MASK) >> TYPE_CTRL0_FAN_PERIOD); +} + +static void +ast_set_tacho_type_unit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type,u32 unit) +{ + u32 tmp = 0; + + if(unit > 0xffff) + return; + + switch(pwm_type) { + case PWM_TYPE_M: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! 
\n"); + break; + } + + tmp &= ~TYPE_CTRL0_FAN_PERIOD_MASK; + tmp |= (unit << TYPE_CTRL0_FAN_PERIOD); + + switch(pwm_type) { + case PWM_TYPE_M: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! \n"); + break; + } + +} + +static u32 +ast_get_tacho_type_mode(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + u32 tmp = 0; + + switch(pwm_type) { + case PWM_TYPE_M: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! \n"); + break; + } + + return ((tmp & TYPE_CTRL0_FAN_MODE_MASK) >> TYPE_CTRL0_FAN_MODE); +} + +static void +ast_set_tacho_type_mode(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type,u32 mode) +{ + u32 tmp = 0; + if(mode > 0x2) + return; + + switch(pwm_type) { + case PWM_TYPE_M: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! 
\n"); + break; + } + + tmp &= ~TYPE_CTRL0_FAN_MODE_MASK; + tmp |= (mode << TYPE_CTRL0_FAN_MODE); + + switch(pwm_type) { + case PWM_TYPE_M: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEM_CTRL0); + break; + case PWM_TYPE_N: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEN_CTRL0); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_TYPEO_CTRL0); + break; +#endif + default: + printk("ERROR type !! \n"); + break; + } + +} + +static u8 +ast_get_tacho_type_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + u8 tmp; + switch(pwm_type) { + case PWM_TYPE_M: + tmp = (TYPE_CTRL0_FAN_TYPE_EN & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0)); + break; + case PWM_TYPE_N: + tmp = (TYPE_CTRL0_FAN_TYPE_EN & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0)); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = (TYPE_CTRL0_FAN_TYPE_EN & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0)); + break; +#endif + default: + printk("ERROR type !! \n"); + break; + } + + return tmp; +} + +static void +ast_set_tacho_type_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type,u32 enable) +{ + switch(pwm_type) { + case PWM_TYPE_M: + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_CTRL0) | enable, + AST_PTCR_TYPEM_CTRL0); + + break; + case PWM_TYPE_N: + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_CTRL0) | enable, + AST_PTCR_TYPEN_CTRL0); + + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_CTRL0) | enable, + AST_PTCR_TYPEO_CTRL0); + + break; +#endif + default: + printk("ERROR type !! 
\n"); + break; + } +} + +static u32 +ast_get_tacho_type_limit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + switch(pwm_type) { + case PWM_TYPE_M: + return (FAN_LIMIT_MASK & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEM_LIMIT)); + break; + case PWM_TYPE_N: + return (FAN_LIMIT_MASK & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEN_LIMIT)); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + return (FAN_LIMIT_MASK & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TYPEO_LIMIT)); + break; +#endif + default: + printk("ERROR type !! \n"); + break; + } +} + +static void +ast_set_tacho_type_limit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type,u32 limit) +{ + if(limit > FAN_LIMIT_MASK) + return; + + switch(pwm_type) { + case PWM_TYPE_M: + ast_pwm_tacho_write(ast_pwm_tacho, limit, AST_PTCR_TYPEM_LIMIT); + break; + case PWM_TYPE_N: + ast_pwm_tacho_write(ast_pwm_tacho, limit, AST_PTCR_TYPEN_LIMIT); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + ast_pwm_tacho_write(ast_pwm_tacho, limit, AST_PTCR_TYPEO_LIMIT); + break; +#endif + default: + printk("ERROR type !! 
\n"); + break; + } + +} + +static u8 +ast_get_tacho_alarm_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch) +{ + //tacho source + if( ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_INTR_CTRL) & INTR_CTRL_EN_NUM(tacho_ch)) + return 1; + else + return 0; +} + +static void +ast_set_tacho_alarm_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch, u8 enable) +{ + //tacho source + if(enable == 1) + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_INTR_CTRL) | INTR_CTRL_EN_NUM(tacho_ch), + AST_PTCR_INTR_CTRL); + else + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_INTR_CTRL) & ~(INTR_CTRL_EN_NUM(tacho_ch)), + AST_PTCR_INTR_CTRL); +} + +static u8 +ast_get_tacho_alarm(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch) +{ + //tacho source + if(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_INTR_STS) & INTR_CTRL_NUM(tacho_ch)) + return 1; + else + return 0; +} + +static u8 +ast_get_tacho_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch) +{ + if(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & AST_PTCR_CTRL_FAN_NUM_EN(tacho_ch)) + return 1; + else + return 0; +} + +static void +ast_set_tacho_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch, u8 enable) +{ + //tacho number enable + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) | AST_PTCR_CTRL_FAN_NUM_EN(tacho_ch), + AST_PTCR_CTRL); + else + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & ~(AST_PTCR_CTRL_FAN_NUM_EN(tacho_ch)), + AST_PTCR_CTRL); +} + +static u8 +ast_get_tacho_source(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch) +{ + u32 tmp1, tmp2; + + //tacho source + tmp1 = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TACH_SOURCE); + tmp1 &= TACH_PWM_SOURCE_MASK_BIT01(tacho_ch); + tmp1 = tmp1 >> (TACH_PWM_SOURCE_BIT01(tacho_ch)); + + tmp2 = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TACH_SOURCE_EXT); + tmp2 &= 
TACH_PWM_SOURCE_MASK_BIT2(tacho_ch); + tmp2 = tmp2 >> (TACH_PWM_SOURCE_BIT2(tacho_ch)); + tmp2 = tmp2 << 2; + + return (tmp2 | tmp1); +} + +static void +ast_set_tacho_source(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch, u8 tacho_source) +{ + u32 tmp1, tmp2; + if(tacho_source > 7) + return; + + //tacho source + tmp1 = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TACH_SOURCE); + tmp1 &= ~(TACH_PWM_SOURCE_MASK_BIT01(tacho_ch)); + tmp1 |= ((tacho_source &0x3) << (TACH_PWM_SOURCE_BIT01(tacho_ch))); + + tmp2 = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_TACH_SOURCE_EXT); + tmp2 &= ~(TACH_PWM_SOURCE_MASK_BIT2(tacho_ch)); + tmp2 |= (((tacho_source &0x4)>>2) << (TACH_PWM_SOURCE_BIT2(tacho_ch))); + + ast_pwm_tacho_write(ast_pwm_tacho, tmp1, AST_PTCR_TACH_SOURCE); + ast_pwm_tacho_write(ast_pwm_tacho, tmp2, AST_PTCR_TACH_SOURCE_EXT); + +} + +static u32 +ast_get_tacho_rpm(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 tacho_ch) +{ + u32 raw_data, rpm, tacho_clk_div, clk_source, timeout=0; + u8 tacho_source, pwm_type,tacho_type_en; + + if(!(ast_get_tacho_en(ast_pwm_tacho,tacho_ch))) + return 0; + + //write 0 + ast_pwm_tacho_write(ast_pwm_tacho, 0, AST_PTCR_TRIGGER); + + //write 1 + ast_pwm_tacho_write(ast_pwm_tacho, 0x1 << tacho_ch, AST_PTCR_TRIGGER); + + tacho_source = ast_get_tacho_source(ast_pwm_tacho, tacho_ch); + pwm_type = ast_get_pwm_type(ast_pwm_tacho, tacho_source); + tacho_type_en = ast_get_tacho_type_en(ast_pwm_tacho, pwm_type); + +// printk("source: %d,type: %d,en: %d \n",tacho_source,pwm_type,tacho_type_en); + + //check pwm_type and get clock division + if(!tacho_type_en) + return 0; + + //Wait ready + while(!(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_RESULT) & (0x1 << RESULT_STATUS))) { + timeout++; + if(timeout > 25) + return 0; + }; + + raw_data = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_RESULT)& RESULT_VALUE_MASK; + tacho_clk_div = ast_get_tacho_type_division(ast_pwm_tacho, pwm_type); + +// printk("raw div = %d \n",tacho_clk_div); + + tacho_clk_div = 0x4 
<< (tacho_clk_div*2); +// printk("raw div = %d \n",tacho_clk_div); + + //TODO 166 + if(AST_PTCR_CTRL_CLK_MCLK & ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)) + clk_source = 166*1000*1000; + else + clk_source = 24*1000*1000; + + printk("raw_data %d, clk_source %d, tacho_clk_div %d \n",raw_data, clk_source, tacho_clk_div); + rpm = (clk_source * 60) / (2 * raw_data * tacho_clk_div); + + return rpm; +} + +static u8 +ast_get_pwm_clock_division_h(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + u8 tmp=0; + + switch (pwm_type) { + case PWM_TYPE_M: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & AST_PTCR_CLK_CTRL_TYPEM_H_MASK) >> AST_PTCR_CLK_CTRL_TYPEM_H; + break; + case PWM_TYPE_N: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & AST_PTCR_CLK_CTRL_TYPEN_H_MASK) >> AST_PTCR_CLK_CTRL_TYPEN_H; + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_EXT_CTRL) & AST_PTCR_CLK_CTRL_TYPEO_H_MASK) >> AST_PTCR_CLK_CTRL_TYPEO_H; + break; +#endif + default: + printk("error channel ast_get_pwm_clock_division_h %d \n",pwm_type); + break; + } + return tmp; +} + +static void +ast_set_pwm_clock_division_h(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type, u8 div_high) +{ + if(div_high > 0xf) + return; + switch (pwm_type) { + case PWM_TYPE_M: + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEM_H_MASK) | (div_high << AST_PTCR_CLK_CTRL_TYPEM_H), + AST_PTCR_CLK_CTRL); + break; + case PWM_TYPE_N: + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEN_H_MASK) | (div_high << AST_PTCR_CLK_CTRL_TYPEN_H), + AST_PTCR_CLK_CTRL); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_EXT_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEO_H_MASK) | (div_high << AST_PTCR_CLK_CTRL_TYPEO_H), + AST_PTCR_CLK_EXT_CTRL); + 
break;
+#endif
+		default:
+			/* fixed copy-pasted message: this is the division_h setter,
+			 * not ast_get_pwm_type */
+			printk("error channel ast_set_pwm_clock_division_h %d \n",pwm_type);
+			break;
+	}
+
+}
+
+static u8
+ast_get_pwm_clock_division_l(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type)
+{
+	u8 tmp=0;
+
+	switch (pwm_type) {
+		case PWM_TYPE_M:
+			tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & AST_PTCR_CLK_CTRL_TYPEM_L_MASK) >> AST_PTCR_CLK_CTRL_TYPEM_L;
+			break;
+		case PWM_TYPE_N:
+			tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & AST_PTCR_CLK_CTRL_TYPEN_L_MASK) >> AST_PTCR_CLK_CTRL_TYPEN_L;
+			break;
+#ifdef PWM_TYPE_O
+		case PWM_TYPE_O:
+			tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_EXT_CTRL) & AST_PTCR_CLK_CTRL_TYPEO_L_MASK) >> AST_PTCR_CLK_CTRL_TYPEO_L;
+			break;
+#endif
+		default:
+			printk("error channel ast_get_pwm_clock_division_l %d \n",pwm_type);
+			break;
+	}
+	return tmp;
+}
+
+static void
+ast_set_pwm_clock_division_l(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type, u8 div_low)
+{
+	if(div_low > 0xf)
+		return;
+	switch (pwm_type) {
+		case PWM_TYPE_M:
+			ast_pwm_tacho_write(ast_pwm_tacho,
+				(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEM_L_MASK) | (div_low << AST_PTCR_CLK_CTRL_TYPEM_L),
+				AST_PTCR_CLK_CTRL);
+			break;
+		case PWM_TYPE_N:
+			ast_pwm_tacho_write(ast_pwm_tacho,
+				(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEN_L_MASK) | (div_low << AST_PTCR_CLK_CTRL_TYPEN_L),
+				AST_PTCR_CLK_CTRL);
+			break;
+#ifdef PWM_TYPE_O
+		case PWM_TYPE_O:
+			ast_pwm_tacho_write(ast_pwm_tacho,
+				(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_EXT_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEO_L_MASK) | (div_low << AST_PTCR_CLK_CTRL_TYPEO_L),
+				AST_PTCR_CLK_EXT_CTRL);
+			break;
+#endif
+		default:
+			/* fixed copy-pasted message: this is the division_l setter,
+			 * not ast_get_pwm_type */
+			printk("error channel ast_set_pwm_clock_division_l %d \n",pwm_type);
+			break;
+	}
+}
+
+static u8
+ast_get_pwm_clock_unit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type)
+{
+	u8 tmp=0;
+
+	switch (pwm_type) {
+		case PWM_TYPE_M:
+			tmp = (ast_pwm_tacho_read(ast_pwm_tacho, 
AST_PTCR_CLK_CTRL) & AST_PTCR_CLK_CTRL_TYPEM_UNIT_MASK) >> AST_PTCR_CLK_CTRL_TYPEM_UNIT; + break; + case PWM_TYPE_N: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & AST_PTCR_CLK_CTRL_TYPEN_UNIT_MASK) >> AST_PTCR_CLK_CTRL_TYPEN_UNIT; + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_EXT_CTRL) & AST_PTCR_CLK_CTRL_TYPEO_UNIT_MASK) >> AST_PTCR_CLK_CTRL_TYPEO_UNIT; + break; +#endif + default: + printk("error channel ast_get_pwm_clock_unit %d \n",pwm_type); + break; + } + return tmp; +} + +static void +ast_set_pwm_clock_unit(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type, u8 unit) +{ + if(unit > 0xff) + return; + switch (pwm_type) { + case PWM_TYPE_M: + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEM_UNIT_MASK) | (unit << AST_PTCR_CLK_CTRL_TYPEM_UNIT), + AST_PTCR_CLK_CTRL); + break; + case PWM_TYPE_N: + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEN_UNIT_MASK) | (unit << AST_PTCR_CLK_CTRL_TYPEN_UNIT), + AST_PTCR_CLK_CTRL); + break; +#ifdef PWM_TYPE_O + case PWM_TYPE_O: + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CLK_EXT_CTRL) & ~AST_PTCR_CLK_CTRL_TYPEO_UNIT_MASK) | (unit << AST_PTCR_CLK_CTRL_TYPEO_UNIT), + AST_PTCR_CLK_EXT_CTRL); + break; +#endif + default: + printk("error channel ast_get_pwm_type %d \n",pwm_type); + break; + } +} + +static u32 +ast_get_pwm_clock(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_type) +{ + u32 unit, div_low, div_high, clk_source; + + unit = ast_get_pwm_clock_unit(ast_pwm_tacho,pwm_type); + + div_high = ast_get_pwm_clock_division_h(ast_pwm_tacho,pwm_type); + div_high = (0x1<<div_high); + + div_low = ast_get_pwm_clock_division_l(ast_pwm_tacho,pwm_type); + if(div_low == 0) + div_low = 1; + else + div_low = div_low*2; + //TODO 266 + + if(AST_PTCR_CTRL_CLK_MCLK & 
ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)) + clk_source = ast_pwm_tacho->ast_pwm_data->get_pwm_clock(); + else + clk_source = 24*1000*1000; + +// printk("%d, %d, %d, %d \n",clk_source,div_high,div_low,unit); + return (clk_source/(div_high*div_low*(unit+1))); +} + +static u8 +ast_get_pwm_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch) +{ + u8 tmp=0; + + switch (pwm_ch) { + case PWMA: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & AST_PTCR_CTRL_PMWA_EN) >> AST_PTCR_CTRL_PMWA; + break; + case PWMB: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & AST_PTCR_CTRL_PMWB_EN) >> AST_PTCR_CTRL_PMWB; + break; + case PWMC: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & AST_PTCR_CTRL_PMWC_EN) >> AST_PTCR_CTRL_PMWC; + break; + case PWMD: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & AST_PTCR_CTRL_PMWD_EN) >> AST_PTCR_CTRL_PMWD; + break; + case PWME: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & AST_PTCR_CTRL_PMWE_EN) >> AST_PTCR_CTRL_PMWE; + break; + case PWMF: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & AST_PTCR_CTRL_PMWF_EN) >> AST_PTCR_CTRL_PMWF; + break; + case PWMG: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & AST_PTCR_CTRL_PMWG_EN) >> AST_PTCR_CTRL_PMWG; + break; + case PWMH: + tmp = (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & AST_PTCR_CTRL_PMWH_EN) >> AST_PTCR_CTRL_PMWH; + break; + default: + printk("error channel ast_get_pwm_type %d \n",pwm_ch); + break; + } + + return tmp; + +} + +static void +ast_set_pwm_en(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch, u8 enable) +{ + switch (pwm_ch) { + case PWMA: + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) | AST_PTCR_CTRL_PMWA_EN, + AST_PTCR_CTRL); + else + ast_pwm_tacho_write(ast_pwm_tacho, + ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & ~AST_PTCR_CTRL_PMWA_EN, + AST_PTCR_CTRL); + + break; + case PWMB: + if(enable) + 
ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) | AST_PTCR_CTRL_PMWB_EN), + AST_PTCR_CTRL); + else + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & ~AST_PTCR_CTRL_PMWB_EN), + AST_PTCR_CTRL); + break; + case PWMC: + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) | AST_PTCR_CTRL_PMWC_EN), + AST_PTCR_CTRL); + else + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & ~AST_PTCR_CTRL_PMWC_EN), + AST_PTCR_CTRL); + + break; + case PWMD: + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) | AST_PTCR_CTRL_PMWD_EN), + AST_PTCR_CTRL); + else + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL) & ~AST_PTCR_CTRL_PMWD_EN), + AST_PTCR_CTRL); + + break; + case PWME: + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) | AST_PTCR_CTRL_PMWE_EN), + AST_PTCR_CTRL_EXT); + else + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & ~AST_PTCR_CTRL_PMWE_EN), + AST_PTCR_CTRL_EXT); + + break; + case PWMF: + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) | AST_PTCR_CTRL_PMWF_EN), + AST_PTCR_CTRL_EXT); + else + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & ~AST_PTCR_CTRL_PMWF_EN), + AST_PTCR_CTRL_EXT); + + break; + case PWMG: + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) | AST_PTCR_CTRL_PMWG_EN), + AST_PTCR_CTRL_EXT); + else + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & ~AST_PTCR_CTRL_PMWG_EN), + AST_PTCR_CTRL_EXT); + + break; + case PWMH: + if(enable) + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, 
AST_PTCR_CTRL_EXT) | AST_PTCR_CTRL_PMWH_EN), + AST_PTCR_CTRL_EXT); + else + ast_pwm_tacho_write(ast_pwm_tacho, + (ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT) & ~AST_PTCR_CTRL_PMWH_EN), + AST_PTCR_CTRL_EXT); + + break; + default: + printk("error channel ast_get_pwm_type %d \n",pwm_ch); + break; + } +} + +static u8 +ast_get_pwm_type(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch) +{ + u8 tmp=0; + + switch (pwm_ch) { + case PWMA: + tmp = AST_PTCR_CTRL_GET_PWMA_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)); + break; + case PWMB: + tmp = AST_PTCR_CTRL_GET_PWMB_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)); + break; + case PWMC: + tmp = AST_PTCR_CTRL_GET_PWMC_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)); + break; + case PWMD: + tmp = AST_PTCR_CTRL_GET_PWMD_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL)); + break; + case PWME: + tmp = AST_PTCR_CTRL_GET_PWME_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT)); + break; + case PWMF: + tmp = AST_PTCR_CTRL_GET_PWMF_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT)); + break; + case PWMG: + tmp = AST_PTCR_CTRL_GET_PWMG_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT)); + break; + case PWMH: + tmp = AST_PTCR_CTRL_GET_PWMH_TYPE(ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT)); + break; + default: + printk("error channel ast_get_pwm_type %d \n",pwm_ch); + break; + } + + return tmp; +} + +static void +ast_set_pwm_type(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch, u8 type) +{ + u32 tmp1,tmp2; + + if(type > 0x2) + return; + + tmp1 = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL); + tmp2 = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_CTRL_EXT); + + switch (pwm_ch) { + case PWMA: + tmp1 &= ~AST_PTCR_CTRL_SET_PWMA_TYPE_MASK; + tmp1 |= AST_PTCR_CTRL_SET_PWMA_TYPE(type); + ast_pwm_tacho_write(ast_pwm_tacho, tmp1, AST_PTCR_CTRL); + break; + case PWMB: + tmp1 &= ~AST_PTCR_CTRL_SET_PWMB_TYPE_MASK; + tmp1 |= AST_PTCR_CTRL_SET_PWMB_TYPE(type); + 
ast_pwm_tacho_write(ast_pwm_tacho, tmp1, AST_PTCR_CTRL); + break; + case PWMC: + tmp1 &= ~AST_PTCR_CTRL_SET_PWMC_TYPE_MASK; + tmp1 |= AST_PTCR_CTRL_SET_PWMC_TYPE(type); + ast_pwm_tacho_write(ast_pwm_tacho, tmp1, AST_PTCR_CTRL); + break; + case PWMD: + tmp1 &= ~AST_PTCR_CTRL_SET_PWMD_TYPE_MASK; + tmp1 |= AST_PTCR_CTRL_SET_PWMD_TYPE(type); + ast_pwm_tacho_write(ast_pwm_tacho, tmp1, AST_PTCR_CTRL); + break; + case PWME: + tmp2 &= ~AST_PTCR_CTRL_SET_PWME_TYPE_MASK; + tmp2 |= AST_PTCR_CTRL_SET_PWME_TYPE(type); + ast_pwm_tacho_write(ast_pwm_tacho, tmp2, AST_PTCR_CTRL_EXT); + break; + case PWMF: + tmp2 &= ~AST_PTCR_CTRL_SET_PWMF_TYPE_MASK; + tmp2 |= AST_PTCR_CTRL_SET_PWMF_TYPE(type); + ast_pwm_tacho_write(ast_pwm_tacho, tmp2, AST_PTCR_CTRL_EXT); + break; + case PWMG: + tmp2 &= ~AST_PTCR_CTRL_SET_PWMG_TYPE_MASK; + tmp2 |= AST_PTCR_CTRL_SET_PWMG_TYPE(type); + ast_pwm_tacho_write(ast_pwm_tacho, tmp2, AST_PTCR_CTRL_EXT); + break; + case PWMH: + tmp2 &= ~AST_PTCR_CTRL_SET_PWMH_TYPE_MASK; + tmp2 |= AST_PTCR_CTRL_SET_PWMH_TYPE(type); + ast_pwm_tacho_write(ast_pwm_tacho, tmp2, AST_PTCR_CTRL_EXT); + break; + default: + printk("error channel %d \n",pwm_ch); + break; + } +} + +// PWM DUTY +static u8 +ast_get_pwm_duty_rising(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch) +{ + u32 tmp=0; + switch (pwm_ch) { + case PWMA: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= DUTY_CTRL0_PWMA_RISE_POINT_MASK; + break; + case PWMB: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= DUTY_CTRL0_PWMB_RISE_POINT_MASK; + tmp = (tmp >> DUTY_CTRL0_PWMB_RISE_POINT); + break; + case PWMC: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= DUTY_CTRL1_PWMC_RISE_POINT_MASK; + break; + case PWMD: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= DUTY_CTRL1_PWMD_RISE_POINT_MASK; + tmp = (tmp >> DUTY_CTRL1_PWMD_RISE_POINT); + break; + case PWME: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY2_CTRL); + tmp 
&= DUTY_CTRL2_PWME_RISE_POINT_MASK; + break; + case PWMF: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY2_CTRL); + tmp &= DUTY_CTRL2_PWMF_RISE_POINT_MASK; + tmp = (tmp >> DUTY_CTRL2_PWMF_RISE_POINT); + break; + case PWMG: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= DUTY_CTRL3_PWMG_RISE_POINT_MASK; + break; + case PWMH: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= DUTY_CTRL3_PWMH_RISE_POINT_MASK; + tmp = (tmp >> DUTY_CTRL3_PWMH_RISE_POINT); + break; + default: + printk("error pwm channel %d with duty R \n",pwm_ch); + break; + } + + return tmp; +} + +static void +ast_set_pwm_duty_rising(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch, u8 rising) +{ + u32 tmp=0; + u32 pwm_type = ast_get_pwm_type(ast_pwm_tacho,pwm_ch); + + if((rising > 0xff) || (rising > ast_get_pwm_clock_unit(ast_pwm_tacho,pwm_type))) + return; + + switch (pwm_ch) { + case PWMA: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= ~DUTY_CTRL0_PWMA_RISE_POINT_MASK; + tmp |= rising; + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY0_CTRL); + break; + case PWMB: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= ~DUTY_CTRL0_PWMB_RISE_POINT_MASK; + tmp |= (rising << DUTY_CTRL0_PWMB_RISE_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY0_CTRL); + break; + case PWMC: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= ~DUTY_CTRL1_PWMC_RISE_POINT_MASK; + tmp |= rising; + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY1_CTRL); + break; + case PWMD: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= ~DUTY_CTRL1_PWMD_RISE_POINT_MASK; + tmp |= (rising << DUTY_CTRL1_PWMD_RISE_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY1_CTRL); + break; + case PWME: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY2_CTRL); + tmp &= ~DUTY_CTRL2_PWME_RISE_POINT_MASK; + tmp |= rising; + ast_pwm_tacho_write(ast_pwm_tacho, tmp, 
AST_PTCR_DUTY2_CTRL); + break; + case PWMF: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY2_CTRL); + tmp &= ~DUTY_CTRL2_PWMF_RISE_POINT_MASK; + tmp |= (rising << DUTY_CTRL2_PWMF_RISE_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY2_CTRL); + break; + case PWMG: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= ~DUTY_CTRL3_PWMG_RISE_POINT_MASK; + tmp |= rising; + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY3_CTRL); + break; + case PWMH: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= ~DUTY_CTRL3_PWMH_RISE_POINT_MASK; + tmp |= (rising << DUTY_CTRL3_PWMH_RISE_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY3_CTRL); + break; + + default: + printk("error pwm channel %d with duty \n",pwm_ch); + break; + } +} + +static u8 +ast_get_pwm_duty_falling(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch) +{ + u32 tmp=0; + switch (pwm_ch) { + case PWMA: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= DUTY_CTRL0_PWMA_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL0_PWMA_FALL_POINT); + break; + case PWMB: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= DUTY_CTRL0_PWMB_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL0_PWMB_FALL_POINT); + break; + case PWMC: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= DUTY_CTRL1_PWMC_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL1_PWMC_FALL_POINT); + break; + case PWMD: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= DUTY_CTRL1_PWMD_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL1_PWMD_FALL_POINT); + break; + case PWME: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY2_CTRL); + tmp &= DUTY_CTRL2_PWME_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL2_PWME_FALL_POINT); + break; + case PWMF: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY2_CTRL); + tmp &= DUTY_CTRL2_PWMF_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL2_PWMF_FALL_POINT); + break; + case PWMG: + tmp = 
ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= DUTY_CTRL3_PWMG_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL3_PWMG_FALL_POINT); + break; + case PWMH: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= DUTY_CTRL3_PWMH_FALL_POINT_MASK; + tmp = (tmp >> DUTY_CTRL3_PWMH_FALL_POINT); + break; + + default: + printk("error pwm channel %d with duty F \n",pwm_ch); + break; + } + + return tmp; +} + +static void +ast_set_pwm_duty_falling(struct ast_pwm_tacho_data *ast_pwm_tacho, u8 pwm_ch, u8 falling) +{ + u32 tmp =0; + u32 pwm_type = ast_get_pwm_type(ast_pwm_tacho,pwm_ch); + + if((falling > 0xff) || (falling > ast_get_pwm_clock_unit(ast_pwm_tacho,pwm_type))) + return; + + switch (pwm_ch) { + case PWMA: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= ~DUTY_CTRL0_PWMA_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL0_PWMA_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY0_CTRL); + break; + case PWMB: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY0_CTRL); + tmp &= ~DUTY_CTRL0_PWMB_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL0_PWMB_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY0_CTRL); + break; + case PWMC: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= ~DUTY_CTRL1_PWMC_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL1_PWMC_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY1_CTRL); + break; + case PWMD: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY1_CTRL); + tmp &= ~DUTY_CTRL1_PWMD_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL1_PWMD_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY1_CTRL); + break; + case PWME: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY2_CTRL); + tmp &= ~DUTY_CTRL2_PWME_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL2_PWME_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY2_CTRL); + break; + case PWMF: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, 
AST_PTCR_DUTY2_CTRL); + tmp &= ~DUTY_CTRL2_PWMF_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL2_PWMF_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY2_CTRL); + break; + case PWMG: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= ~DUTY_CTRL3_PWMG_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL3_PWMG_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY3_CTRL); + break; + case PWMH: + tmp = ast_pwm_tacho_read(ast_pwm_tacho, AST_PTCR_DUTY3_CTRL); + tmp &= ~DUTY_CTRL3_PWMH_FALL_POINT_MASK; + tmp |= (falling << DUTY_CTRL3_PWMH_FALL_POINT); + ast_pwm_tacho_write(ast_pwm_tacho, tmp, AST_PTCR_DUTY3_CTRL); + break; + + default: + printk("error pwm channel %d with duty \n",pwm_ch); + break; + } + +} + +/*PWM M/N/O Type sysfs*/ +/* + * Macro defining SENSOR_DEVICE_ATTR for a pwm sysfs entries. + * 0 - show/store unit + * 1 - show/store division_l + * 2 - show/store division_h + */ + +static ssize_t +ast_show_pwm_type_clock(struct device *dev, struct device_attribute *attr, char *sysfsbuf) +{ + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + + //sensor_attr->index : M/N/O# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //unit : 0~256 + return sprintf(sysfsbuf, "%d (0~255)\n", ast_get_pwm_clock_unit(ast_pwm_tacho,sensor_attr->index)); + break; + case 1: //division_l + return sprintf(sysfsbuf, "%d (0~15) \n", ast_get_pwm_clock_division_l(ast_pwm_tacho,sensor_attr->index)); + break; + case 2: //division_h + return sprintf(sysfsbuf, "%d (0~15) \n", ast_get_pwm_clock_division_h(ast_pwm_tacho,sensor_attr->index)); + + break; + case 3: //expect clock + + return sprintf(sysfsbuf, "%d \n", ast_get_pwm_clock(ast_pwm_tacho,sensor_attr->index)); + + break; + + default: + return -EINVAL; + break; + } + + return sprintf(sysfsbuf, "%d : %d\n", sensor_attr->nr,sensor_attr->index); + + +} + +static ssize_t +ast_store_pwm_type_clock(struct device *dev, struct device_attribute 
*attr, const char *sysfsbuf, size_t count) +{ + u32 input_val; + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + input_val = simple_strtoul(sysfsbuf, NULL, 10); + + switch(sensor_attr->nr) + { + case 0: //unit : 0~256 + ast_set_pwm_clock_unit(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 1: //division_l + ast_set_pwm_clock_division_l(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 2: //division_h + ast_set_pwm_clock_division_h(ast_pwm_tacho, sensor_attr->index, input_val); + break; + default: + return -EINVAL; + break; + } + + return count; +} + +/* attr + * 0 - show/store enable + * 1 - show/store type + * 2 - show/store falling + * 3 - show/store rising */ +static ssize_t +ast_show_pwm_speed(struct device *dev, struct device_attribute *attr, char *sysfsbuf) +{ + struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); + + //sensor_attr->index : pwm_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + return sprintf(sysfsbuf, "%d : %s\n", ast_get_pwm_en(ast_pwm_tacho,sensor_attr->index),ast_get_pwm_en(ast_pwm_tacho,sensor_attr->index) ? 
"Enable":"Disable"); + break; + case 1: //pwm type M/N/O + return sprintf(sysfsbuf, "%d (0:M/1:N/2:O)\n",ast_get_pwm_type(ast_pwm_tacho, sensor_attr->index)); + break; + case 2: //rising + return sprintf(sysfsbuf, "%x : unit limit (0~%d)\n",ast_get_pwm_duty_rising(ast_pwm_tacho, sensor_attr->index), + ast_get_pwm_clock_unit(ast_pwm_tacho, ast_get_pwm_type(ast_pwm_tacho, sensor_attr->index))); + break; + case 3: //falling + return sprintf(sysfsbuf, "%x : unit limit (0~%d)\n",ast_get_pwm_duty_falling(ast_pwm_tacho, sensor_attr->index), + ast_get_pwm_clock_unit(ast_pwm_tacho, ast_get_pwm_type(ast_pwm_tacho, sensor_attr->index))); + break; + default: + return -EINVAL; + break; + } +} + +static ssize_t +ast_store_pwm_speed(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) +{ + u32 input_val; + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + input_val = simple_strtoul(sysfsbuf, NULL, 10); + + //sensor_attr->index : pwm_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + ast_set_pwm_en(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 1: //pwm type M/N/O + ast_set_pwm_type(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 2: //rising + ast_set_pwm_duty_rising(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 3: //falling + ast_set_pwm_duty_falling(ast_pwm_tacho, sensor_attr->index, input_val); + break; + default: + return -EINVAL; + break; + } + + return count; +} + +/* Fan Type */ +/* Fan M/N/O Type sysfs + * Macro defining SENSOR_DEVICE_ATTR for a pwm sysfs entries. 
+ * 0 - show/store enable + * 1 - show/store mode + * 2 - show/store unit + * 3 - show/store division + * 4 - show/store limit + */ + +static ssize_t +ast_show_tacho_type(struct device *dev, struct device_attribute *attr, char *sysfsbuf) +{ + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + //sensor_attr->index : M/N/O + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + return sprintf(sysfsbuf, "%d : %s\n", ast_get_tacho_type_en(ast_pwm_tacho,sensor_attr->index),ast_get_tacho_type_en(ast_pwm_tacho,sensor_attr->index) ? "Enable":"Disable"); + break; + case 1: //fan tacho mode + if(ast_get_tacho_type_mode(ast_pwm_tacho, sensor_attr->index) == FALL_EDGE) + return sprintf(sysfsbuf, "0: falling\n"); + else if(ast_get_tacho_type_mode(ast_pwm_tacho, sensor_attr->index) == RISE_EDGE) + return sprintf(sysfsbuf, "1: rising\n"); + else if (ast_get_tacho_type_mode(ast_pwm_tacho, sensor_attr->index) == BOTH_EDGE) + return sprintf(sysfsbuf, "2: both\n"); + else + return sprintf(sysfsbuf, "3: unknown\n"); + break; + case 2: //unit + return sprintf(sysfsbuf, "%d (0~65535)\n",ast_get_tacho_type_unit(ast_pwm_tacho, sensor_attr->index)); + + break; + case 3: //division + return sprintf(sysfsbuf, "%d (0~7) \n",ast_get_tacho_type_division(ast_pwm_tacho, sensor_attr->index)); + break; + case 4: //limit + return sprintf(sysfsbuf, "%d (0~1048575)\n",ast_get_tacho_type_limit(ast_pwm_tacho, sensor_attr->index)); + break; + case 5: //measure period + return sprintf(sysfsbuf, "%d \n",ast_get_tacho_measure_period(ast_pwm_tacho, sensor_attr->index)); + break; + default: + return -EINVAL; + break; + } +} + +static ssize_t +ast_store_tacho_type(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) +{ + u32 input_val; + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + input_val = simple_strtoul(sysfsbuf, NULL, 10); + + //sensor_attr->index : pwm_ch# + 
//sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + ast_set_tacho_type_en(ast_pwm_tacho,sensor_attr->index, input_val); + break; + case 1: //fan tacho mode + ast_set_tacho_type_mode(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 2: //unit + ast_set_tacho_type_unit(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 3: //division + ast_set_tacho_type_division(ast_pwm_tacho, sensor_attr->index, input_val); + break; + case 4: //limit + ast_set_tacho_type_limit(ast_pwm_tacho, sensor_attr->index, input_val); + break; + default: + return -EINVAL; + break; + } + return count; + +} + +/* fan detect */ +/* FAN sysfs + * Macro defining SENSOR_DEVICE_ATTR for a tacho sysfs entries. + * - show/store enable + * - show/store source + * - show/store rpm + * - show/store alarm + * - show/store alarm_en +*/ +static ssize_t +ast_show_tacho_speed(struct device *dev, struct device_attribute *attr, char *sysfsbuf) +{ + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + //sensor_attr->index : pwm_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + return sprintf(sysfsbuf, "%d : %s\n", ast_get_tacho_en(ast_pwm_tacho,sensor_attr->index),ast_get_tacho_en(ast_pwm_tacho,sensor_attr->index) ? "Enable":"Disable"); + break; + case 1: //tacho source PWMA~H - 0~7 + return sprintf(sysfsbuf, "PWM%d (0~7)\n", ast_get_tacho_source(ast_pwm_tacho,sensor_attr->index)); + break; + case 2: //rpm + return sprintf(sysfsbuf, "%d \n", ast_get_tacho_rpm(ast_pwm_tacho,sensor_attr->index)); + break; + case 3: //alarm + return sprintf(sysfsbuf, "%d \n", ast_get_tacho_alarm(ast_pwm_tacho,sensor_attr->index)); + break; + case 4: //alarm_en + return sprintf(sysfsbuf, "%d : %s\n", + ast_get_tacho_alarm_en(ast_pwm_tacho,sensor_attr->index), + ast_get_tacho_alarm_en(ast_pwm_tacho,sensor_attr->index) ? 
"Enable":"Disable"); + break; + default: + return -EINVAL; + break; + } + +} + +static ssize_t +ast_store_tacho_speed(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) +{ + u32 input_val; + struct sensor_device_attribute_2 *sensor_attr = + to_sensor_dev_attr_2(attr); + + input_val = simple_strtoul(sysfsbuf, NULL, 10); + + + //sensor_attr->index : tacho_ch# + //sensor_attr->nr : attr# + switch(sensor_attr->nr) + { + case 0: //enable, disable + ast_set_tacho_en(ast_pwm_tacho,sensor_attr->index,input_val); + break; + case 1: //tacho source PWMA~H - 0~7 + ast_set_tacho_source(ast_pwm_tacho,sensor_attr->index,input_val); + break; + case 2: //rpm + return -EINVAL; + break; + case 3: //alarm + return -EINVAL; + break; + case 4: //alarm_en + ast_set_tacho_alarm_en(ast_pwm_tacho,sensor_attr->index,input_val); + break; + default: + return -EINVAL; + break; + } + return count; +} + +/* + * sysfs attributes + */ +/* CLK sysfs*/ +static SENSOR_DEVICE_ATTR_2(clk_en, S_IRUGO | S_IWUSR, ast_show_clk, ast_store_clk, 0, 0); +static SENSOR_DEVICE_ATTR_2(clk_source, S_IRUGO | S_IWUSR, ast_show_clk, ast_store_clk, 1, 0); + + +static struct attribute *clk_attributes[] = { + &sensor_dev_attr_clk_source.dev_attr.attr, + &sensor_dev_attr_clk_en.dev_attr.attr, + NULL +}; + +static const struct attribute_group clk_attribute_groups = { + .attrs = clk_attributes, +}; + +/*PWM M/N/O Type sysfs*/ +/* + * Macro defining SENSOR_DEVICE_ATTR for a pwm sysfs entries. 
+ * 0 - show/store unit + * 1 - show/store division_l + * 2 - show/store division_h + */ + +#define sysfs_pwm_type(type,index) \ +static SENSOR_DEVICE_ATTR_2(pwm_type_##type##_unit, S_IRUGO | S_IWUSR, \ + ast_show_pwm_type_clock, ast_store_pwm_type_clock, 0, index); \ +\ +static SENSOR_DEVICE_ATTR_2(pwm_type_##type##_division_l, S_IRUGO | S_IWUSR, \ + ast_show_pwm_type_clock, ast_store_pwm_type_clock, 1, index); \ +\ +static SENSOR_DEVICE_ATTR_2(pwm_type_##type##_division_h, S_IRUGO | S_IWUSR, \ + ast_show_pwm_type_clock, ast_store_pwm_type_clock, 2, index); \ +\ +static SENSOR_DEVICE_ATTR_2(pwm_type_##type##_clk, S_IRUGO, \ + ast_show_pwm_type_clock, NULL, 3, index); \ +\ +static struct attribute *pwm_type_##type##_attributes[] = { \ + &sensor_dev_attr_pwm_type_##type##_unit.dev_attr.attr, \ + &sensor_dev_attr_pwm_type_##type##_division_l.dev_attr.attr, \ + &sensor_dev_attr_pwm_type_##type##_division_h.dev_attr.attr, \ + &sensor_dev_attr_pwm_type_##type##_clk.dev_attr.attr, \ + NULL \ +}; + +/* + * Create the needed functions for each pwm using the macro defined above + * (4 pwms are supported) + */ +sysfs_pwm_type(m,0); +sysfs_pwm_type(n,1); +#ifdef PWM_TYPE_O +sysfs_pwm_type(o,2); +#endif + +static const struct attribute_group pwm_type_attribute_groups[] = { + { .attrs = pwm_type_m_attributes }, + { .attrs = pwm_type_n_attributes }, +#ifdef PWM_TYPE_O + { .attrs = pwm_type_o_attributes }, +#endif +}; + +/* PWM sysfs + * Macro defining SENSOR_DEVICE_ATTR for a pwm sysfs entries. 
+ * 0 - show/store enable + * 1 - show/store type + * 2 - show/store rising + * 3 - show/store falling + */ + +#define sysfs_pwm_speeds_num(index) \ +static SENSOR_DEVICE_ATTR_2(pwm##index##_en, S_IRUGO | S_IWUSR, \ + ast_show_pwm_speed, ast_store_pwm_speed, 0, index); \ +\ +static SENSOR_DEVICE_ATTR_2(pwm##index##_type, S_IRUGO | S_IWUSR, \ + ast_show_pwm_speed, ast_store_pwm_speed, 1, index); \ +\ +static SENSOR_DEVICE_ATTR_2(pwm##index##_rising, S_IRUGO | S_IWUSR, \ + ast_show_pwm_speed, ast_store_pwm_speed, 2, index); \ +\ +static SENSOR_DEVICE_ATTR_2(pwm##index##_falling, S_IRUGO | S_IWUSR, \ + ast_show_pwm_speed, ast_store_pwm_speed, 3, index); \ +\ +static struct attribute *pwm##index##_attributes[] = { \ + &sensor_dev_attr_pwm##index##_en.dev_attr.attr, \ + &sensor_dev_attr_pwm##index##_type.dev_attr.attr, \ + &sensor_dev_attr_pwm##index##_rising.dev_attr.attr, \ + &sensor_dev_attr_pwm##index##_falling.dev_attr.attr, \ + NULL \ +}; + +/* + * Create the needed functions for each pwm using the macro defined above + * (4 pwms are supported) + */ +sysfs_pwm_speeds_num(0); +sysfs_pwm_speeds_num(1); +sysfs_pwm_speeds_num(2); +sysfs_pwm_speeds_num(3); +sysfs_pwm_speeds_num(4); +sysfs_pwm_speeds_num(5); +sysfs_pwm_speeds_num(6); +sysfs_pwm_speeds_num(7); + +static const struct attribute_group pwm_attribute_groups[] = { + { .attrs = pwm0_attributes }, + { .attrs = pwm1_attributes }, + { .attrs = pwm2_attributes }, + { .attrs = pwm3_attributes }, + { .attrs = pwm4_attributes }, + { .attrs = pwm5_attributes }, + { .attrs = pwm6_attributes }, + { .attrs = pwm7_attributes }, +}; + +/* Fan M/N/O Type sysfs + * Macro defining SENSOR_DEVICE_ATTR for a pwm sysfs entries. 
+ * 0 - show/store enable + * 1 - show/store mode + * 2 - show/store unit + * 3 - show/store division + * 4 - show/store limit + */ + +#define sysfs_tacho_type(type,index) \ +static SENSOR_DEVICE_ATTR_2(tacho_type_##type##_en, S_IRUGO | S_IWUSR, \ + ast_show_tacho_type, ast_store_tacho_type, 0, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho_type_##type##_mode, S_IRUGO | S_IWUSR, \ + ast_show_tacho_type, ast_store_tacho_type, 1, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho_type_##type##_unit, S_IRUGO | S_IWUSR, \ + ast_show_tacho_type, ast_store_tacho_type, 2, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho_type_##type##_division, S_IRUGO | S_IWUSR, \ + ast_show_tacho_type, ast_store_tacho_type, 3, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho_type_##type##_limit, S_IRUGO | S_IWUSR, \ + ast_show_tacho_type, ast_store_tacho_type, 4, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho_type_##type##_measure_period, S_IRUGO | S_IWUSR, \ + ast_show_tacho_type, ast_store_tacho_type, 5, index); \ +\ +static struct attribute *tacho_type_##type##_attributes[] = { \ + &sensor_dev_attr_tacho_type_##type##_en.dev_attr.attr, \ + &sensor_dev_attr_tacho_type_##type##_mode.dev_attr.attr, \ + &sensor_dev_attr_tacho_type_##type##_unit.dev_attr.attr, \ + &sensor_dev_attr_tacho_type_##type##_division.dev_attr.attr, \ + &sensor_dev_attr_tacho_type_##type##_limit.dev_attr.attr, \ + &sensor_dev_attr_tacho_type_##type##_measure_period.dev_attr.attr, \ + NULL \ +}; + +/* + * Create the needed functions for each pwm using the macro defined above + * (4 pwms are supported) + */ +sysfs_tacho_type(m,0); +sysfs_tacho_type(n,1); +#ifdef PWM_TYPE_O +sysfs_tacho_type(o,2); +#endif + +static const struct attribute_group tacho_type_attribute_groups[] = { + { .attrs = tacho_type_m_attributes }, + { .attrs = tacho_type_n_attributes }, +#ifdef PWM_TYPE_O + { .attrs = tacho_type_o_attributes }, +#endif +}; + +/* FAN sysfs + * Macro defining SENSOR_DEVICE_ATTR for a tacho sysfs entries. 
+ * - show/store enable + * - show/store source + * - show/store rpm + * - show/store alarm + * - show/store alarm_en + */ +#define sysfs_tacho_speeds_num(index) \ +static SENSOR_DEVICE_ATTR_2(tacho##index##_en, S_IRUGO | S_IWUSR, \ + ast_show_tacho_speed, ast_store_tacho_speed, 0, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho##index##_source, S_IRUGO | S_IWUSR, \ + ast_show_tacho_speed, ast_store_tacho_speed, 1, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho##index##_rpm, S_IRUGO, \ + ast_show_tacho_speed, NULL, 2, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho##index##_alarm, S_IRUGO, \ + ast_show_tacho_speed, ast_store_tacho_speed, 3, index); \ +\ +static SENSOR_DEVICE_ATTR_2(tacho##index##_alarm_en, S_IRUGO | S_IWUSR, \ + ast_show_tacho_speed, ast_store_tacho_speed, 4, index); \ +\ +static struct attribute *tacho##index##_attributes[] = { \ + &sensor_dev_attr_tacho##index##_en.dev_attr.attr, \ + &sensor_dev_attr_tacho##index##_source.dev_attr.attr, \ + &sensor_dev_attr_tacho##index##_rpm.dev_attr.attr, \ + &sensor_dev_attr_tacho##index##_alarm.dev_attr.attr, \ + &sensor_dev_attr_tacho##index##_alarm_en.dev_attr.attr, \ + NULL \ +}; + +/* + * Create the needed functions for each tacho using the macro defined above + * (4 tachos are supported) + */ +sysfs_tacho_speeds_num(0); +sysfs_tacho_speeds_num(1); +sysfs_tacho_speeds_num(2); +sysfs_tacho_speeds_num(3); +sysfs_tacho_speeds_num(4); +sysfs_tacho_speeds_num(5); +sysfs_tacho_speeds_num(6); +sysfs_tacho_speeds_num(7); +sysfs_tacho_speeds_num(8); +sysfs_tacho_speeds_num(9); +sysfs_tacho_speeds_num(10); +sysfs_tacho_speeds_num(11); +sysfs_tacho_speeds_num(12); +sysfs_tacho_speeds_num(13); +sysfs_tacho_speeds_num(14); +sysfs_tacho_speeds_num(15); + +static const struct attribute_group tacho_attribute_groups[] = { + { .attrs = tacho0_attributes }, + { .attrs = tacho1_attributes }, + { .attrs = tacho2_attributes }, + { .attrs = tacho3_attributes }, + { .attrs = tacho4_attributes }, + { .attrs = tacho5_attributes 
}, + { .attrs = tacho6_attributes }, + { .attrs = tacho7_attributes }, + { .attrs = tacho8_attributes }, + { .attrs = tacho9_attributes }, + { .attrs = tacho10_attributes }, + { .attrs = tacho11_attributes }, + { .attrs = tacho12_attributes }, + { .attrs = tacho13_attributes }, + { .attrs = tacho14_attributes }, + { .attrs = tacho15_attributes }, +}; + +static int +ast_pwm_tacho_probe(struct platform_device *pdev) +{ + struct resource *res; + int err; + int ret=0; + int i; + + dev_dbg(&pdev->dev, "ast_pwm_fan_probe \n"); + + ast_pwm_tacho = kzalloc(sizeof(struct ast_pwm_tacho_data), GFP_KERNEL); + if (!ast_pwm_tacho) { + ret = -ENOMEM; + goto out; + } + + ast_pwm_tacho->ast_pwm_data = pdev->dev.platform_data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (NULL == res) { + dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n"); + ret = -ENOENT; + goto out_mem; + } + + if (!request_mem_region(res->start, resource_size(res), res->name)) { + dev_err(&pdev->dev, "cannot reserved region\n"); + ret = -ENXIO; + goto out_mem; + } + + ast_pwm_tacho->reg_base = ioremap(res->start, resource_size(res)); + if (!ast_pwm_tacho->reg_base) { + ret = -EIO; + goto out_region; + } + + ast_pwm_tacho->irq = platform_get_irq(pdev, 0); + if (ast_pwm_tacho->irq < 0) { + dev_err(&pdev->dev, "no irq specified\n"); + ret = -ENOENT; + goto out_region; + } + + /* Register sysfs hooks */ + err = sysfs_create_group(&pdev->dev.kobj, &clk_attribute_groups); + if (err) + goto out_region; + + ast_pwm_tacho->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(ast_pwm_tacho->hwmon_dev)) { + ret = PTR_ERR(ast_pwm_tacho->hwmon_dev); + goto out_sysfs0; + } + + for(i=0; i< PWM_CH_NUM; i++) { + err = sysfs_create_group(&pdev->dev.kobj, &pwm_attribute_groups[i]); + if (err) + goto out_sysfs0; + } + + for(i=0; i< PWM_TYPE_NUM; i++) { + err = sysfs_create_group(&pdev->dev.kobj, &pwm_type_attribute_groups[i]); + if (err) + goto out_sysfs1; + } + + + for(i=0; i< TACHO_NUM; i++) { + err = 
sysfs_create_group(&pdev->dev.kobj, &tacho_attribute_groups[i]); + if (err) + goto out_sysfs2; + } + + for(i=0; i< PWM_TYPE_NUM; i++) { + err = sysfs_create_group(&pdev->dev.kobj, &tacho_type_attribute_groups[i]); + if (err) + goto out_sysfs3; + } + + ast_pwm_taco_init(); + + printk(KERN_INFO "ast_pwm_tacho: driver successfully loaded.\n"); + + return 0; + +out_sysfs3: + for(i=0; i< TACHO_NUM; i++) + sysfs_remove_group(&pdev->dev.kobj, &tacho_attribute_groups[i]); + +out_sysfs2: + for(i=0; i< PWM_TYPE_NUM; i++) + sysfs_remove_group(&pdev->dev.kobj, &pwm_type_attribute_groups[i]); + +out_sysfs1: + for(i=0; i< PWM_CH_NUM; i++) + sysfs_remove_group(&pdev->dev.kobj, &pwm_attribute_groups[i]); +out_sysfs0: + sysfs_remove_group(&pdev->dev.kobj, &clk_attribute_groups); + +//out_irq: +// free_irq(ast_pwm_tacho->irq, NULL); +out_region: + release_mem_region(res->start, res->end - res->start + 1); +out_mem: + kfree(ast_pwm_tacho); +out: + printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret); + return ret; +} + +static int +ast_pwm_tacho_remove(struct platform_device *pdev) +{ + int i=0; + struct ast_pwm_tacho_data *ast_pwm_tacho = platform_get_drvdata(pdev); + struct resource *res; + printk(KERN_INFO "ast_pwm_tacho: driver unloaded.\n"); + + hwmon_device_unregister(ast_pwm_tacho->hwmon_dev); + + for(i=0; i<16; i++) + sysfs_remove_group(&pdev->dev.kobj, &tacho_attribute_groups[i]); + + for(i=0; i<3; i++) + sysfs_remove_group(&pdev->dev.kobj, &pwm_type_attribute_groups[i]); + + for(i=0; i<8; i++) + sysfs_remove_group(&pdev->dev.kobj, &pwm_attribute_groups[i]); + + sysfs_remove_group(&pdev->dev.kobj, &clk_attribute_groups); + + platform_set_drvdata(pdev, NULL); +// free_irq(ast_pwm_tacho->irq, ast_pwm_tacho); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iounmap(ast_pwm_tacho->reg_base); + release_mem_region(res->start, res->end - res->start + 1); + kfree(ast_pwm_tacho); + return 0; +} + +#ifdef CONFIG_PM +static int +ast_pwm_tacho_suspend(struct 
platform_device *pdev, pm_message_t state) +{ + printk("ast_pwm_tacho_suspend : TODO \n"); + return 0; +} + +static int +ast_pwm_tacho_resume(struct platform_device *pdev) +{ + ast_pwm_taco_init(); + return 0; +} + +#else +#define ast_pwm_tacho_suspend NULL +#define ast_pwm_tacho_resume NULL +#endif + +static struct platform_driver ast_pwm_tacho_driver = { + .probe = ast_pwm_tacho_probe, + .remove = __devexit_p(ast_pwm_tacho_remove), + .suspend = ast_pwm_tacho_suspend, + .resume = ast_pwm_tacho_resume, + .driver = { + .name = "ast_pwm_tacho", + .owner = THIS_MODULE, + }, +}; + +static int __init +ast_pwm_tacho_init(void) +{ + return platform_driver_register(&ast_pwm_tacho_driver); +} + +static void __exit +ast_pwm_tacho_exit(void) +{ + platform_driver_unregister(&ast_pwm_tacho_driver); +} + +module_init(ast_pwm_tacho_init); +module_exit(ast_pwm_tacho_exit); + +MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>"); +MODULE_DESCRIPTION("PWM TACHO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 7f95905bbb9d..2ed09280b161 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -454,6 +454,51 @@ config I2C_PXA_SLAVE is necessary for systems where the PXA may be a target on the I2C bus. +config I2C_AST + tristate "ASPEED AST I2C adapter " +# depends on ARCH_ASPEED + help + If you have devices in the AST I2C bus, say yes to this option. + This driver can also be built as a module. If so, the module + will be called i2c-ast. + +config I2C_AST1070 + tristate "ASPEED AST1070 I2C adapter " + depends on ARCH_AST1070 + help + If you have devices in the AST1070 I2C bus, say yes to this option. + This driver can also be built as a module. If so, the module + will be called i2c-ast. 
+ +config AST_I2C_SLAVE_MODE + bool "AST I2C Slave mode" + depends on I2C_AST + +if AST_I2C_SLAVE_MODE + +choice + prompt "I2C slave config" + default AST_I2C_SLAVE_EEPROM + +config AST_I2C_SLAVE_EEPROM + bool "10 byte EEPROM Device" + help + Support I2C slave mode communications on the AST I2C bus. This + is necessary for systems where the AST may be a target on the + I2C bus. + +config AST_I2C_SLAVE_RDWR + bool "I2C Slave RD/WR via ioctl" + + help + Support I2C slave mode communications on the AST I2C bus. This + is necessary for systems where the AST may be a target on the + I2C bus. + +endchoice + +endif + config I2C_S3C2410 tristate "S3C2410 I2C Driver" depends on ARCH_S3C2410 diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 0c2c4b26cdf1..a3b523ea8a12 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_I2C_OMAP) += i2c-omap.o obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o obj-$(CONFIG_I2C_PNX) += i2c-pnx.o obj-$(CONFIG_I2C_PXA) += i2c-pxa.o +obj-$(CONFIG_I2C_AST) += i2c-ast.o obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o diff --git a/drivers/i2c/busses/i2c-ast.c b/drivers/i2c/busses/i2c-ast.c new file mode 100644 index 000000000000..bccf5a33bc1e --- /dev/null +++ b/drivers/i2c/busses/i2c-ast.c @@ -0,0 +1,1725 @@ +/* + * i2c_adap_ast.c + * + * I2C adapter for the ASPEED I2C bus access. + * + * Copyright (C) 2012-2020 ASPEED Technology Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * History: + * 2012.07.26: Initial version [Ryan Chen] + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/i2c-id.h> +#include <linux/init.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/completion.h> + +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/clk.h> + +#include <linux/dma-mapping.h> + +#include <asm/irq.h> +#include <asm/io.h> + +#if defined(CONFIG_COLDFIRE) +#include <asm/arch/regs-iic.h> +#include <asm/arch/ast_i2c.h> +#else +#include <plat/regs-iic.h> +#include <plat/ast_i2c.h> +#endif + +//AST2400 buffer mode issue , force I2C slave write use byte mode , read use buffer mode +/* Use platform_data instead of module parameters */ +/* Fast Mode = 400 kHz, Standard = 100 kHz */ +//static int clock = 100; /* Default: 100 kHz */ + + +/***************************************************************************/ +struct ast_i2c_dev { + struct ast_i2c_driver_data *ast_i2c_data; + struct device *dev; + void __iomem *reg_base; /* virtual */ + int irq; //I2C IRQ number + u32 bus_id; //for i2c dev# IRQ number check + u32 state; //I2C xfer mode state matchine + struct i2c_adapter adap; + struct buf_page *req_page; +//dma or buff mode needed + unsigned char *dma_buf; + dma_addr_t dma_addr; + +//master + int xfer_last; //cur xfer is last msgs for stop msgs + struct i2c_msg *master_msgs; //cur xfer msgs + int master_xfer_len; //cur xfer len + int master_xfer_cnt; //total xfer count + u32 master_xfer_mode; //cur xfer mode ... 
0 : no_op , master: 1 byte , 2 : buffer , 3: dma , slave : xxxx + struct completion cmd_complete; + int cmd_err; + u8 blk_r_flag; //for smbus block read + void (*do_master_xfer)(struct ast_i2c_dev *i2c_dev); +//Slave structure + u8 slave_operation; + u8 slave_event; + struct i2c_msg *slave_msgs; //cur slave xfer msgs + int slave_xfer_len; + int slave_xfer_cnt; + u32 slave_xfer_mode; //cur xfer mode ... 0 : no_op , master: 1 byte , 2 : buffer , 3: dma , slave : xxxx + void (*do_slave_xfer)(struct ast_i2c_dev *i2c_dev); +}; + +#ifdef CONFIG_AST_I2C_SLAVE_RDWR +#define I2C_S_BUF_SIZE 64 +#define I2C_S_RX_BUF_NUM 4 +#define BUFF_FULL 0xff00 +#define BUFF_ONGOING 1 + +struct i2c_msg slave_rx_msg[I2C_S_RX_BUF_NUM + 1]; +struct i2c_msg slave_tx_msg; +#endif + + +static inline void +ast_i2c_write(struct ast_i2c_dev *i2c_dev, u32 val, u32 reg) +{ +// dev_dbg(i2c_dev->dev, "ast_i2c_write : val: %x , reg : %x \n",val,reg); + writel(val, i2c_dev->reg_base+ reg); +} + +static inline u32 +ast_i2c_read(struct ast_i2c_dev *i2c_dev, u32 reg) +{ +#if 0 + u32 val = readl(i2c_dev->reg_base + reg); + printk("R : reg %x , val: %x \n",reg, val); + return val; +#else + return readl(i2c_dev->reg_base + reg); +#endif +} + +static u32 select_i2c_clock(struct ast_i2c_dev *i2c_dev) +{ + + unsigned int clk, inc = 0, div, divider_ratio; + u32 SCL_Low, SCL_High, data; + + clk = i2c_dev->ast_i2c_data->get_i2c_clock(); +// printk("pclk = %d \n",clk); + divider_ratio = clk / i2c_dev->ast_i2c_data->bus_clk; + for (div = 0; divider_ratio >= 16; div++) + { + inc |= (divider_ratio & 1); + divider_ratio >>= 1; + } + divider_ratio += inc; + SCL_Low = (divider_ratio >> 1) - 1; + SCL_High = divider_ratio - SCL_Low - 2; + data = 0x77700300 | (SCL_High << 16) | (SCL_Low << 12) | div; +// printk("I2CD04 for %d = %08X\n", target_speed, data); + return data; +} + +#ifdef CONFIG_AST_I2C_SLAVE_MODE +/* AST I2C Slave mode */ +static void ast_slave_issue_alert(struct ast_i2c_dev *i2c_dev, u8 enable) +{ + //only 
support dev0~3 + if(i2c_dev->bus_id > 3) + return; + else { + if(enable) + ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_CMD_REG) | AST_I2CD_S_ALT_EN, I2C_CMD_REG); + else + ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_CMD_REG) & ~AST_I2CD_S_ALT_EN, I2C_CMD_REG); + } +} + +static void ast_slave_mode_enable(struct ast_i2c_dev *i2c_dev, struct i2c_msg *msgs) +{ + if(msgs->buf[0] == 1) { + ast_i2c_write(i2c_dev, msgs->addr, I2C_DEV_ADDR_REG); + ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_FUN_CTRL_REG) | AST_I2CD_SLAVE_EN, I2C_FUN_CTRL_REG); + } else + ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_FUN_CTRL_REG) & ~AST_I2CD_SLAVE_EN, I2C_FUN_CTRL_REG); +} + +#endif + +static void ast_i2c_dev_init(struct ast_i2c_dev *i2c_dev) +{ + //I2CG Reset + ast_i2c_write(i2c_dev, 0, I2C_FUN_CTRL_REG); + +#ifdef CONFIG_AST_I2C_SLAVE_EEPROM + i2c_dev->ast_i2c_data->slave_init(&(i2c_dev->slave_msgs)); + ast_slave_mode_enable(i2c_dev, i2c_dev->slave_msgs); +#endif + + //Enable Master Mode + ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev, I2C_FUN_CTRL_REG) | AST_I2CD_MASTER_EN, I2C_FUN_CTRL_REG); + + + /* Set AC Timing */ +#if defined(CONFIG_ARCH_AST2400) + if(i2c_dev->ast_i2c_data->bus_clk/1000 > 400) { + printk("high speed mode enable clk [%dkhz]\n",i2c_dev->ast_i2c_data->bus_clk/1000); + ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev, I2C_FUN_CTRL_REG) | + AST_I2CD_M_HIGH_SPEED_EN | + AST_I2CD_M_SDA_DRIVE_1T_EN | + AST_I2CD_SDA_DRIVE_1T_EN + , I2C_FUN_CTRL_REG); + + /* Set AC Timing */ + ast_i2c_write(i2c_dev, 0x3, I2C_AC_TIMING_REG2); + ast_i2c_write(i2c_dev, select_i2c_clock(i2c_dev), I2C_AC_TIMING_REG1); + }else { + /* target apeed is xxKhz*/ + ast_i2c_write(i2c_dev, select_i2c_clock(i2c_dev), I2C_AC_TIMING_REG1); + ast_i2c_write(i2c_dev, AST_NO_TIMEOUT_CTRL, I2C_AC_TIMING_REG2); + } +#else + /* target apeed is xxKhz*/ + ast_i2c_write(i2c_dev, select_i2c_clock(i2c_dev), I2C_AC_TIMING_REG1); + ast_i2c_write(i2c_dev, AST_NO_TIMEOUT_CTRL, I2C_AC_TIMING_REG2); +#endif +// 
ast_i2c_write(i2c_dev, 0x77743335, I2C_AC_TIMING_REG1); +///// + + + //Clear Interrupt + ast_i2c_write(i2c_dev, 0xfffffff, I2C_INTR_STS_REG); + + //TODO +// ast_i2c_write(i2c_dev, 0xAF, I2C_INTR_CTRL_REG); + //Enable Interrupt, STOP Interrupt has bug in AST2000 + + /* Set interrupt generation of I2C controller */ + ast_i2c_write(i2c_dev, + AST_I2CD_SDA_DL_TO_INTR_EN | + AST_I2CD_BUS_RECOVER_INTR_EN | + AST_I2CD_SMBUS_ALT_INTR_EN | +// AST_I2CD_SLAVE_MATCH_INTR_EN | + AST_I2CD_SCL_TO_INTR_EN | + AST_I2CD_ABNORMAL_INTR_EN | + AST_I2CD_NORMAL_STOP_INTR_EN | + AST_I2CD_ARBIT_LOSS_INTR_EN | + AST_I2CD_RX_DOWN_INTR_EN | + AST_I2CD_TX_NAK_INTR_EN | + AST_I2CD_TX_ACK_INTR_EN, + I2C_INTR_CTRL_REG); + +} + +#ifdef CONFIG_AST_I2C_SLAVE_RDWR +//for memory buffer initial +static void ast_i2c_slave_buff_init(struct ast_i2c_dev *i2c_dev) +{ + int i; + //Tx buf 1 + slave_tx_msg.len = I2C_S_BUF_SIZE; + slave_tx_msg.buf = kzalloc(I2C_S_BUF_SIZE, GFP_KERNEL); + //Rx buf 4 + for(i=0; i<I2C_S_RX_BUF_NUM+1; i++) { + slave_rx_msg[i].addr = ~BUFF_ONGOING; + slave_rx_msg[i].flags = 0; //mean empty buffer + slave_rx_msg[i].len = I2C_S_BUF_SIZE; + slave_rx_msg[i].buf = kzalloc(I2C_S_BUF_SIZE, GFP_KERNEL); + } +} + +static void ast_i2c_slave_rdwr_xfer(struct ast_i2c_dev *i2c_dev) +{ + int i; + spinlock_t lock; + spin_lock(&lock); + + switch(i2c_dev->slave_event) { + case I2C_SLAVE_EVENT_START_WRITE: + for(i=0; i<I2C_S_RX_BUF_NUM; i++) { + if((slave_rx_msg[i].flags == 0) && (slave_rx_msg[i].addr != BUFF_ONGOING)) { + slave_rx_msg[i].addr = BUFF_ONGOING; + break; + } + } + if(i == I2C_S_RX_BUF_NUM) { + printk("RX buffer full ........use tmp msgs buff \n"); + //TODO... + } + printk("I2C_SLAVE_EVENT_START_WRITE ... %d \n", i); + + i2c_dev->slave_msgs = &slave_rx_msg[i]; + break; + case I2C_SLAVE_EVENT_START_READ: + printk("I2C_SLAVE_EVENT_START_READ ERROR .. 
not imple \n"); + i2c_dev->slave_msgs = &slave_tx_msg; + break; + case I2C_SLAVE_EVENT_WRITE: + printk("I2C_SLAVE_EVENT_WRITE next write ERROR ...\n"); + i2c_dev->slave_msgs = &slave_tx_msg; + break; + case I2C_SLAVE_EVENT_READ: + printk("I2C_SLAVE_EVENT_READ ERROR ... \n"); + i2c_dev->slave_msgs = &slave_tx_msg; + break; + case I2C_SLAVE_EVENT_NACK: + printk("I2C_SLAVE_EVENT_NACK ERROR ... \n"); + i2c_dev->slave_msgs = &slave_tx_msg; + break; + case I2C_SLAVE_EVENT_STOP: + printk("I2C_SLAVE_EVENT_STOP \n"); + for(i=0; i<I2C_S_RX_BUF_NUM; i++) { + if(slave_rx_msg[i].addr == BUFF_ONGOING) { + slave_rx_msg[i].flags = BUFF_FULL; + slave_rx_msg[i].addr = 0; + break; + } + } + + i2c_dev->slave_msgs = &slave_tx_msg; + break; + } + spin_unlock(&lock); + +} + +static int ast_i2c_slave_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs) +{ + struct ast_i2c_dev *i2c_dev = adap->algo_data; + int ret=0, i; + + switch(msgs->flags) { + case 0: +// printk("slave read \n"); + //cur_msg = get_free_msg; + for(i=0; i<I2C_S_RX_BUF_NUM; i++) { + if((slave_rx_msg[i].addr == 0) && (slave_rx_msg[i].flags == BUFF_FULL)) { + memcpy(msgs->buf, slave_rx_msg[i].buf, slave_rx_msg[i].len); + msgs->len = slave_rx_msg[i].len; + slave_rx_msg[i].flags = 0; + slave_rx_msg[i].len = 0; + break; + } + } + + if(i == I2C_S_RX_BUF_NUM) { + printk("No buffer ........ \n"); + msgs->len = 0; + ret = -1; + } + break; + case I2C_M_RD: //slave write +// printk("slave write \n"); + memcpy(msgs->buf, slave_tx_msg.buf, I2C_S_BUF_SIZE); + break; + case I2C_S_EN: + if((msgs->addr < 0x1) || (msgs->addr > 0xff)) { + ret = -1; + printk("addrsss not correct !! 
\n"); + return ret; + } + if(msgs->len != 1) printk("ERROR \n"); + ast_slave_mode_enable(i2c_dev, msgs); + break; + case I2C_S_ALT: +// printk("slave issue alt\n"); + if(msgs->len != 1) printk("ERROR \n"); + if(msgs->buf[0]==1) + ast_slave_issue_alert(i2c_dev, 1); + else + ast_slave_issue_alert(i2c_dev, 0); + break; + + default: + printk("slave xfer error \n"); + break; + + } + return ret; +} + + +#endif + +static u8 +ast_i2c_bus_error_recover(struct ast_i2c_dev *i2c_dev) +{ + u32 sts; + int r; + u32 i = 0; + + //Check 0x14's SDA and SCL status + sts = ast_i2c_read(i2c_dev,I2C_CMD_REG); + + if ((sts & AST_I2CD_SDA_LINE_STS) && (sts & AST_I2CD_SCL_LINE_STS)) { + //Means bus is idle. + dev_dbg(i2c_dev->dev, "I2C bus (%d) is idle. I2C slave doesn't exist?!\n", i2c_dev->bus_id); + return -1; + } + + dev_dbg(i2c_dev->dev, "ERROR!! I2C(%d) bus hanged, try to recovery it!\n", i2c_dev->bus_id); + + + if ((sts & AST_I2CD_SDA_LINE_STS) && !(sts & AST_I2CD_SCL_LINE_STS)) { + //if SDA == 1 and SCL == 0, it means the master is locking the bus. + //Send a stop command to unlock the bus. + dev_dbg(i2c_dev->dev, "I2C's master is locking the bus, try to stop it.\n"); +// + init_completion(&i2c_dev->cmd_complete); + + ast_i2c_write(i2c_dev, AST_I2CD_M_STOP_CMD, I2C_CMD_REG); + + r = wait_for_completion_interruptible_timeout(&i2c_dev->cmd_complete, + i2c_dev->adap.timeout*HZ); + + if(i2c_dev->cmd_err) { + dev_dbg(i2c_dev->dev, "recovery error \n"); + return -1; + } + + if (r == 0) { + dev_dbg(i2c_dev->dev, "recovery timed out\n"); + return -1; + } else { + dev_dbg(i2c_dev->dev, "Recovery successfully\n"); + return 0; + } + + + } else if (!(sts & AST_I2CD_SDA_LINE_STS)) { + //else if SDA == 0, the device is dead. We need to reset the bus + //And do the recovery command. 
+ dev_dbg(i2c_dev->dev, "I2C's slave is dead, try to recover it\n"); + //Let's retry 10 times + for (i = 0; i < 10; i++) { + ast_i2c_dev_init(i2c_dev); + //Do the recovery command BIT11 + init_completion(&i2c_dev->cmd_complete); + ast_i2c_write(i2c_dev, AST_I2CD_BUS_RECOVER_CMD_EN, I2C_CMD_REG); + + r = wait_for_completion_interruptible_timeout(&i2c_dev->cmd_complete, + i2c_dev->adap.timeout*HZ); + if (i2c_dev->cmd_err != 0) { + dev_dbg(i2c_dev->dev, "ERROR!! Failed to do recovery command(0x%08x)\n", i2c_dev->cmd_err); + return -1; + } + //Check 0x14's SDA and SCL status + sts = ast_i2c_read(i2c_dev,I2C_CMD_REG); + if (sts & AST_I2CD_SDA_LINE_STS) //Recover OK + break; + } + if (i == 10) { + dev_dbg(i2c_dev->dev, "ERROR!! recover failed\n"); + return -1; + } + } else { + dev_dbg(i2c_dev->dev, "Don't know how to handle this case?!\n"); + return -1; + } + dev_dbg(i2c_dev->dev, "Recovery successfully\n"); + return 0; +} + +static void ast_master_alert_recv(struct ast_i2c_dev *i2c_dev) +{ + printk("ast_master_alert_recv bus id %d, Disable Alt, Please Imple \n",i2c_dev->bus_id); +} + +static int ast_i2c_wait_bus_not_busy(struct ast_i2c_dev *i2c_dev) +{ + int timeout = 32; //TODO number +// printk("ast_i2c_wait_bus_not_busy \n"); + while (ast_i2c_read(i2c_dev,I2C_CMD_REG) & AST_I2CD_BUS_BUSY_STS) { + ast_i2c_bus_error_recover(i2c_dev); + if(timeout<=0) + break; + timeout--; + msleep(2); + } + + return timeout <= 0 ? 
/*
 * ast_i2c_do_dma_xfer - program the controller for the next transfer chunk
 * using the DMA engine.
 *
 * Sets both master_xfer_mode and slave_xfer_mode to DMA_XFER, then:
 *  - slave path (slave_operation == 1): copies up to AST_I2C_DMA_SIZE bytes
 *    into dma_buf for a slave-tx, or arms a slave-rx DMA of a full buffer;
 *  - master path: master_xfer_cnt == -1 sends the START + address byte
 *    (reads fall back to the byte buffer because the HW cannot DMA the
 *    address byte of a read), cnt < len programs the next rx/tx DMA chunk,
 *    and cnt == len completes the message (wakes cmd_complete).
 *
 * Interrupt enables (TX_ACK / RX_DOWN) are toggled per chunk so that only
 * the expected completion interrupt fires; on the last chunk of the last
 * message (xfer_last) a STOP is appended and the interrupt is left for the
 * combined "done|stop" ISR case instead.
 */
static void ast_i2c_do_dma_xfer(struct ast_i2c_dev *i2c_dev)
{
	u32 cmd = 0;
	int i;

	i2c_dev->master_xfer_mode = DMA_XFER;
	i2c_dev->slave_xfer_mode = DMA_XFER;

	if(i2c_dev->slave_operation == 1) {
		if(i2c_dev->slave_msgs->flags & I2C_M_RD) {
			//DMA tx mode: clamp the chunk to the DMA buffer size
			if(i2c_dev->slave_msgs->len > AST_I2C_DMA_SIZE)
				i2c_dev->slave_xfer_len = AST_I2C_DMA_SIZE;
			else
				i2c_dev->slave_xfer_len = i2c_dev->slave_msgs->len;

			dev_dbg(i2c_dev->dev, "(<--) slave tx DMA \n");
			for(i=0; i<i2c_dev->slave_xfer_len; i++)
				i2c_dev->dma_buf[i] = i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt + i];

			/* HW takes length-1 in the DMA length register */
			ast_i2c_write(i2c_dev, i2c_dev->dma_addr, I2C_DMA_BASE_REG);
			ast_i2c_write(i2c_dev, (i2c_dev->slave_xfer_len-1), I2C_DMA_LEN_REG);
			ast_i2c_write(i2c_dev, AST_I2CD_TX_DMA_ENABLE | AST_I2CD_S_TX_CMD,I2C_CMD_REG);
		} else {
			//DMA prepare rx: arm a full-buffer receive
			dev_dbg(i2c_dev->dev, "(-->) slave rx DMA \n");
			ast_i2c_write(i2c_dev, i2c_dev->dma_addr, I2C_DMA_BASE_REG);
			ast_i2c_write(i2c_dev, (AST_I2C_DMA_SIZE-1), I2C_DMA_LEN_REG);
			ast_i2c_write(i2c_dev, AST_I2CD_RX_DMA_ENABLE, I2C_CMD_REG);
		}
	} else {
		dev_dbg(i2c_dev->dev,"M cnt %d, xf len %d \n",i2c_dev->master_xfer_cnt, i2c_dev->master_msgs->len);
		if(i2c_dev->master_xfer_cnt == -1) {
			//send start
			dev_dbg(i2c_dev->dev, " %sing %d byte%s %s 0x%02x\n",
					i2c_dev->master_msgs->flags & I2C_M_RD ? "read" : "write",
					i2c_dev->master_msgs->len, i2c_dev->master_msgs->len > 1 ? "s" : "",
					i2c_dev->master_msgs->flags & I2C_M_RD ? "from" : "to", i2c_dev->master_msgs->addr);

			if(i2c_dev->master_msgs->flags & I2C_M_RD) {
//workaround .. HW can;t send start read addr with buff mode
				cmd = AST_I2CD_M_START_CMD | AST_I2CD_M_TX_CMD;
				ast_i2c_write(i2c_dev, (i2c_dev->master_msgs->addr <<1) |0x1, I2C_BYTE_BUF_REG);

//				tx_buf[0] = (i2c_dev->master_msgs->addr <<1); //+1
				i2c_dev->master_xfer_len = 1;
				ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
						AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
			} else {
				//tx: address byte goes into dma_buf[0], data follows
				cmd = AST_I2CD_M_START_CMD | AST_I2CD_M_TX_CMD | AST_I2CD_TX_DMA_ENABLE;

				i2c_dev->dma_buf[0] = (i2c_dev->master_msgs->addr <<1); //+1
				//next data write
				if((i2c_dev->master_msgs->len + 1) > AST_I2C_DMA_SIZE)
					i2c_dev->master_xfer_len = AST_I2C_DMA_SIZE;
				else
					i2c_dev->master_xfer_len = i2c_dev->master_msgs->len + 1;

				for(i = 1; i < i2c_dev->master_xfer_len; i++)
					i2c_dev->dma_buf[i] = i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt+i];

				if (i2c_dev->xfer_last == 1) {
					dev_dbg(i2c_dev->dev, "last stop \n");
					cmd |= AST_I2CD_M_STOP_CMD;
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
							~AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);

				} else {
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
				}
				ast_i2c_write(i2c_dev, i2c_dev->dma_addr, I2C_DMA_BASE_REG);
				ast_i2c_write(i2c_dev, (i2c_dev->master_xfer_len-1), I2C_DMA_LEN_REG);

			}
			ast_i2c_write(i2c_dev, cmd, I2C_CMD_REG);
			dev_dbg(i2c_dev->dev, "txfer size %d , cmd = %x \n",i2c_dev->master_xfer_len, cmd);

		} else if (i2c_dev->master_xfer_cnt < i2c_dev->master_msgs->len){
			//Next send
			if(i2c_dev->master_msgs->flags & I2C_M_RD) {
				//Rx data
				cmd = AST_I2CD_M_RX_CMD | AST_I2CD_RX_DMA_ENABLE;

				if((i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt) > AST_I2C_DMA_SIZE) {
					i2c_dev->master_xfer_len = AST_I2C_DMA_SIZE;
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);

				} else {
					i2c_dev->master_xfer_len = i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt;
					if((i2c_dev->master_msgs->flags & I2C_M_RECV_LEN) && (i2c_dev->blk_r_flag == 0)) {
						/* SMBus block read: first byte carries the length */
						dev_dbg(i2c_dev->dev, "I2C_M_RECV_LEN \n");
						ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
								AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
					} else {
#ifdef CONFIG_AST1010
						//Workaround for ast1010 can't send NACK
						if((i2c_dev->master_xfer_len == 1) && (i2c_dev->xfer_last == 1)) {
							//change to byte mode for the final NACKed byte
							cmd |= AST_I2CD_M_STOP_CMD | AST_I2CD_M_S_RX_CMD_LAST;
							cmd &= ~AST_I2CD_RX_DMA_ENABLE;
							i2c_dev->master_xfer_mode = BYTE_XFER;
							ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
									~AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);

						} else if (i2c_dev->master_xfer_len > 1) {
							i2c_dev->master_xfer_len -=1;
							ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
									AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
						} else {
							printk(" Fix Me !! \n");
						}
#else
						if(i2c_dev->xfer_last == 1) {
							dev_dbg(i2c_dev->dev, "last stop \n");
							cmd |= AST_I2CD_M_STOP_CMD;
							ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
									~AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
						} else {
							ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
									AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
						}
						//TODO check....
						cmd |= AST_I2CD_M_S_RX_CMD_LAST;
#endif
					}

				}
				ast_i2c_write(i2c_dev, i2c_dev->dma_addr, I2C_DMA_BASE_REG);
				ast_i2c_write(i2c_dev, i2c_dev->master_xfer_len-1, I2C_DMA_LEN_REG);
				ast_i2c_write(i2c_dev, cmd, I2C_CMD_REG);
				dev_dbg(i2c_dev->dev, "rxfer size %d , cmd = %x \n",i2c_dev->master_xfer_len, cmd);
			} else {
				//Tx data
				//next data write
				cmd = AST_I2CD_M_TX_CMD | AST_I2CD_TX_DMA_ENABLE;
				if((i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt) > AST_I2C_DMA_SIZE) {
					i2c_dev->master_xfer_len = AST_I2C_DMA_SIZE;
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);

				} else {
					i2c_dev->master_xfer_len = i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt;
					if(i2c_dev->xfer_last == 1) {
						dev_dbg(i2c_dev->dev, "last stop \n");
						cmd |= AST_I2CD_M_STOP_CMD;
						ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
								~AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);

					} else {
						ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
								AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
					}
				}

				for(i = 0; i < i2c_dev->master_xfer_len; i++)
					i2c_dev->dma_buf[i] = i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt + i];

				ast_i2c_write(i2c_dev, i2c_dev->dma_addr, I2C_DMA_BASE_REG);
				ast_i2c_write(i2c_dev, (i2c_dev->master_xfer_len-1), I2C_DMA_LEN_REG);
				ast_i2c_write(i2c_dev, cmd , I2C_CMD_REG);
				dev_dbg(i2c_dev->dev, "txfer size %d , cmd = %x \n",i2c_dev->master_xfer_len, cmd);

			}
		}else {
			//should send next msg: everything transferred, wake the waiter
			if(i2c_dev->master_xfer_cnt != i2c_dev->master_msgs->len)
				printk("complete rx ... ERROR \n");

			dev_dbg(i2c_dev->dev, "ast_i2c_do_byte_xfer complete \n");
			i2c_dev->cmd_err = 0;
			complete(&i2c_dev->cmd_complete);
		}

	}


}
/*
 * ast_i2c_do_pool_xfer - program the next transfer chunk using the shared
 * SRAM buffer pool ("buff mode").
 *
 * Mirrors ast_i2c_do_dma_xfer() but stages data through the controller's
 * buffer page (req_page) instead of a DMA buffer.  Data is packed into the
 * page as little-endian u32 words (4 msg bytes per word).  On AST2400 the
 * page number must additionally be selected in the function-control
 * register.  Chunks are clamped to req_page->page_size; the buffer control
 * register takes "length - 1" plus the page base offset.
 */
static void ast_i2c_do_pool_xfer(struct ast_i2c_dev *i2c_dev)
{
	u32 cmd = 0;
	int i;
	u32 *tx_buf;

	i2c_dev->master_xfer_mode = BUFF_XFER;
	i2c_dev->slave_xfer_mode = BUFF_XFER;

#if defined(CONFIG_ARCH_AST2400)
	/* AST2400: select which pool page this bus uses */
	ast_i2c_write(i2c_dev,
			(ast_i2c_read(i2c_dev, I2C_FUN_CTRL_REG) &
			~AST_I2CD_BUFF_SEL_MASK) |
			AST_I2CD_BUFF_SEL(i2c_dev->req_page->page_no),
			I2C_FUN_CTRL_REG);
#endif

	tx_buf = (u32 *) i2c_dev->req_page->page_addr;


	if(i2c_dev->slave_operation == 1) {
		if(i2c_dev->slave_msgs->flags & I2C_M_RD) {
			dev_dbg(i2c_dev->dev, "(<--) slave tx buf \n");

			if(i2c_dev->slave_msgs->len > i2c_dev->req_page->page_size)
				i2c_dev->slave_xfer_len = i2c_dev->req_page->page_size;
			else
				i2c_dev->slave_xfer_len = i2c_dev->slave_msgs->len;

			/* pack message bytes into u32 words, LSB first */
			for(i = 0; i< i2c_dev->slave_xfer_len; i++) {
				if(i%4 == 0)
					tx_buf[i/4] = 0;
				tx_buf[i/4] |= (i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt + i] << ((i%4)*8)) ;
				dev_dbg(i2c_dev->dev, "[%x] ",tx_buf[i/4]);
			}
			dev_dbg(i2c_dev->dev, "\n");

			ast_i2c_write(i2c_dev, AST_I2CD_TX_DATA_BUF_END_SET((i2c_dev->slave_xfer_len-1)) |
					AST_I2CD_BUF_BASE_ADDR_SET((i2c_dev->req_page->page_addr_point)),
					I2C_BUF_CTRL_REG);

			ast_i2c_write(i2c_dev, AST_I2CD_TX_BUFF_ENABLE | AST_I2CD_S_TX_CMD, I2C_CMD_REG);
		} else {
			//prepare for new rx
			dev_dbg(i2c_dev->dev, "(-->) slave prepare rx buf \n");
			ast_i2c_write(i2c_dev,
					AST_I2CD_RX_BUF_END_ADDR_SET((i2c_dev->req_page->page_size-1)) |
					AST_I2CD_BUF_BASE_ADDR_SET((i2c_dev->req_page->page_addr_point)),
					I2C_BUF_CTRL_REG);

			ast_i2c_write(i2c_dev, AST_I2CD_RX_BUFF_ENABLE, I2C_CMD_REG);

		}
	} else {
		dev_dbg(i2c_dev->dev,"M cnt %d, xf len %d \n",i2c_dev->master_xfer_cnt, i2c_dev->master_msgs->len);
		if(i2c_dev->master_xfer_cnt == -1) {
			//send start
			dev_dbg(i2c_dev->dev, " %sing %d byte%s %s 0x%02x\n",
					i2c_dev->master_msgs->flags & I2C_M_RD ? "read" : "write",
					i2c_dev->master_msgs->len, i2c_dev->master_msgs->len > 1 ? "s" : "",
					i2c_dev->master_msgs->flags & I2C_M_RD ? "from" : "to", i2c_dev->master_msgs->addr);

			if(i2c_dev->master_msgs->flags & I2C_M_RD) {
//workaround .. HW can;t send start read addr with buff mode
				cmd = AST_I2CD_M_START_CMD | AST_I2CD_M_TX_CMD;
				ast_i2c_write(i2c_dev, (i2c_dev->master_msgs->addr <<1) |0x1, I2C_BYTE_BUF_REG);

//				tx_buf[0] = (i2c_dev->master_msgs->addr <<1); //+1
				i2c_dev->master_xfer_len = 1;
				ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
						AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
			} else {
				cmd = AST_I2CD_M_START_CMD | AST_I2CD_M_TX_CMD | AST_I2CD_TX_BUFF_ENABLE;
				tx_buf[0] = (i2c_dev->master_msgs->addr <<1); //+1
				//next data write
				if((i2c_dev->master_msgs->len + 1) > i2c_dev->req_page->page_size)
					i2c_dev->master_xfer_len = i2c_dev->req_page->page_size;
				else
					i2c_dev->master_xfer_len = i2c_dev->master_msgs->len + 1;

				for(i = 1; i < i2c_dev->master_xfer_len; i++) {
					if(i%4 == 0)
						tx_buf[i/4] = 0;
					tx_buf[i/4] |= (i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt + i] << ((i%4)*8)) ;
				}

				if (i2c_dev->xfer_last == 1) {
					dev_dbg(i2c_dev->dev, "last stop \n");
					cmd |= AST_I2CD_M_STOP_CMD;
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
							~AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);

				} else {
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
				}
				ast_i2c_write(i2c_dev,
						AST_I2CD_TX_DATA_BUF_END_SET((i2c_dev->master_xfer_len - 1)) |
						AST_I2CD_BUF_BASE_ADDR_SET(i2c_dev->req_page->page_addr_point),
						I2C_BUF_CTRL_REG);
			}
			ast_i2c_write(i2c_dev, cmd, I2C_CMD_REG);
			dev_dbg(i2c_dev->dev, "txfer size %d , cmd = %x \n",i2c_dev->master_xfer_len, cmd);

		} else if (i2c_dev->master_xfer_cnt < i2c_dev->master_msgs->len){
			//Next send
			if(i2c_dev->master_msgs->flags & I2C_M_RD) {
				//Rx data
				cmd = AST_I2CD_M_RX_CMD | AST_I2CD_RX_BUFF_ENABLE;

				if((i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt) > i2c_dev->req_page->page_size) {
					i2c_dev->master_xfer_len = i2c_dev->req_page->page_size;
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
				} else {
					i2c_dev->master_xfer_len = i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt;
					if((i2c_dev->master_msgs->flags & I2C_M_RECV_LEN) && (i2c_dev->blk_r_flag == 0)) {
						/* SMBus block read: length byte comes first */
						dev_dbg(i2c_dev->dev, "I2C_M_RECV_LEN \n");
						ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
								AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
					} else {
						if(i2c_dev->xfer_last == 1) {
							dev_dbg(i2c_dev->dev, "last stop \n");
							cmd |= AST_I2CD_M_STOP_CMD;
							ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
									~AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
						} else {
							ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
									AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
						}
						cmd |= AST_I2CD_M_S_RX_CMD_LAST;
					}
				}
				ast_i2c_write(i2c_dev,
						AST_I2CD_RX_BUF_END_ADDR_SET((i2c_dev->master_xfer_len-1))|
						AST_I2CD_BUF_BASE_ADDR_SET((i2c_dev->req_page->page_addr_point)),
						I2C_BUF_CTRL_REG);
				ast_i2c_write(i2c_dev, cmd, I2C_CMD_REG);
				dev_dbg(i2c_dev->dev, "rxfer size %d , cmd = %x \n",i2c_dev->master_xfer_len, cmd);
			} else {
				//Tx data
				//next data write
				cmd = AST_I2CD_M_TX_CMD | AST_I2CD_TX_BUFF_ENABLE;
				if((i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt) > i2c_dev->req_page->page_size) {
					i2c_dev->master_xfer_len = i2c_dev->req_page->page_size;
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);

				} else {
					i2c_dev->master_xfer_len = i2c_dev->master_msgs->len - i2c_dev->master_xfer_cnt;
					if(i2c_dev->xfer_last == 1) {
						dev_dbg(i2c_dev->dev, "last stop \n");
						cmd |= AST_I2CD_M_STOP_CMD;
						ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
								~AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);

					} else {
						ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
								AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
					}
				}

				for(i = 0; i < i2c_dev->master_xfer_len; i++) {
					if(i%4 == 0)
						tx_buf[i/4] = 0;
					tx_buf[i/4] |= (i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt + i] << ((i%4)*8)) ;
				}
//				printk("count %x \n",ast_i2c_read(i2c_dev,I2C_CMD_REG));
				ast_i2c_write(i2c_dev,
						AST_I2CD_TX_DATA_BUF_END_SET((i2c_dev->master_xfer_len - 1)) |
						AST_I2CD_BUF_BASE_ADDR_SET(i2c_dev->req_page->page_addr_point),
						I2C_BUF_CTRL_REG);

				ast_i2c_write(i2c_dev, cmd , I2C_CMD_REG);
				dev_dbg(i2c_dev->dev, "txfer size %d , cmd = %x \n",i2c_dev->master_xfer_len, cmd);
			}
		} else {
			//should send next msg: message fully transferred, wake the waiter
			if(i2c_dev->master_xfer_cnt != i2c_dev->master_msgs->len)
				printk("complete rx ... ERROR \n");

			dev_dbg(i2c_dev->dev, "ast_i2c_do_byte_xfer complete \n");
			i2c_dev->cmd_err = 0;
			complete(&i2c_dev->cmd_complete);
		}

	}
}
/*
 * ast_i2c_do_byte_xfer - program the next transfer one byte at a time
 * through the byte buffer register.
 *
 * Fallback mode used when neither DMA nor the buffer pool is available.
 * Each call moves exactly one byte (xfer_len is fixed at 1); the ISR calls
 * back in after every TX_ACK / RX_DOWN interrupt until master_xfer_cnt
 * reaches the message length, at which point cmd_complete is signalled.
 */
static void ast_i2c_do_byte_xfer(struct ast_i2c_dev *i2c_dev)
{
	u8 *xfer_buf;
	u32 cmd = 0;

	i2c_dev->master_xfer_mode = BYTE_XFER;
	i2c_dev->master_xfer_len = 1;

	i2c_dev->slave_xfer_mode = BYTE_XFER;
	i2c_dev->slave_xfer_len = 1;

	if(i2c_dev->slave_operation == 1) {
		dev_dbg(i2c_dev->dev,"S cnt %d, xf len %d \n",i2c_dev->slave_xfer_cnt, i2c_dev->slave_msgs->len);
		if(i2c_dev->slave_msgs->flags & I2C_M_RD) {
			//READ <-- TX: master reads, slave transmits the next byte
			dev_dbg(i2c_dev->dev, "(<--) slave(tx) buf %d [%x]\n", i2c_dev->slave_xfer_cnt, i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt]);
			ast_i2c_write(i2c_dev, i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt], I2C_BYTE_BUF_REG);
			ast_i2c_write(i2c_dev, AST_I2CD_S_TX_CMD, I2C_CMD_REG);
		} else {
			// Write -->Rx
			//no need to handle in byte mode: HW receives into the byte buffer
			dev_dbg(i2c_dev->dev, "(-->) slave(rx) BYTE do nothing\n");

		}
	} else {
		dev_dbg(i2c_dev->dev,"M cnt %d, xf len %d \n",i2c_dev->master_xfer_cnt, i2c_dev->master_msgs->len);
		if(i2c_dev->master_xfer_cnt == -1) {
			//first start: send START + address byte
			dev_dbg(i2c_dev->dev, " %sing %d byte%s %s 0x%02x\n",
					i2c_dev->master_msgs->flags & I2C_M_RD ? "read" : "write",
					i2c_dev->master_msgs->len, i2c_dev->master_msgs->len > 1 ? "s" : "",
					i2c_dev->master_msgs->flags & I2C_M_RD ? "from" : "to", i2c_dev->master_msgs->addr);


			if(i2c_dev->master_msgs->flags & I2C_M_RD)
				ast_i2c_write(i2c_dev, (i2c_dev->master_msgs->addr <<1) |0x1, I2C_BYTE_BUF_REG);
			else
				ast_i2c_write(i2c_dev, (i2c_dev->master_msgs->addr <<1), I2C_BYTE_BUF_REG);

			ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
					AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);

			ast_i2c_write(i2c_dev, AST_I2CD_M_TX_CMD | AST_I2CD_M_START_CMD, I2C_CMD_REG);


		} else if (i2c_dev->master_xfer_cnt < i2c_dev->master_msgs->len){
			xfer_buf = i2c_dev->master_msgs->buf;
			if(i2c_dev->master_msgs->flags & I2C_M_RD) {
				//Rx data
				cmd = AST_I2CD_M_RX_CMD;
				if((i2c_dev->master_msgs->flags & I2C_M_RECV_LEN) && (i2c_dev->master_xfer_cnt == 0)) {
					/* SMBus block read: first received byte is the length */
					dev_dbg(i2c_dev->dev, "I2C_M_RECV_LEN \n");
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);

				} else if((i2c_dev->xfer_last == 1) && (i2c_dev->master_xfer_cnt + 1 == i2c_dev->master_msgs->len)) {
					/* final byte of the final message: NACK + STOP */
					cmd |= AST_I2CD_M_S_RX_CMD_LAST | AST_I2CD_M_STOP_CMD;
					// disable rx_dwn isr
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
							~AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
				} else {
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
				}

				dev_dbg(i2c_dev->dev, "(<--) rx byte, cmd = %x \n",cmd);

				ast_i2c_write(i2c_dev, cmd, I2C_CMD_REG);


			} else {
				//Tx data
				dev_dbg(i2c_dev->dev, "(-->) xfer byte data index[%02x]:%02x \n",i2c_dev->master_xfer_cnt, *(xfer_buf + i2c_dev->master_xfer_cnt));
				ast_i2c_write(i2c_dev, *(xfer_buf + i2c_dev->master_xfer_cnt), I2C_BYTE_BUF_REG);
				if((i2c_dev->xfer_last == 1) && (i2c_dev->master_xfer_cnt + 1 == i2c_dev->master_msgs->len)) {
					/* final byte of the final message: append STOP */
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) &
							~AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
					ast_i2c_write(i2c_dev, AST_I2CD_M_TX_CMD | AST_I2CD_M_STOP_CMD, I2C_CMD_REG);
				} else {
					ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
							AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
					ast_i2c_write(i2c_dev, AST_I2CD_M_TX_CMD, I2C_CMD_REG);
				}
			}

		} else {
			//should send next msg: message done, wake the waiter
			if(i2c_dev->master_xfer_cnt != i2c_dev->master_msgs->len)
				printk("CNT ERROR \n");

			dev_dbg(i2c_dev->dev, "ast_i2c_do_byte_xfer complete \n");
			i2c_dev->cmd_err = 0;
			complete(&i2c_dev->cmd_complete);

		}
	}

}
/*
 * ast_i2c_slave_xfer_done - ISR bottom half for a completed slave chunk.
 *
 * Determines how many bytes the hardware actually moved (per xfer mode),
 * copies received data into slave_msgs, advances slave_xfer_cnt, hands
 * completed messages to the slave callback (or the RDWR fifo), and arms
 * the next chunk.  When the controller state machine reports IDLE the
 * slave operation is torn down and any pool page is released.
 */
static void ast_i2c_slave_xfer_done(struct ast_i2c_dev *i2c_dev)
{
	u32 xfer_len;
	int i;
	u8 *rx_buf;

	dev_dbg(i2c_dev->dev, "ast_i2c_slave_xfer_done [%d]\n",i2c_dev->slave_xfer_mode);

	if (i2c_dev->slave_msgs->flags & I2C_M_RD) {
		//tx done , only check tx count ...
		/* NOTE(review): the mode checks below test master_xfer_mode in the
		 * slave path.  The do_*_xfer helpers always set both modes to the
		 * same value, so behavior matches, but slave_xfer_mode looks like
		 * the intended field — confirm before changing. */
		if(i2c_dev->master_xfer_mode == BYTE_XFER) {
			xfer_len = 1;
		} else if (i2c_dev->master_xfer_mode == BUFF_XFER) {
			xfer_len = AST_I2CD_TX_DATA_BUF_GET(ast_i2c_read(i2c_dev, I2C_BUF_CTRL_REG));
			xfer_len++;
			dev_dbg(i2c_dev->dev,"S tx buff done len %d \n",xfer_len);
		} else {
			//DMA mode: remaining count of 0 means everything was sent
			xfer_len = ast_i2c_read(i2c_dev, I2C_DMA_LEN_REG);
			if(xfer_len == 0)
				xfer_len = i2c_dev->slave_xfer_len;
			else
				xfer_len = i2c_dev->slave_xfer_len - xfer_len - 1;

			dev_dbg(i2c_dev->dev,"S tx rx dma done len %d \n",xfer_len);
		}

	} else {
		//rx done
		if(i2c_dev->slave_xfer_mode == BYTE_XFER) {
			//TODO
			xfer_len = 1;
			if(i2c_dev->slave_event == I2C_SLAVE_EVENT_STOP) {
				/* STOP: terminate the message at the current count */
				i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt] = 0;
				i2c_dev->slave_msgs->len = i2c_dev->slave_xfer_cnt;
			} else {
				/* received byte lives in bits [15:8] of the byte buffer */
				i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt] = ast_i2c_read(i2c_dev,I2C_BYTE_BUF_REG) >> 8;
			}
			dev_dbg(i2c_dev->dev,"rx buff %d, [%x] \n",i2c_dev->slave_xfer_cnt ,i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt]);
		} else if (i2c_dev->master_xfer_mode == BUFF_XFER) {
			xfer_len = AST_I2CD_RX_BUF_ADDR_GET(ast_i2c_read(i2c_dev, I2C_BUF_CTRL_REG));
			if(xfer_len == 0)
				xfer_len = AST_I2C_PAGE_SIZE;

			dev_dbg(i2c_dev->dev,"rx buff done len %d \n",xfer_len);

			rx_buf = (u8 *)i2c_dev->req_page->page_addr;

			for(i=0;i<xfer_len;i++) {
				i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt+i] = rx_buf[i];
				dev_dbg(i2c_dev->dev,"%d, [%x] \n",i2c_dev->slave_xfer_cnt+i ,i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt+i]);
			}

		} else {
			//RX DMA DOWN
			xfer_len = ast_i2c_read(i2c_dev, I2C_DMA_LEN_REG);
			if(xfer_len == 0)
				xfer_len = i2c_dev->slave_xfer_len;
			else
				xfer_len = i2c_dev->slave_xfer_len - xfer_len - 1;

			dev_dbg(i2c_dev->dev, " rx dma done len %d \n", xfer_len);

			for(i=0;i<xfer_len;i++) {
				i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt+i] = i2c_dev->dma_buf[i];
				dev_dbg(i2c_dev->dev,"%d, [%x] \n",i2c_dev->slave_xfer_cnt+i ,i2c_dev->slave_msgs->buf[i2c_dev->slave_xfer_cnt+i]);
			}
		}

	}

	if(xfer_len !=i2c_dev->slave_xfer_len) {
		//TODO..
		printk(" **slave xfer error ====\n");
		//should goto stop....
	} else
		i2c_dev->slave_xfer_cnt += i2c_dev->slave_xfer_len;


	if((i2c_dev->slave_event == I2C_SLAVE_EVENT_NACK) || (i2c_dev->slave_event == I2C_SLAVE_EVENT_STOP)) {
		/* transaction boundary: deliver the message and reset the count */
#ifdef CONFIG_AST_I2C_SLAVE_RDWR
		ast_i2c_slave_rdwr_xfer(i2c_dev);
#else
		i2c_dev->ast_i2c_data->slave_xfer(i2c_dev->slave_event, &(i2c_dev->slave_msgs));
#endif
		i2c_dev->slave_xfer_cnt = 0;
	} else {
		if(i2c_dev->slave_xfer_cnt == i2c_dev->slave_msgs->len) {
			dev_dbg(i2c_dev->dev,"slave next msgs \n");
#ifdef CONFIG_AST_I2C_SLAVE_RDWR
			ast_i2c_slave_rdwr_xfer(i2c_dev);
#else
			i2c_dev->ast_i2c_data->slave_xfer(i2c_dev->slave_event, &(i2c_dev->slave_msgs));
#endif

			i2c_dev->slave_xfer_cnt = 0;
		}
		/* arm the next slave chunk */
		i2c_dev->do_slave_xfer(i2c_dev);
	}


	if(AST_I2CD_IDLE == i2c_dev->state) {
		dev_dbg(i2c_dev->dev,"** Slave go IDLE **\n");
		i2c_dev->slave_operation = 0;

		if(i2c_dev->slave_xfer_mode == BUFF_XFER) {
			i2c_dev->ast_i2c_data->free_pool_buff_page(i2c_dev->req_page);
		}

	}

}
//TX/Rx Done
/*
 * ast_i2c_master_xfer_done - ISR bottom half for a completed master chunk.
 *
 * Works out how many bytes the hardware moved for the current mode
 * (byte / buffer-pool / DMA), copies received data into master_msgs,
 * handles the SMBus I2C_M_RECV_LEN length byte (once, guarded by
 * blk_r_flag), advances master_xfer_cnt and either kicks off the next
 * chunk via do_master_xfer() or completes cmd_complete.
 */
static void ast_i2c_master_xfer_done(struct ast_i2c_dev *i2c_dev)
{
	u32 xfer_len;
	int i;
	u8 *pool_buf;

	dev_dbg(i2c_dev->dev, "ast_i2c_master_xfer_done mode[%d]\n",i2c_dev->master_xfer_mode);

	if (i2c_dev->master_msgs->flags & I2C_M_RD) {
		if(i2c_dev->master_xfer_cnt == -1) {
			/* this completion was the address byte, not data */
			xfer_len = 1;
			goto next_xfer;
		}
		if(i2c_dev->master_xfer_mode == BYTE_XFER) {
			if ((i2c_dev->master_msgs->flags & I2C_M_RECV_LEN) && (i2c_dev->blk_r_flag == 0)) {
				/* first byte of an SMBus block read is the length */
				i2c_dev->master_msgs->len += (ast_i2c_read(i2c_dev,I2C_BYTE_BUF_REG) & AST_I2CD_RX_BYTE_BUFFER) >> 8;
				i2c_dev->blk_r_flag = 1;
				dev_dbg(i2c_dev->dev, "I2C_M_RECV_LEN %d \n", i2c_dev->master_msgs->len -1);
			}
			xfer_len = 1;
			i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt] = (ast_i2c_read(i2c_dev,I2C_BYTE_BUF_REG) & AST_I2CD_RX_BYTE_BUFFER) >> 8;
		} else if (i2c_dev->master_xfer_mode == BUFF_XFER) {
			pool_buf = (u8 *)i2c_dev->req_page->page_addr;
			xfer_len = AST_I2CD_RX_BUF_ADDR_GET(ast_i2c_read(i2c_dev, I2C_BUF_CTRL_REG));

			if(xfer_len == 0)
				xfer_len = AST_I2C_PAGE_SIZE;

			for(i = 0; i< xfer_len; i++) {
				i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt + i] = pool_buf[i];
				dev_dbg(i2c_dev->dev, "rx %d buff[%x]\n",i2c_dev->master_xfer_cnt+i, i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt+i]);
			}

			if ((i2c_dev->master_msgs->flags & I2C_M_RECV_LEN) && (i2c_dev->blk_r_flag == 0)) {
				i2c_dev->master_msgs->len += pool_buf[0];
				i2c_dev->blk_r_flag = 1;
				dev_dbg(i2c_dev->dev, "I2C_M_RECV_LEN %d \n", i2c_dev->master_msgs->len -1);
			}
		} else {
			//DMA Mode: remaining count of 0 means the whole chunk arrived
			xfer_len = ast_i2c_read(i2c_dev, I2C_DMA_LEN_REG);

			if(xfer_len == 0)
				xfer_len = i2c_dev->master_xfer_len;
			else
				xfer_len = i2c_dev->master_xfer_len - xfer_len - 1;

			for(i = 0; i < xfer_len; i++) {
				i2c_dev->master_msgs->buf[i2c_dev->master_xfer_cnt + i] = i2c_dev->dma_buf[i];
				dev_dbg(i2c_dev->dev, "buf[%x] \n", i2c_dev->dma_buf[i]);
				/* NOTE(review): dma_buf[i+1] reads one past the chunk on the
				 * last iteration; debug-only, but worth confirming/removing. */
				dev_dbg(i2c_dev->dev, "buf[%x] \n", i2c_dev->dma_buf[i+1]);
			}

			if ((i2c_dev->master_msgs->flags & I2C_M_RECV_LEN) && (i2c_dev->blk_r_flag == 0)) {
				i2c_dev->master_msgs->len += i2c_dev->dma_buf[0];
				i2c_dev->blk_r_flag = 1;
				dev_dbg(i2c_dev->dev, "I2C_M_RECV_LEN %d \n", i2c_dev->master_msgs->len -1);
			}

		}

	}else {
		if(i2c_dev->master_xfer_mode == BYTE_XFER) {
			xfer_len = 1;
		} else if(i2c_dev->master_xfer_mode == BUFF_XFER) {
			xfer_len = AST_I2CD_TX_DATA_BUF_GET(ast_i2c_read(i2c_dev, I2C_BUF_CTRL_REG));
			xfer_len++;
			dev_dbg(i2c_dev->dev,"tx buff done len %d \n",xfer_len);
		} else {
			//DMA
			xfer_len = ast_i2c_read(i2c_dev, I2C_DMA_LEN_REG);
			if(xfer_len == 0)
				xfer_len = i2c_dev->master_xfer_len;
			else
				xfer_len = i2c_dev->master_xfer_len - xfer_len - 1;

			dev_dbg(i2c_dev->dev,"tx dma done len %d \n",xfer_len);
		}
	}

next_xfer:

	if(xfer_len !=i2c_dev->master_xfer_len) {
		//TODO..
		printk(" ** xfer error \n");
		//should goto stop....
		i2c_dev->cmd_err = 1;
		goto done_out;
	} else
		i2c_dev->master_xfer_cnt += i2c_dev->master_xfer_len;

	if(i2c_dev->master_xfer_cnt != i2c_dev->master_msgs->len) {
		dev_dbg(i2c_dev->dev,"do next cnt \n");
		i2c_dev->do_master_xfer(i2c_dev);
	} else {
#if 0
		int i;
		printk(" ===== \n");
		for(i=0;i<i2c_dev->master_msgs->len;i++)
			printk("rx buf i,[%x]\n",i,i2c_dev->master_msgs->buf[i]);
		printk(" ===== \n");
#endif
		i2c_dev->cmd_err = 0;

		/* done_out is deliberately inside this else-block: the error path
		 * jumps here to share the completion, skipping cmd_err = 0. */
done_out:
		dev_dbg(i2c_dev->dev,"msgs complete \n");
		complete(&i2c_dev->cmd_complete);
	}
}
/*
 * ast_i2c_slave_addr_match - start of a slave transaction: our address
 * matched on the bus.
 *
 * Reads the matched address byte (bit 0 selects read/write direction),
 * notifies the slave layer of the START event, picks the transfer engine
 * (byte / DMA / pool, falling back to byte mode if no pool page is free),
 * and arms the first slave chunk.
 */
static void ast_i2c_slave_addr_match(struct ast_i2c_dev *i2c_dev)
{
	u8 match;

	i2c_dev->slave_operation = 1;
	i2c_dev->slave_xfer_cnt = 0;
	/* matched address byte lives in bits [15:8] of the byte buffer */
	match = ast_i2c_read(i2c_dev,I2C_BYTE_BUF_REG) >> 8;
	i2c_dev->slave_msgs->buf[0] = match;
	dev_dbg(i2c_dev->dev, "S Start Addr match [%x] \n",match);


	if(match & 1) {
		i2c_dev->slave_event = I2C_SLAVE_EVENT_START_READ;
	} else {
		i2c_dev->slave_event = I2C_SLAVE_EVENT_START_WRITE;
	}

#ifdef CONFIG_AST_I2C_SLAVE_RDWR
	ast_i2c_slave_rdwr_xfer(i2c_dev);
	i2c_dev->slave_msgs->buf[0] = match;
	i2c_dev->slave_xfer_cnt = 1;
#else
	i2c_dev->ast_i2c_data->slave_xfer(i2c_dev->slave_event, &(i2c_dev->slave_msgs));
	i2c_dev->slave_xfer_cnt = 0;
#endif

	//request the transfer engine for this slave transaction
	if(i2c_dev->ast_i2c_data->slave_dma == BYTE_MODE)
		i2c_dev->do_slave_xfer = ast_i2c_do_byte_xfer;
	else if (i2c_dev->ast_i2c_data->slave_dma == DMA_MODE)
		i2c_dev->do_slave_xfer = ast_i2c_do_dma_xfer;
	else {
		if(i2c_dev->ast_i2c_data->request_pool_buff_page(&(i2c_dev->req_page)) == 0)
			i2c_dev->do_slave_xfer = ast_i2c_do_pool_xfer;
		else
			i2c_dev->do_slave_xfer = ast_i2c_do_byte_xfer;
	}

	i2c_dev->do_slave_xfer(i2c_dev);

}

/*
 * i2c_ast_handler - shared interrupt handler for one I2C bus.
 *
 * Checks the global interrupt status register for this bus's bit, latches
 * the controller state machine, then dispatches on the exact interrupt
 * status combination.  Each case acknowledges its bits in
 * I2C_INTR_STS_REG and routes to the master or slave done-handler based
 * on slave_operation.  SMBus Alert is handled separately before the
 * switch because it can combine with any other status.
 */
static irqreturn_t i2c_ast_handler(int this_irq, void *dev_id)
{
	u32 sts;

	struct ast_i2c_dev *i2c_dev = dev_id;
	u32 isr_sts = readl(i2c_dev->ast_i2c_data->reg_gr);

	/* not our bus: let the next handler on the shared line run */
	if(!(isr_sts & (1<< i2c_dev->bus_id)))
		return IRQ_NONE;

	i2c_dev->state = (ast_i2c_read(i2c_dev,I2C_CMD_REG) >> 19) & 0xf;
	sts = ast_i2c_read(i2c_dev,I2C_INTR_STS_REG);
//	printk("ISR : %x , sts [%x]\n",sts , xfer_sts);
//	dev_dbg(i2c_dev->dev,"ISR : %x , sts [%x]\n",sts , xfer_sts);

//	dev_dbg(i2c_dev->dev,"sts machine %x, slave_op %d \n", xfer_sts,i2c_dev->slave_operation);

	if(AST_I2CD_INTR_STS_SMBUS_ALT & sts) {
		dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_SMBUS_ALT= %x\n",sts);
		//Disable ALT INT
		ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev, I2C_INTR_CTRL_REG) &
					~AST_I2CD_SMBUS_ALT_INTR_EN,
					I2C_INTR_CTRL_REG);
		ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_SMBUS_ALT, I2C_INTR_STS_REG);
		ast_master_alert_recv(i2c_dev);
		sts &= ~AST_I2CD_SMBUS_ALT_INTR_EN;
	}

	switch(sts) {
		case AST_I2CD_INTR_STS_TX_ACK:
			if(i2c_dev->slave_operation == 1) {
				i2c_dev->slave_event = I2C_SLAVE_EVENT_READ;
				ast_i2c_slave_xfer_done(i2c_dev);
				dev_dbg(i2c_dev->dev, "S clear isr: AST_I2CD_INTR_STS_TX_ACK = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_TX_ACK, I2C_INTR_STS_REG);
			} else {
				dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_TX_ACK = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_TX_ACK, I2C_INTR_STS_REG);
				ast_i2c_master_xfer_done(i2c_dev);
			}
			break;
		case AST_I2CD_INTR_STS_TX_ACK | AST_I2CD_INTR_STS_NORMAL_STOP:
			/* last tx chunk acked together with the STOP condition */
			if((i2c_dev->xfer_last == 1) && (i2c_dev->slave_operation == 0)) {
				dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_TX_ACK | AST_I2CD_INTR_STS_NORMAL_STOP= %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_TX_ACK | AST_I2CD_INTR_STS_NORMAL_STOP, I2C_INTR_STS_REG);
				//take care: re-enable TX_ACK for the next transaction
				ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
						AST_I2CD_TX_ACK_INTR_EN, I2C_INTR_CTRL_REG);
				ast_i2c_master_xfer_done(i2c_dev);

			} else {
				printk("TODO ...\n");
			}
			break;

		case AST_I2CD_INTR_STS_TX_NAK:
			if(i2c_dev->slave_operation == 1) {
				i2c_dev->slave_event = I2C_SLAVE_EVENT_NACK;
				ast_i2c_slave_xfer_done(i2c_dev);
				dev_dbg(i2c_dev->dev, "S clear isr: AST_I2CD_INTR_STS_TX_NAK = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_TX_NAK, I2C_INTR_STS_REG);

			} else {
				dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_TX_NAK = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_TX_NAK, I2C_INTR_STS_REG);
				if(i2c_dev->master_msgs->flags == I2C_M_IGNORE_NAK) {
					dev_dbg(i2c_dev->dev, "I2C_M_IGNORE_NAK next send\n");
					i2c_dev->cmd_err = 0;
				} else {
					dev_dbg(i2c_dev->dev, "NAK error\n");
					i2c_dev->cmd_err = AST_I2CD_INTR_STS_TX_NAK;
				}
				complete(&i2c_dev->cmd_complete);
			}
			break;

		case AST_I2CD_INTR_STS_TX_NAK | AST_I2CD_INTR_STS_NORMAL_STOP:
			if(i2c_dev->slave_operation == 1) {
				printk("SLAVE TODO .... \n");

			} else {
				dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_TX_NAK| AST_I2CD_INTR_STS_NORMAL_STOP = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_TX_NAK | AST_I2CD_INTR_STS_NORMAL_STOP, I2C_INTR_STS_REG);
				dev_dbg(i2c_dev->dev, "M TX NAK | NORMAL STOP \n");
				i2c_dev->cmd_err = AST_I2CD_INTR_STS_TX_NAK | AST_I2CD_INTR_STS_NORMAL_STOP;
				complete(&i2c_dev->cmd_complete);
			}
			break;

		//Issue : Workaround for I2C slave mode
		case AST_I2CD_INTR_STS_TX_NAK | AST_I2CD_INTR_STS_SLAVE_MATCH:
			if(i2c_dev->slave_operation == 1) {
				i2c_dev->slave_event = I2C_SLAVE_EVENT_NACK;
				ast_i2c_slave_xfer_done(i2c_dev);
				ast_i2c_slave_addr_match(i2c_dev);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_TX_NAK | AST_I2CD_INTR_STS_SLAVE_MATCH , I2C_INTR_STS_REG);
			} else {
				printk("ERROR !!!!\n");
			}
			break;
		case AST_I2CD_INTR_STS_RX_DOWN | AST_I2CD_INTR_STS_SLAVE_MATCH:
			ast_i2c_slave_addr_match(i2c_dev);
			dev_dbg(i2c_dev->dev, "S clear isr: AST_I2CD_INTR_STS_RX_DOWN | AST_I2CD_INTR_STS_SLAVE_MATCH = %x\n",sts);
			ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_RX_DOWN | AST_I2CD_INTR_STS_SLAVE_MATCH, I2C_INTR_STS_REG);
			break;

		case AST_I2CD_INTR_STS_RX_DOWN:
			if(i2c_dev->slave_operation == 1) {
				i2c_dev->slave_event = I2C_SLAVE_EVENT_WRITE;
				ast_i2c_slave_xfer_done(i2c_dev);
				dev_dbg(i2c_dev->dev, "S clear isr: AST_I2CD_INTR_STS_RX_DOWN = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_RX_DOWN, I2C_INTR_STS_REG);
			} else {
				dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_RX_DOWN = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_RX_DOWN, I2C_INTR_STS_REG);
				ast_i2c_master_xfer_done(i2c_dev);

			}
			break;

		case AST_I2CD_INTR_STS_NORMAL_STOP:
			if(i2c_dev->slave_operation == 1) {
				i2c_dev->slave_event = I2C_SLAVE_EVENT_STOP;
				ast_i2c_slave_xfer_done(i2c_dev);
				dev_dbg(i2c_dev->dev, "S clear isr: AST_I2CD_INTR_STS_NORMAL_STOP = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_NORMAL_STOP, I2C_INTR_STS_REG);
				dev_dbg(i2c_dev->dev, "state [%x] \n",i2c_dev->state);
			} else {
				dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_NORMAL_STOP = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_NORMAL_STOP, I2C_INTR_STS_REG);
				i2c_dev->cmd_err = 0;
				complete(&i2c_dev->cmd_complete);
			}
			break;
		case (AST_I2CD_INTR_STS_RX_DOWN | AST_I2CD_INTR_STS_NORMAL_STOP):
			/* last rx chunk completed together with the STOP condition */
			if((i2c_dev->xfer_last == 1) && (i2c_dev->slave_operation == 0)) {
				dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_RX_DOWN | AST_I2CD_INTR_STS_NORMAL_STOP = %x\n",sts);
				ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_RX_DOWN | AST_I2CD_INTR_STS_NORMAL_STOP, I2C_INTR_STS_REG);
				//take care: re-enable RX_DOWN for the next transaction
				ast_i2c_write(i2c_dev, ast_i2c_read(i2c_dev,I2C_INTR_CTRL_REG) |
						AST_I2CD_RX_DOWN_INTR_EN, I2C_INTR_CTRL_REG);
				ast_i2c_master_xfer_done(i2c_dev);
			} else {
				printk("TODO .. .. ..\n");
			}
			break;
		case AST_I2CD_INTR_STS_ARBIT_LOSS:
			dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_ARBIT_LOSS = %x\n",sts);
			ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_ARBIT_LOSS, I2C_INTR_STS_REG);
			i2c_dev->cmd_err = AST_I2CD_INTR_STS_ARBIT_LOSS;
			complete(&i2c_dev->cmd_complete);
			break;
		case AST_I2CD_INTR_STS_ABNORMAL:
			i2c_dev->cmd_err = AST_I2CD_INTR_STS_ABNORMAL;
			complete(&i2c_dev->cmd_complete);
			break;
		case AST_I2CD_INTR_STS_SCL_TO:
			i2c_dev->cmd_err = AST_I2CD_INTR_STS_SCL_TO;
			complete(&i2c_dev->cmd_complete);

			break;
		case AST_I2CD_INTR_STS_GCALL_ADDR:
			i2c_dev->cmd_err = AST_I2CD_INTR_STS_GCALL_ADDR;
			complete(&i2c_dev->cmd_complete);

			break;
		case AST_I2CD_INTR_STS_SMBUS_DEF_ADDR:
			break;
		case AST_I2CD_INTR_STS_SMBUS_DEV_ALT:

			break;

		case AST_I2CD_INTR_STS_SMBUS_ARP_ADDR:
			break;
		case AST_I2CD_INTR_STS_SDA_DL_TO:
			break;
		case AST_I2CD_INTR_STS_BUS_RECOVER:
			dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_BUS_RECOVER= %x\n",sts);
			ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_BUS_RECOVER, I2C_INTR_STS_REG);
			i2c_dev->cmd_err = 0;
			complete(&i2c_dev->cmd_complete);
			break;
		default:
			/* NOTE(review): "%x" is used for reg_gr here — if reg_gr is an
			 * __iomem pointer this should be %p; confirm its type. */
			if(sts)
				printk("GR %x : No one care : %x, bus_id %d\n",i2c_dev->ast_i2c_data->reg_gr, sts, i2c_dev->bus_id);
			return IRQ_NONE;
	}

	return IRQ_HANDLED;

}
..\n"); + } + break; + case AST_I2CD_INTR_STS_ARBIT_LOSS: + dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_ARBIT_LOSS = %x\n",sts); + ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_ARBIT_LOSS, I2C_INTR_STS_REG); + i2c_dev->cmd_err = AST_I2CD_INTR_STS_ARBIT_LOSS; + complete(&i2c_dev->cmd_complete); + break; + case AST_I2CD_INTR_STS_ABNORMAL: + i2c_dev->cmd_err = AST_I2CD_INTR_STS_ABNORMAL; + complete(&i2c_dev->cmd_complete); + break; + case AST_I2CD_INTR_STS_SCL_TO: + i2c_dev->cmd_err = AST_I2CD_INTR_STS_SCL_TO; + complete(&i2c_dev->cmd_complete); + + break; + case AST_I2CD_INTR_STS_GCALL_ADDR: + i2c_dev->cmd_err = AST_I2CD_INTR_STS_GCALL_ADDR; + complete(&i2c_dev->cmd_complete); + + break; + case AST_I2CD_INTR_STS_SMBUS_DEF_ADDR: + break; + case AST_I2CD_INTR_STS_SMBUS_DEV_ALT: + + break; + + case AST_I2CD_INTR_STS_SMBUS_ARP_ADDR: + break; + case AST_I2CD_INTR_STS_SDA_DL_TO: + break; + case AST_I2CD_INTR_STS_BUS_RECOVER: + dev_dbg(i2c_dev->dev, "M clear isr: AST_I2CD_INTR_STS_BUS_RECOVER= %x\n",sts); + ast_i2c_write(i2c_dev, AST_I2CD_INTR_STS_BUS_RECOVER, I2C_INTR_STS_REG); + i2c_dev->cmd_err = 0; + complete(&i2c_dev->cmd_complete); + break; + default: + if(sts) + printk("GR %x : No one care : %x, bus_id %d\n",i2c_dev->ast_i2c_data->reg_gr, sts, i2c_dev->bus_id); + return IRQ_NONE; + } + + return IRQ_HANDLED; + +} + +static int ast_i2c_do_msgs_xfer(struct ast_i2c_dev *i2c_dev, struct i2c_msg *msgs, int num) +{ + int i; + int ret = 1; + + //request + if(i2c_dev->ast_i2c_data->master_dma == BYTE_MODE) + i2c_dev->do_master_xfer = ast_i2c_do_byte_xfer; + else if (i2c_dev->ast_i2c_data->master_dma == DMA_MODE) + i2c_dev->do_master_xfer = ast_i2c_do_dma_xfer; + else { + if(i2c_dev->ast_i2c_data->request_pool_buff_page(&(i2c_dev->req_page)) == 0) + i2c_dev->do_master_xfer = ast_i2c_do_pool_xfer; + else + i2c_dev->do_master_xfer = ast_i2c_do_byte_xfer; + } + +// printk("start xfer ret = %d \n",ret); + + for (i=0; i < num; i++) { + i2c_dev->blk_r_flag = 0; + 
/*
 * ast_i2c_xfer - i2c_algorithm master_xfer entry point.
 *
 * Waits for the bus to go idle (attempting recovery if stuck), then runs
 * the message list via ast_i2c_do_msgs_xfer(), retrying up to
 * adap->retries times on -EAGAIN (NAK / arbitration loss).  Returns the
 * number of messages transferred, or a negative errno.
 */
static int ast_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct ast_i2c_dev *i2c_dev = adap->algo_data;
	int ret, i;
	int sts;

	sts = ast_i2c_read(i2c_dev,I2C_CMD_REG);
	dev_dbg(i2c_dev->dev, "state[%x],SCL[%d],SDA[%d],BUS[%d]\n", (sts >> 19) & 0xf, (sts >> 18) & 0x1,(sts >> 17) & 0x1,(sts >> 16) & 1);
	/*
	 * Wait for the bus to become free.
	 */

	ret = ast_i2c_wait_bus_not_busy(i2c_dev);
	if (ret) {
		dev_err(&i2c_dev->adap.dev, "i2c_ast: timeout waiting for bus free\n");
		goto out;
	}

	for (i = adap->retries; i >= 0; i--) {

		ret = ast_i2c_do_msgs_xfer(i2c_dev, msgs, num);
		if (ret != -EAGAIN)
			goto out;
		dev_dbg(&adap->dev, "Retrying transmission [%d]\n",i);
		udelay(100);
	}

	ret = -EREMOTEIO;
out:

	return ret;
}

/*
 * ast_i2c_functionality - advertise supported features: plain I2C,
 * SMBus emulation, and SMBus block data (I2C_M_RECV_LEN handling).
 */
static u32 ast_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

/* Algorithm hooks registered with the i2c core for each adapter. */
static const struct i2c_algorithm i2c_ast_algorithm = {
	.master_xfer	= ast_i2c_xfer,
#ifdef CONFIG_AST_I2C_SLAVE_RDWR
	.slave_xfer	= ast_i2c_slave_xfer,
#endif
	.functionality	= ast_i2c_functionality,
};

/*
 * ast_i2c_probe - platform-device probe: allocate the per-bus state and
 * set up the selected transfer mode (buffer pool / DMA / byte).
 * (Definition continues beyond this chunk.)
 */
static int ast_i2c_probe(struct platform_device *pdev)
{
	struct ast_i2c_dev *i2c_dev;
	struct resource *res;
	int ret;

	dev_dbg(&pdev->dev, "ast_i2c_probe \n");

	i2c_dev = kzalloc(sizeof(struct ast_i2c_dev), GFP_KERNEL);
	if (!i2c_dev) {
		ret = -ENOMEM;
		goto err_no_mem;
	}

	i2c_dev->ast_i2c_data = pdev->dev.platform_data;
	if(i2c_dev->ast_i2c_data->master_dma == BUFF_MODE) {
		dev_dbg(&pdev->dev, "use buffer pool mode 256\n");

	} else if ((i2c_dev->ast_i2c_data->master_dma == DMA_MODE) || (i2c_dev->ast_i2c_data->slave_dma == DMA_MODE)) {
		dev_dbg(&pdev->dev, "use dma mode \n");
		if (!i2c_dev->dma_buf) {
			i2c_dev->dma_buf = dma_alloc_coherent(NULL, AST_I2C_DMA_SIZE, &i2c_dev->dma_addr, GFP_KERNEL);
			if (!i2c_dev->dma_buf) {
				printk("unable to allocate tx Buffer memory\n");
				ret = -ENOMEM;
				goto err_no_dma;
			}
			if(i2c_dev->dma_addr%4 !=0) {
				printk("not 4 byte boundary \n");
				ret = -ENOMEM;
				goto err_no_dma;
			}
//			printk("dma_buf = [0x%x] dma_addr = [0x%x], please check 4byte boundary \n",i2c_dev->dma_buf,i2c_dev->dma_addr);
			memset (i2c_dev->dma_buf, 0, AST_I2C_DMA_SIZE);
		}

	} else {
		//master_mode 0: use byte mode
		dev_dbg(&pdev->dev, "use default byte mode \n");
	}

res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (NULL == res) { + dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n"); + ret = -ENOENT; + goto err_no_io_res; + } + if (!request_mem_region(res->start, resource_size(res), res->name)) { + dev_err(&pdev->dev, "cannot reserved region\n"); + ret = -ENXIO; + goto err_no_io_res; + } + + i2c_dev->reg_base = ioremap(res->start, resource_size(res)); + if (!i2c_dev->reg_base) { + ret = -EIO; + goto release_mem; + } + + i2c_dev->irq = platform_get_irq(pdev, 0); + if (i2c_dev->irq < 0) { + dev_err(&pdev->dev, "no irq specified\n"); + ret = -ENOENT; + goto ereqirq; + } + + i2c_dev->dev = &pdev->dev; + +#if defined (CONFIG_ARCH_AST1070) + if(i2c_dev->irq == IRQ_C0_I2C) { + i2c_dev->bus_id = pdev->id - NUM_BUS; + dev_dbg(&pdev->dev, "C0 :: pdev->id %d , i2c_dev->bus_id = %d, i2c_dev->irq =%d\n",pdev->id, i2c_dev->bus_id,i2c_dev->irq); +#if (CONFIG_AST1070_NR >= 2) + } else if(i2c_dev->irq == IRQ_C1_I2C) { + i2c_dev->bus_id = pdev->id - (NUM_BUS + 8); + dev_dbg(&pdev->dev, "C1 :: pdev->id %d , i2c_dev->bus_id = %d, i2c_dev->irq =%d\n",pdev->id, i2c_dev->bus_id,i2c_dev->irq); +#endif + } else { + i2c_dev->bus_id = pdev->id; + dev_dbg(&pdev->dev, "AST pdev->id %d , i2c_dev->bus_id = %d, i2c_dev->irq =%d\n",pdev->id, i2c_dev->bus_id,i2c_dev->irq); + } +#else + i2c_dev->bus_id = pdev->id; +#endif + + /* Initialize the I2C adapter */ + i2c_dev->adap.owner = THIS_MODULE; +//TODO + i2c_dev->adap.retries = 0; + +// i2c_dev->adap.retries = 3; + + i2c_dev->adap.timeout = 5; + + i2c_dev->master_xfer_mode = BYTE_XFER; + + /* + * If "pdev->id" is negative we consider it as zero. + * The reason to do so is to avoid sysfs names that only make + * sense when there are multiple adapters. + */ + i2c_dev->adap.nr = pdev->id != -1 ? 
pdev->id : 0; + snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name), "ast_i2c.%u", + i2c_dev->adap.nr); + + i2c_dev->slave_operation = 0; + i2c_dev->blk_r_flag = 0; + i2c_dev->adap.algo = &i2c_ast_algorithm; + + ret = request_irq(i2c_dev->irq, i2c_ast_handler, IRQF_SHARED, + i2c_dev->adap.name, i2c_dev); + if (ret) { + printk(KERN_INFO "I2C: Failed request irq %d\n", i2c_dev->irq); + goto ereqirq; + } + + ast_i2c_dev_init(i2c_dev); + +#ifdef CONFIG_AST_I2C_SLAVE_RDWR + ast_i2c_slave_buff_init(i2c_dev); +#endif + + i2c_dev->adap.algo_data = i2c_dev; + i2c_dev->adap.dev.parent = &pdev->dev; + + i2c_dev->adap.id = pdev->id; + + ret = i2c_add_numbered_adapter(&i2c_dev->adap); + if (ret < 0) { + printk(KERN_INFO "I2C: Failed to add bus\n"); + goto eadapt; + } + + platform_set_drvdata(pdev, i2c_dev); + + printk(KERN_INFO "I2C: %s: AST I2C adapter [%d khz]\n", + i2c_dev->adap.dev.bus_id,i2c_dev->ast_i2c_data->bus_clk/1000); + + return 0; + +eadapt: + free_irq(i2c_dev->irq, i2c_dev); +ereqirq: + iounmap(i2c_dev->reg_base); + +release_mem: + release_mem_region(res->start, resource_size(res)); +err_no_io_res: +err_no_dma: + kfree(i2c_dev); + +err_no_mem: + return ret; +} + +static int ast_i2c_remove(struct platform_device *pdev) +{ + struct ast_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + struct resource *res; + + platform_set_drvdata(pdev, NULL); + i2c_del_adapter(&i2c_dev->adap); + + free_irq(i2c_dev->irq, i2c_dev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iounmap(i2c_dev->reg_base); + release_mem_region(res->start, res->end - res->start + 1); + + kfree(i2c_dev); + + return 0; +} + +#ifdef CONFIG_PM +static int ast_i2c_suspend(struct platform_device *pdev, pm_message_t state) +{ + //TODO +// struct ast_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + return 0; +} + +static int ast_i2c_resume(struct platform_device *pdev) +{ + //TODO +// struct ast_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + //Should reset i2c ??? 
+ return 0; +} +#else +#define ast_i2c_suspend NULL +#define ast_i2c_resume NULL +#endif + +static struct platform_driver i2c_ast_driver = { + .probe = ast_i2c_probe, + .remove = __devexit_p(ast_i2c_remove), + .suspend = ast_i2c_suspend, + .resume = ast_i2c_resume, + .driver = { + .name = "ast-i2c", + .owner = THIS_MODULE, + }, +}; + +static int __init ast_i2c_init(void) +{ + return platform_driver_register(&i2c_ast_driver); +} + +static void __exit ast_i2c_exit(void) +{ + platform_driver_unregister(&i2c_ast_driver); +} +//TODO : check module init sequence +module_init(ast_i2c_init); +module_exit(ast_i2c_exit); + +MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>"); +MODULE_DESCRIPTION("ASPEED AST I2C Bus Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ast_i2c"); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index feb00df78baa..5ced92c864f5 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1063,6 +1063,32 @@ int i2c_transfer(struct i2c_adapter * adap, struct i2c_msg *msgs, int num) } EXPORT_SYMBOL(i2c_transfer); +#ifdef CONFIG_AST_I2C_SLAVE_RDWR +int i2c_slave_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs) +{ + unsigned long orig_jiffies; + int ret, try; + + if (adap->algo->slave_xfer) { +#ifdef DEBUG + dev_dbg(&adap->dev, "slave_xfer %c, addr=0x%02x, " + "len=%d\n", (msgs->flags & I2C_S_RD) + ? 
'R' : 'W', msgs->addr, msgs->len); +#endif + i2c_lock_adapter(adap); + ret = adap->algo->slave_xfer(adap, msgs); + i2c_unlock_adapter(adap); + + return ret; + } else { + dev_dbg(&adap->dev, "I2C level transfers not supported\n"); + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(i2c_slave_transfer); + +#endif + /** * i2c_master_send - issue a single I2C message in master transmit mode * @client: Handle to slave device diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index c171988a9f51..7d1f7e91f58c 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -37,6 +37,10 @@ #include <linux/smp_lock.h> #include <asm/uaccess.h> +#ifdef CONFIG_AST_I2C_SLAVE_RDWR +#include <asm/arch/ast_i2c.h> +#endif + static struct i2c_driver i2cdev_driver; /* @@ -415,6 +419,11 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case I2C_RDWR: return i2cdev_ioctl_rdrw(client, arg); +#ifdef CONFIG_AST_I2C_SLAVE_RDWR + case I2C_SLAVE_RDWR: + return i2cdev_ioctl_slave_rdrw(client->adapter, (struct i2c_msg __user *)arg); +#endif + case I2C_SMBUS: return i2cdev_ioctl_smbus(client, arg); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index fdd7c760be8c..1928a42d9b55 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -113,20 +113,29 @@ static int mmc_decode_cid(struct mmc_card *card) static int mmc_decode_csd(struct mmc_card *card) { struct mmc_csd *csd = &card->csd; - unsigned int e, m, csd_struct; + unsigned int e, m; u32 *resp = card->raw_csd; /* * We only understand CSD structure v1.1 and v1.2. * v1.2 has extra information in bits 15, 11 and 10. + * We also support eMMC v4.4 & v4.41. 
*/ +#if 0 csd_struct = UNSTUFF_BITS(resp, 126, 2); if (csd_struct != 1 && csd_struct != 2) { printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", mmc_hostname(card->host), csd_struct); return -EINVAL; } - +#else + csd->structure = UNSTUFF_BITS(resp, 126, 2); + if (csd->structure == 0) { + printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", + mmc_hostname(card->host), csd->structure); + return -EINVAL; + } +#endif csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); m = UNSTUFF_BITS(resp, 115, 4); e = UNSTUFF_BITS(resp, 112, 3); @@ -207,6 +216,7 @@ static int mmc_read_ext_csd(struct mmc_card *card) goto out; } +#if 0 ext_csd_struct = ext_csd[EXT_CSD_REV]; if (ext_csd_struct > 2) { printk(KERN_ERR "%s: unrecognised EXT_CSD structure " @@ -215,7 +225,7 @@ static int mmc_read_ext_csd(struct mmc_card *card) err = -EINVAL; goto out; } - + if (ext_csd_struct >= 2) { card->ext_csd.sectors = ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | @@ -224,7 +234,8 @@ static int mmc_read_ext_csd(struct mmc_card *card) ext_csd[EXT_CSD_SEC_CNT + 3] << 24; if (card->ext_csd.sectors) mmc_card_set_blockaddr(card); - } + } + switch (ext_csd[EXT_CSD_CARD_TYPE]) { case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: @@ -239,8 +250,86 @@ static int mmc_read_ext_csd(struct mmc_card *card) "support any high-speed modes.\n", mmc_hostname(card->host)); goto out; + } +#else + /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ + if (card->csd.structure == 3) { + int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE]; + if (ext_csd_struct > 2) { + printk(KERN_ERR "%s: unrecognised EXT_CSD structure " + "version %d\n", mmc_hostname(card->host), + ext_csd_struct); + err = -EINVAL; + goto out; + } } + card->ext_csd.rev = ext_csd[EXT_CSD_REV]; + if (card->ext_csd.rev > 6) { + printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", + mmc_hostname(card->host), card->ext_csd.rev); + err = -EINVAL; + goto out; + } + + if (card->ext_csd.rev >= 2) { + card->ext_csd.sectors = + 
ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | + ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | + ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | + ext_csd[EXT_CSD_SEC_CNT + 3] << 24; + + /* Cards with density > 2GiB are sector addressed */ + if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) + mmc_card_set_blockaddr(card); + } + + switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { + case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: + card->ext_csd.hs_max_dtr = 52000000; + break; + case EXT_CSD_CARD_TYPE_26: + card->ext_csd.hs_max_dtr = 26000000; + break; + default: + /* MMC v4 spec says this cannot happen */ + printk(KERN_WARNING "%s: card is mmc v4 but doesn't " + "support any high-speed modes.\n", + mmc_hostname(card->host)); + } + + if (card->ext_csd.rev >= 3) { + u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; + + /* Sleep / awake timeout in 100ns units */ + if (sa_shift > 0 && sa_shift <= 0x17) + card->ext_csd.sa_timeout = + 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; + card->ext_csd.erase_group_def = + ext_csd[EXT_CSD_ERASE_GROUP_DEF]; + card->ext_csd.hc_erase_timeout = 300 * + ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; + card->ext_csd.hc_erase_size = + ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; + } + + if (card->ext_csd.rev >= 4) { + card->ext_csd.sec_trim_mult = + ext_csd[EXT_CSD_SEC_TRIM_MULT]; + card->ext_csd.sec_erase_mult = + ext_csd[EXT_CSD_SEC_ERASE_MULT]; + card->ext_csd.sec_feature_support = + ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; + card->ext_csd.trim_timeout = 300 * + ext_csd[EXT_CSD_TRIM_MULT]; + } + + if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) + card->erased_byte = 0xFF; + else + card->erased_byte = 0x0; +#endif + out: kfree(ext_csd); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index dfa585f7feaf..ce66df50afb4 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -37,6 +37,17 @@ config MMC_SDHCI If unsure, say N. 
+config MMC_AST + tristate "ASPEED Secure Digital Host Controller Interface support" + depends on HAS_DMA + help + This selects the ASPEED Secure Digital Host Controller Interface. + + If you have a controller with this interface, say Y or M here. You + also need to enable an appropriate bus interface. + + If unsure, say N. + config MMC_SDHCI_PCI tristate "SDHCI support on PCI bus" depends on MMC_SDHCI && PCI diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index c794cc5ce442..5078ba2d32b9 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o obj-$(CONFIG_MMC_PXA) += pxamci.o obj-$(CONFIG_MMC_IMX) += imxmmc.o obj-$(CONFIG_MMC_SDHCI) += sdhci.o +obj-$(CONFIG_MMC_AST) += ast_sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o obj-$(CONFIG_MMC_WBSD) += wbsd.o diff --git a/drivers/mmc/host/ast_sdhci.c b/drivers/mmc/host/ast_sdhci.c new file mode 100644 index 000000000000..8b5d80d12867 --- /dev/null +++ b/drivers/mmc/host/ast_sdhci.c @@ -0,0 +1,1929 @@ +/*
+ * ast_sdhci.c - ASPEED Secure Digital Host Controller Interface driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <mach/hardware.h>
+#include <mach/platform.h>
+
+#include <linux/leds.h>
+
+#include <linux/mmc/host.h>
+
+#include <plat/ast_sdhci.h>
+
+
+#define DRIVER_NAME "ast_sdhci"
+
+#define DBG(f, x...) \
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
+
+static unsigned int debug_quirks = 0;
+
+struct ast_sdhc_platform_data *ast_sdhc_info;
+
+static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
+static void sdhci_finish_data(struct sdhci_host *);
+
+static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
+static void sdhci_finish_command(struct sdhci_host *);
+
/*
 * Dump the controller's SDHCI register file to the kernel log at
 * KERN_DEBUG, two registers per line.  Read-only diagnostic helper,
 * called from the error paths (reset timeout, inhibit timeout, etc.).
 * Register widths follow the SDHCI register map: readl/readw/readb
 * per the size of each register.
 */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		readl(host->ioaddr + SDHCI_DMA_ADDRESS),
		readw(host->ioaddr + SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		readw(host->ioaddr + SDHCI_BLOCK_SIZE),
		readw(host->ioaddr + SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		readl(host->ioaddr + SDHCI_ARGUMENT),
		readw(host->ioaddr + SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		readl(host->ioaddr + SDHCI_PRESENT_STATE),
		readb(host->ioaddr + SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		readb(host->ioaddr + SDHCI_POWER_CONTROL),
		readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		readb(host->ioaddr + SDHCI_WAKE_UP_CONTROL),
		readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
		readl(host->ioaddr + SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		readl(host->ioaddr + SDHCI_INT_ENABLE),
		readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		readw(host->ioaddr + SDHCI_ACMD12_ERR),
		readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
		readl(host->ioaddr + SDHCI_CAPABILITIES),
		readl(host->ioaddr + SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
+
+/*****************************************************************************\
+ * *
+ * Low level functions *
+ * *
+\*****************************************************************************/
+
+static void sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ unsigned long timeout;
+
+ if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
+ if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT))
+ return;
+ }
+
+ writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);
+
+ if (mask & SDHCI_RESET_ALL)
+ host->clock = 0;
+
+ /* Wait max 100 ms */
+ timeout = 100;
+
+ /* hw clears the bit when it's done */
+ while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
+ if (timeout == 0) {
+ printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
+ mmc_hostname(host->mmc), (int)mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+}
+
+static void sdhci_init(struct sdhci_host *host)
+{
+ u32 intmask;
+
+ sdhci_reset(host, SDHCI_RESET_ALL);
+
+ intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
+ SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
+ SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
+ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
+ SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
+ SDHCI_INT_ADMA_ERROR;
+
+ writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
+ writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_activate_led(struct sdhci_host *host)
+{
+ u8 ctrl;
+
+ ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+ ctrl |= SDHCI_CTRL_LED;
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+}
+
+static void sdhci_deactivate_led(struct sdhci_host *host)
+{
+ u8 ctrl;
+
+ ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_LED;
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+}
+
#ifdef CONFIG_LEDS_CLASS
/*
 * LED-class brightness callback: map any non-zero brightness to "on".
 * Takes host->lock because the LED bit shares a register with other
 * host-control state touched from interrupt context.
 */
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (brightness != LED_OFF)
		sdhci_activate_led(host);
	else
		sdhci_deactivate_led(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
+
+/*****************************************************************************\
+ * *
+ * Core functions *
+ * *
+\*****************************************************************************/
+
+static void sdhci_read_block_pio(struct sdhci_host *host)
+{
+ unsigned long flags;
+ size_t blksize, len, chunk;
+ u32 uninitialized_var(scratch);
+ u8 *buf;
+
+ DBG("PIO reading\n");
+
+ blksize = host->data->blksz;
+ chunk = 0;
+
+ local_irq_save(flags);
+
+ while (blksize) {
+ if (!sg_miter_next(&host->sg_miter))
+ BUG();
+
+ len = min(host->sg_miter.length, blksize);
+
+ blksize -= len;
+ host->sg_miter.consumed = len;
+
+ buf = host->sg_miter.addr;
+
+ while (len) {
+ if (chunk == 0) {
+ scratch = readl(host->ioaddr + SDHCI_BUFFER);
+ chunk = 4;
+ }
+
+ *buf = scratch & 0xFF;
+
+ buf++;
+ scratch >>= 8;
+ chunk--;
+ len--;
+ }
+ }
+
+ sg_miter_stop(&host->sg_miter);
+
+ local_irq_restore(flags);
+}
+
+static void sdhci_write_block_pio(struct sdhci_host *host)
+{
+ unsigned long flags;
+ size_t blksize, len, chunk;
+ u32 scratch;
+ u8 *buf;
+
+ DBG("PIO writing\n");
+
+ blksize = host->data->blksz;
+ chunk = 0;
+ scratch = 0;
+
+ local_irq_save(flags);
+
+ while (blksize) {
+ if (!sg_miter_next(&host->sg_miter))
+ BUG();
+
+ len = min(host->sg_miter.length, blksize);
+
+ blksize -= len;
+ host->sg_miter.consumed = len;
+
+ buf = host->sg_miter.addr;
+
+ while (len) {
+ scratch |= (u32)*buf << (chunk * 8);
+
+ buf++;
+ chunk++;
+ len--;
+
+ if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
+ writel(scratch, host->ioaddr + SDHCI_BUFFER);
+ chunk = 0;
+ scratch = 0;
+ }
+ }
+ }
+
+ sg_miter_stop(&host->sg_miter);
+
+ local_irq_restore(flags);
+}
+
+static void sdhci_transfer_pio(struct sdhci_host *host)
+{
+ u32 mask;
+
+ BUG_ON(!host->data);
+
+ if (host->blocks == 0)
+ return;
+
+ if (host->data->flags & MMC_DATA_READ)
+ mask = SDHCI_DATA_AVAILABLE;
+ else
+ mask = SDHCI_SPACE_AVAILABLE;
+
+ /*
+ * Some controllers (JMicron JMB38x) mess up the buffer bits
+ * for transfers < 4 bytes. As long as it is just one block,
+ * we can ignore the bits.
+ */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
+ (host->data->blocks == 1))
+ mask = ~0;
+
+ while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
+ if (host->data->flags & MMC_DATA_READ)
+ sdhci_read_block_pio(host);
+ else
+ sdhci_write_block_pio(host);
+
+ host->blocks--;
+ if (host->blocks == 0)
+ break;
+ }
+
+ DBG("PIO transfer complete.\n");
+}
+
+static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+ local_irq_save(*flags);
+ return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
/*
 * Undo sdhci_kmap_atomic(): drop the atomic mapping first, then restore
 * the IRQ state that was saved into *flags.  The order matters — the
 * mapping must be released while still in the atomic (IRQs-off) section.
 */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
+
/*
 * Build and DMA-map the ADMA2 descriptor table for @data.
 *
 * Each descriptor is 8 bytes, written out byte-by-byte in little-endian
 * order: bytes 4-7 = buffer address, bytes 2-3 = length, bytes 0-1 =
 * attributes (0x21 = TRAN|VALID, 0x03 = NOP|END|VALID terminator).
 * Scatterlist entries whose DMA address is not 32-bit aligned get a
 * small bounce buffer (up to 3 bytes) carved out of host->align_buffer;
 * for writes the misaligned head bytes are copied into the bounce
 * buffer here, for reads they are copied back in sdhci_adma_table_post().
 *
 * Returns 0 on success, -EINVAL if any DMA mapping fails.  On success
 * host->adma_addr holds the bus address of the mapped table; cleanup of
 * all mappings is the caller's job via sdhci_adma_table_post().
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	/* Bounce-buffer area: 128 slots of 4 bytes each. */
	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, 128 * 4, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & 0x3);

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				/* Stage the misaligned head bytes for the write. */
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* Extra descriptor pointing at the bounce slot. */
			desc[7] = (align_addr >> 24) & 0xff;
			desc[6] = (align_addr >> 16) & 0xff;
			desc[5] = (align_addr >> 8) & 0xff;
			desc[4] = (align_addr >> 0) & 0xff;

			BUG_ON(offset > 65536);

			desc[3] = (offset >> 8) & 0xff;
			desc[2] = (offset >> 0) & 0xff;

			desc[1] = 0x00;
			desc[0] = 0x21; /* tran, valid */

			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		/* Main descriptor for the (now aligned) remainder. */
		desc[7] = (addr >> 24) & 0xff;
		desc[6] = (addr >> 16) & 0xff;
		desc[5] = (addr >> 8) & 0xff;
		desc[4] = (addr >> 0) & 0xff;

		BUG_ON(len > 65536);

		desc[3] = (len >> 8) & 0xff;
		desc[2] = (len >> 0) & 0xff;

		desc[1] = 0x00;
		desc[0] = 0x21; /* tran, valid */

		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
	}

	/*
	 * Add a terminating entry.
	 */
	desc[7] = 0;
	desc[6] = 0;
	desc[5] = 0;
	desc[4] = 0;

	desc[3] = 0;
	desc[2] = 0;

	desc[1] = 0x00;
	desc[0] = 0x03; /* nop, end, valid */

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 4, direction);
	}

	/* Map the finished table itself for the controller to fetch. */
	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
		goto unmap_entries;
	BUG_ON(host->adma_addr & 0x3);

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);
fail:
	return -EINVAL;
}
+
/*
 * Tear down the mappings made by sdhci_adma_table_pre() after an ADMA
 * transfer finishes.  For reads, the bytes the controller deposited in
 * the misaligned-head bounce slots are first copied back to their real
 * destinations (the sg list is synced for CPU access before touching
 * the data).  Unmap order: descriptor table, bounce area, then the
 * scatterlist itself.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
		(128 * 2 + 1) * 4, DMA_TO_DEVICE);

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	if (data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		/* Walk the sg list the same way _pre() did, consuming one
		 * 4-byte bounce slot per misaligned entry. */
		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}
+
+static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
+{
+ u8 count;
+ unsigned target_timeout, current_timeout;
+
+ /*
+ * If the host controller provides us with an incorrect timeout
+ * value, just skip the check and use 0xE. The hardware may take
+ * longer to time out, but that's much better than having a too-short
+ * timeout value.
+ */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
+ return 0xE;
+
+ /* timeout in us */
+ target_timeout = data->timeout_ns / 1000 +
+ data->timeout_clks / host->clock;
+
+ /*
+ * Figure out needed cycles.
+ * We do this in steps in order to fit inside a 32 bit int.
+ * The first step is the minimum timeout, which will have a
+ * minimum resolution of 6 bits:
+ * (1) 2^13*1000 > 2^22,
+ * (2) host->timeout_clk < 2^16
+ * =>
+ * (1) / (2) > 2^6
+ */
+ count = 0;
+ current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+ while (current_timeout < target_timeout) {
+ count++;
+ current_timeout <<= 1;
+ if (count >= 0xF)
+ break;
+ }
+
+ if (count >= 0xF) {
+ printk(KERN_WARNING "%s: Too large timeout requested!\n",
+ mmc_hostname(host->mmc));
+ count = 0xE;
+ }
+
+ return count;
+}
+
/*
 * Program the controller for the data phase of a command: timeout,
 * transfer method (SDMA / ADMA / PIO), block size and block count.
 * No-op when @data is NULL (command without a data phase).
 *
 * DMA is attempted when the host supports it, but quirky controllers
 * force a fallback to PIO when any sg entry has an unaligned length or
 * offset.  For ADMA the descriptor table is built here via
 * sdhci_adma_table_pre(); for plain SDMA the (single-entry) sg list is
 * mapped directly.  PIO transfers set up the sg_miter iterator instead.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	u8 ctrl;
	int ret;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	count = sdhci_calc_timeout(host, data);
	writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);

	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Quirk check: some controllers need 32-bit-sized sg chunks. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Quirk check: some controllers need 32-bit-aligned sg offsets. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				writel(host->adma_addr,
					host->ioaddr + SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				/* SDMA can only handle one contiguous region. */
				WARN_ON(sg_cnt != 1);
				writel(sg_dma_address(data->sg),
					host->ioaddr + SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		/* PIO path: iterate the sg list block by block from the ISR. */
		sg_miter_start(&host->sg_miter,
			data->sg, data->sg_len, SG_MITER_ATOMIC);
		host->blocks = data->blocks;
	}

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
		host->ioaddr + SDHCI_BLOCK_SIZE);
	writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
}
+
+static void sdhci_set_transfer_mode(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ u16 mode;
+
+ if (data == NULL)
+ return;
+
+ WARN_ON(!host->data);
+
+ mode = SDHCI_TRNS_BLK_CNT_EN;
+ if (data->blocks > 1)
+ mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
+// mode |= SDHCI_TRNS_MULTI;
+ if (data->flags & MMC_DATA_READ)
+ mode |= SDHCI_TRNS_READ;
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ mode |= SDHCI_TRNS_DMA;
+
+ writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
+}
+
+/*
+ * Tear down after a data transfer: release DMA mappings, account the
+ * bytes transferred, then either issue the stop command or complete
+ * the whole request via the finish tasklet.
+ */
+static void sdhci_finish_data(struct sdhci_host *host)
+{
+	struct mmc_data *data;
+
+	BUG_ON(!host->data);
+
+	data = host->data;
+	host->data = NULL;
+
+	/* Undo whichever mapping sdhci_prepare_data() set up. */
+	if (host->flags & SDHCI_REQ_USE_DMA) {
+		if (host->flags & SDHCI_USE_ADMA)
+			sdhci_adma_table_post(host, data);
+		else {
+			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+				data->sg_len, (data->flags & MMC_DATA_READ) ?
+				DMA_FROM_DEVICE : DMA_TO_DEVICE);
+		}
+	}
+
+	/*
+	 * The specification states that the block count register must
+	 * be updated, but it does not specify at what point in the
+	 * data flow. That makes the register entirely useless to read
+	 * back so we have to assume that nothing made it to the card
+	 * in the event of an error.
+	 */
+	if (data->error)
+		data->bytes_xfered = 0;
+	else
+		data->bytes_xfered = data->blksz * data->blocks;
+
+	if (data->stop) {
+		/*
+		 * The controller needs a reset of internal state machines
+		 * upon error conditions.
+		 */
+		if (data->error) {
+			sdhci_reset(host, SDHCI_RESET_CMD);
+			sdhci_reset(host, SDHCI_RESET_DATA);
+		}
+
+		/* Stop command completion will finish the request. */
+		sdhci_send_command(host, data->stop);
+	} else
+		tasklet_schedule(&host->finish_tasklet);
+}
+
+/*
+ * Issue @cmd to the controller: wait for the CMD/DAT inhibit bits to
+ * clear, arm the software timeout, program argument, transfer mode
+ * and finally the command register (which starts execution).
+ * On any failure, cmd->error is set and the finish tasklet scheduled.
+ */
+static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+{
+	int flags;
+	u32 mask;
+	unsigned long timeout;
+
+	WARN_ON(host->cmd);
+
+	/* Wait max 10 ms */
+	timeout = 10;
+
+	mask = SDHCI_CMD_INHIBIT;
+	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
+		mask |= SDHCI_DATA_INHIBIT;
+
+	/* We shouldn't wait for data inhibit for stop commands, even
+	   though they might use busy signaling */
+	if (host->mrq->data && (cmd == host->mrq->data->stop))
+		mask &= ~SDHCI_DATA_INHIBIT;
+
+	/* Busy-wait (with 1 ms sleeps) for the bus to go idle. */
+	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
+		if (timeout == 0) {
+			printk(KERN_ERR "%s: Controller never released "
+				"inhibit bit(s).\n", mmc_hostname(host->mmc));
+			sdhci_dumpregs(host);
+			cmd->error = -EIO;
+			tasklet_schedule(&host->finish_tasklet);
+			return;
+		}
+		timeout--;
+		mdelay(1);
+	}
+
+	/* Software watchdog in case the hardware interrupt never comes. */
+	mod_timer(&host->timer, jiffies + 10 * HZ);
+
+	host->cmd = cmd;
+
+	sdhci_prepare_data(host, cmd->data);
+
+	writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);
+
+	sdhci_set_transfer_mode(host, cmd->data);
+
+	/* The SDHCI command register cannot express 136-bit + busy. */
+	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+		printk(KERN_ERR "%s: Unsupported response type!\n",
+			mmc_hostname(host->mmc));
+		cmd->error = -EINVAL;
+		tasklet_schedule(&host->finish_tasklet);
+		return;
+	}
+
+	/* Map MMC-core response flags onto the SDHCI encoding. */
+	if (!(cmd->flags & MMC_RSP_PRESENT))
+		flags = SDHCI_CMD_RESP_NONE;
+	else if (cmd->flags & MMC_RSP_136)
+		flags = SDHCI_CMD_RESP_LONG;
+	else if (cmd->flags & MMC_RSP_BUSY)
+		flags = SDHCI_CMD_RESP_SHORT_BUSY;
+	else
+		flags = SDHCI_CMD_RESP_SHORT;
+
+	if (cmd->flags & MMC_RSP_CRC)
+		flags |= SDHCI_CMD_CRC;
+	if (cmd->flags & MMC_RSP_OPCODE)
+		flags |= SDHCI_CMD_INDEX;
+	if (cmd->data)
+		flags |= SDHCI_CMD_DATA;
+
+	/* Writing the command register kicks off execution. */
+	writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
+		host->ioaddr + SDHCI_COMMAND);
+}
+
+/*
+ * Read back the command response from the controller and decide what
+ * completes next: a pending early-finished data phase, or (for
+ * command-only requests) the whole request.
+ */
+static void sdhci_finish_command(struct sdhci_host *host)
+{
+	int i;
+
+	BUG_ON(host->cmd == NULL);
+
+	if (host->cmd->flags & MMC_RSP_PRESENT) {
+		if (host->cmd->flags & MMC_RSP_136) {
+			/* CRC is stripped so we need to do some shifting. */
+			for (i = 0;i < 4;i++) {
+				host->cmd->resp[i] = readl(host->ioaddr +
+					SDHCI_RESPONSE + (3-i)*4) << 8;
+				if (i != 3)
+					host->cmd->resp[i] |=
+						readb(host->ioaddr +
+						SDHCI_RESPONSE + (3-i)*4-1);
+			}
+		} else {
+			host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
+		}
+	}
+
+	host->cmd->error = 0;
+
+	/* Data phase may have finished before the command did. */
+	if (host->data && host->data_early)
+		sdhci_finish_data(host);
+
+	/* Command-only request: nothing else outstanding, complete it. */
+	if (!host->cmd->data)
+		tasklet_schedule(&host->finish_tasklet);
+
+	host->cmd = NULL;
+}
+
+/*
+ * Set the SD bus clock to at most @clock Hz: pick the smallest
+ * power-of-two divider that does not exceed the target, wait for the
+ * internal clock to stabilise, then gate it through to the card.
+ * @clock == 0 leaves the clock disabled.
+ */
+static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	int div;
+	u16 clk;
+	unsigned long timeout;
+
+	if (clock == host->clock)
+		return;
+
+	/* Always stop the clock before reprogramming the divider. */
+	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+	if (clock == 0)
+		goto out;
+
+	for (div = 1;div < 256;div *= 2) {
+		if ((host->max_clk / div) <= clock)
+			break;
+	}
+	/* Register encodes the divider as div/2 (0 means divide-by-1). */
+	div >>= 1;
+
+	//Issue : For ast2300, ast2400 couldn't set div = 0 means /1 , so set source is ~50Mhz up
+
+	clk = div << SDHCI_DIVIDER_SHIFT;
+	clk |= SDHCI_CLOCK_INT_EN;
+	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+	/* Wait max 10 ms */
+	timeout = 10;
+	while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
+		& SDHCI_CLOCK_INT_STABLE)) {
+		if (timeout == 0) {
+			printk(KERN_ERR "%s: Internal clock never "
+				"stabilised.\n", mmc_hostname(host->mmc));
+			sdhci_dumpregs(host);
+			return;
+		}
+		timeout--;
+		mdelay(1);
+	}
+
+	/* Internal clock is stable; enable it towards the card. */
+	clk |= SDHCI_CLOCK_CARD_EN;
+	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+out:
+	host->clock = clock;
+}
+
+/*
+ * Set the bus power rail.  @power is an MMC_VDD_* bit number, or
+ * (unsigned short)-1 to switch power off entirely.
+ */
+static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
+{
+	u8 pwr;
+
+	if (host->power == power)
+		return;
+
+	if (power == (unsigned short)-1) {
+		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
+		goto out;
+	}
+
+	/*
+	 * Spec says that we should clear the power reg before setting
+	 * a new value. Some controllers don't seem to like this though.
+	 */
+	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
+
+	pwr = SDHCI_POWER_ON;
+
+	/* Translate the MMC_VDD_* bit into a controller voltage select. */
+	switch (1 << power) {
+	case MMC_VDD_165_195:
+		pwr |= SDHCI_POWER_180;
+		break;
+	case MMC_VDD_29_30:
+	case MMC_VDD_30_31:
+		pwr |= SDHCI_POWER_300;
+		break;
+	case MMC_VDD_32_33:
+	case MMC_VDD_33_34:
+		pwr |= SDHCI_POWER_330;
+		break;
+	default:
+		BUG();
+	}
+
+	/*
+	 * At least the Marvell CaFe chip gets confused if we set the voltage
+	 * and set turn on power at the same time, so set the voltage first.
+	 */
+	if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
+		writeb(pwr & ~SDHCI_POWER_ON,
+			host->ioaddr + SDHCI_POWER_CONTROL);
+
+	writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);
+
+out:
+	host->power = power;
+}
+
+/*****************************************************************************\
+ * *
+ * MMC callbacks *
+ * *
+\*****************************************************************************/
+
+/* mmc_host_ops .request: start a new MMC request on this controller. */
+static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	int card_present;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	WARN_ON(host->mrq != NULL);
+
+#ifndef CONFIG_LEDS_CLASS
+	sdhci_activate_led(host);
+#endif
+
+	host->mrq = mrq;
+
+	card_present = !!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT);
+	if (card_present && !(host->flags & SDHCI_DEVICE_DEAD)) {
+		sdhci_send_command(host, mrq->cmd);
+	} else {
+		/* No medium (or dead controller): fail the request fast. */
+		host->mrq->cmd->error = -ENOMEDIUM;
+		tasklet_schedule(&host->finish_tasklet);
+	}
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * mmc_host_ops .set_ios: apply clock, power, bus width and timing
+ * settings requested by the MMC core.
+ */
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+	u8 ctrl;
+
+	host = mmc_priv(mmc);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		goto out;
+
+	/*
+	 * Reset the chip on each power off.
+	 * Should clear out any weird states.
+	 */
+	if (ios->power_mode == MMC_POWER_OFF) {
+		writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
+		sdhci_init(host);
+	}
+
+	sdhci_set_clock(host, ios->clock);
+
+	if (ios->power_mode == MMC_POWER_OFF)
+		sdhci_set_power(host, -1);
+	else
+		sdhci_set_power(host, ios->vdd);
+
+	/* Update bus width and high-speed bits in host control. */
+	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+
+	if (ios->bus_width == MMC_BUS_WIDTH_4)
+		ctrl |= SDHCI_CTRL_4BITBUS;
+	else
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+
+	if (ios->timing == MMC_TIMING_SD_HS)
+		ctrl |= SDHCI_CTRL_HISPD;
+	else
+		ctrl &= ~SDHCI_CTRL_HISPD;
+
+	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+
+	/*
+	 * Some (ENE) controllers go apeshit on some ios operation,
+	 * signalling timeout and CRC errors even on CMD0. Resetting
+	 * it on each ios seems to solve the problem.
+	 */
+	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+out:
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * mmc_host_ops .get_ro: report the write-protect switch state.
+ * Returns non-zero when the card is read-only.
+ */
+static int sdhci_get_ro(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	int state = 0;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!(host->flags & SDHCI_DEVICE_DEAD))
+		state = readl(host->ioaddr + SDHCI_PRESENT_STATE);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/* The WRITE_PROTECT bit reads 0 when the card is protected. */
+	return !(state & SDHCI_WRITE_PROTECT);
+}
+
+/* mmc_host_ops .enable_sdio_irq: gate the SDIO card interrupt. */
+static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	u32 irqs;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		goto out;
+
+	irqs = readl(host->ioaddr + SDHCI_INT_ENABLE);
+	if (enable)
+		irqs |= SDHCI_INT_CARD_INT;
+	else
+		irqs &= ~SDHCI_INT_CARD_INT;
+
+	/* Mirror the mask into both the status and signal enables. */
+	writel(irqs, host->ioaddr + SDHCI_INT_ENABLE);
+	writel(irqs, host->ioaddr + SDHCI_SIGNAL_ENABLE);
+
+out:
+	mmiowb();
+
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/* Callbacks handed to the MMC core for this host. */
+static const struct mmc_host_ops sdhci_ops = {
+	.request	= sdhci_request,
+	.set_ios	= sdhci_set_ios,
+	.get_ro		= sdhci_get_ro,
+	.enable_sdio_irq = sdhci_enable_sdio_irq,
+};
+
+/*****************************************************************************\
+ * *
+ * Tasklets *
+ * *
+\*****************************************************************************/
+
+/*
+ * Card insert/remove tasklet: if the card vanished mid-request,
+ * reset the controller, fail the request, and always notify the MMC
+ * core (debounced) so it can rescan the slot.
+ */
+static void sdhci_tasklet_card(unsigned long param)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	host = (struct sdhci_host*)param;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
+		if (host->mrq) {
+			printk(KERN_ERR "%s: Card removed during transfer!\n",
+				mmc_hostname(host->mmc));
+			printk(KERN_ERR "%s: Resetting controller.\n",
+				mmc_hostname(host->mmc));
+
+			sdhci_reset(host, SDHCI_RESET_CMD);
+			sdhci_reset(host, SDHCI_RESET_DATA);
+
+			host->mrq->cmd->error = -ENOMEDIUM;
+			tasklet_schedule(&host->finish_tasklet);
+		}
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/* 200 ms debounce before the core re-probes the slot. */
+	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+}
+
+/*
+ * Request-completion tasklet: cancel the watchdog timer, reset the
+ * controller after errors (per quirk requirements), clear per-request
+ * state and hand the result back to the MMC core.
+ *
+ * NOTE(review): this assumes host->mrq is non-NULL whenever the
+ * tasklet runs; every scheduler in this file sets mrq first, but a
+ * spurious schedule would dereference NULL — confirm.
+ */
+static void sdhci_tasklet_finish(unsigned long param)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+	struct mmc_request *mrq;
+
+	host = (struct sdhci_host*)param;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	del_timer(&host->timer);
+
+	mrq = host->mrq;
+
+	/*
+	 * The controller needs a reset of internal state machines
+	 * upon error conditions.
+	 */
+	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
+		(mrq->cmd->error ||
+		 (mrq->data && (mrq->data->error ||
+		  (mrq->data->stop && mrq->data->stop->error))) ||
+		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
+
+		/* Some controllers need this kick or reset won't work here */
+		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
+			unsigned int clock;
+
+			/* This is to force an update */
+			clock = host->clock;
+			host->clock = 0;
+			sdhci_set_clock(host, clock);
+		}
+
+		/* Spec says we should do both at the same time, but Ricoh
+		   controllers do not like that. */
+		sdhci_reset(host, SDHCI_RESET_CMD);
+		sdhci_reset(host, SDHCI_RESET_DATA);
+	}
+
+	host->mrq = NULL;
+	host->cmd = NULL;
+	host->data = NULL;
+
+#ifndef CONFIG_LEDS_CLASS
+	sdhci_deactivate_led(host);
+#endif
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/* Called without the lock held; may re-enter ->request(). */
+	mmc_request_done(host->mmc, mrq);
+}
+
+/*
+ * Software watchdog armed in sdhci_send_command(): if the hardware
+ * never raised a completion interrupt, fail the in-flight data/command
+ * phase with -ETIMEDOUT and push completion through the usual paths.
+ */
+static void sdhci_timeout_timer(unsigned long data)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	host = (struct sdhci_host*)data;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->mrq) {
+		printk(KERN_ERR "%s: Timeout waiting for hardware "
+			"interrupt.\n", mmc_hostname(host->mmc));
+		sdhci_dumpregs(host);
+
+		if (host->data) {
+			host->data->error = -ETIMEDOUT;
+			sdhci_finish_data(host);
+		} else {
+			/* Fail whichever phase was outstanding. */
+			if (host->cmd)
+				host->cmd->error = -ETIMEDOUT;
+			else
+				host->mrq->cmd->error = -ETIMEDOUT;
+
+			tasklet_schedule(&host->finish_tasklet);
+		}
+	}
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*****************************************************************************\
+ * *
+ * Interrupt handling *
+ * *
+\*****************************************************************************/
+
+/*
+ * Handle command-related interrupt bits: translate error bits into
+ * cmd->error, and complete the command on SDHCI_INT_RESPONSE unless
+ * we are still waiting for an end-of-busy indication.
+ */
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (!host->cmd) {
+		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
+			"though no command operation was in progress.\n",
+			mmc_hostname(host->mmc), (unsigned)intmask);
+		sdhci_dumpregs(host);
+		return;
+	}
+
+	if (intmask & SDHCI_INT_TIMEOUT)
+		host->cmd->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+			SDHCI_INT_INDEX))
+		host->cmd->error = -EILSEQ;
+
+	if (host->cmd->error) {
+		tasklet_schedule(&host->finish_tasklet);
+		return;
+	}
+
+	/*
+	 * The host can send an interrupt when the busy state has
+	 * ended, allowing us to wait without wasting CPU cycles.
+	 * Unfortunately this is overloaded on the "data complete"
+	 * interrupt, so we need to take some care when handling
+	 * it.
+	 *
+	 * Note: The 1.0 specification is a bit ambiguous about this
+	 *       feature so there might be some problems with older
+	 *       controllers.
+	 */
+	if (host->cmd->flags & MMC_RSP_BUSY) {
+		if (host->cmd->data)
+			DBG("Cannot wait for busy signal when also "
+				"doing a data transfer");
+		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
+			return;
+
+		/* The controller does not support the end-of-busy IRQ,
+		 * fall through and take the SDHCI_INT_RESPONSE */
+	}
+
+	if (intmask & SDHCI_INT_RESPONSE)
+		sdhci_finish_command(host);
+}
+
+/*
+ * Handle data-related interrupt bits: error classification, PIO
+ * draining, DMA-boundary restarts and end-of-transfer completion.
+ */
+static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (!host->data) {
+		/*
+		 * The "data complete" interrupt is also used to
+		 * indicate that a busy state has ended. See comment
+		 * above in sdhci_cmd_irq().
+		 */
+		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+			if (intmask & SDHCI_INT_DATA_END) {
+				sdhci_finish_command(host);
+				return;
+			}
+		}
+
+		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
+			"though no data operation was in progress.\n",
+			mmc_hostname(host->mmc), (unsigned)intmask);
+		sdhci_dumpregs(host);
+
+		return;
+	}
+
+	if (intmask & SDHCI_INT_DATA_TIMEOUT)
+		host->data->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+		host->data->error = -EILSEQ;
+	else if (intmask & SDHCI_INT_ADMA_ERROR)
+		host->data->error = -EIO;
+
+	if (host->data->error)
+		sdhci_finish_data(host);
+	else {
+		/* PIO mode: move data while the FIFO has room/content. */
+		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
+			sdhci_transfer_pio(host);
+
+		/*
+		 * We currently don't do anything fancy with DMA
+		 * boundaries, but as we can't disable the feature
+		 * we need to at least restart the transfer.
+		 */
+		if (intmask & SDHCI_INT_DMA_END)
+			writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
+				host->ioaddr + SDHCI_DMA_ADDRESS);
+
+		if (intmask & SDHCI_INT_DATA_END) {
+			if (host->cmd) {
+				/*
+				 * Data managed to finish before the
+				 * command completed. Make sure we do
+				 * things in the proper order.
+				 */
+				host->data_early = 1;
+			} else {
+				sdhci_finish_data(host);
+			}
+		}
+	}
+}
+
+/*
+ * Top-level interrupt handler: ack and dispatch each interrupt group
+ * (card detect, command, data, bus power, card interrupt) in turn.
+ * SDIO card interrupts are only flagged here and signalled after the
+ * host lock is dropped, since the notification re-enters the core.
+ */
+static irqreturn_t sdhci_irq(int irq, void *dev_id)
+{
+	irqreturn_t result;
+	struct sdhci_host* host = dev_id;
+	u32 intmask;
+	int cardint = 0;
+
+	spin_lock(&host->lock);
+
+	intmask = readl(host->ioaddr + SDHCI_INT_STATUS);
+
+	/* 0xffffffff means the device has gone away (e.g. hot-unplug). */
+	if (!intmask || intmask == 0xffffffff) {
+		result = IRQ_NONE;
+		goto out;
+	}
+
+	DBG("*** %s got interrupt: 0x%08x\n",
+		mmc_hostname(host->mmc), intmask);
+
+	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+		writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
+			host->ioaddr + SDHCI_INT_STATUS);
+		tasklet_schedule(&host->card_tasklet);
+	}
+
+	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+
+	if (intmask & SDHCI_INT_CMD_MASK) {
+		writel(intmask & SDHCI_INT_CMD_MASK,
+			host->ioaddr + SDHCI_INT_STATUS);
+		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+	}
+
+	if (intmask & SDHCI_INT_DATA_MASK) {
+		writel(intmask & SDHCI_INT_DATA_MASK,
+			host->ioaddr + SDHCI_INT_STATUS);
+		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+	}
+
+	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
+
+	intmask &= ~SDHCI_INT_ERROR;
+
+	if (intmask & SDHCI_INT_BUS_POWER) {
+		printk(KERN_ERR "%s: Card is consuming too much power!\n",
+			mmc_hostname(host->mmc));
+		writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
+	}
+
+	intmask &= ~SDHCI_INT_BUS_POWER;
+
+	if (intmask & SDHCI_INT_CARD_INT)
+		cardint = 1;
+
+	intmask &= ~SDHCI_INT_CARD_INT;
+
+	/* Anything left over is unexpected; ack it so it can't storm. */
+	if (intmask) {
+		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
+			mmc_hostname(host->mmc), intmask);
+		sdhci_dumpregs(host);
+
+		writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
+	}
+
+	result = IRQ_HANDLED;
+
+	mmiowb();
+out:
+	spin_unlock(&host->lock);
+
+	/*
+	 * We have to delay this as it calls back into the driver.
+	 */
+	if (cardint)
+		mmc_signal_sdio_irq(host->mmc);
+
+	return result;
+}
+
+/*****************************************************************************\
+ * *
+ * Suspend/resume *
+ * *
+\*****************************************************************************/
+
+#ifdef CONFIG_PM
+
+/*
+ * Suspend: park the MMC core first, then release the controller IRQ
+ * so it cannot fire while the device is down.
+ */
+int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+{
+	int err = mmc_suspend_host(host->mmc, state);
+
+	if (err)
+		return err;
+
+	free_irq(host->irq, host);
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(sdhci_suspend_host);
+
+/*
+ * Resume: re-request the IRQ, re-initialise the controller registers
+ * (their contents are lost across suspend) and then resume the MMC
+ * core.
+ *
+ * NOTE(review): the upstream driver re-enables DMA here via
+ * host->ops->enable_dma(); this port has no ops vector (the hook was
+ * commented out), so no DMA re-setup is performed — confirm the
+ * ASPEED controller needs none.
+ */
+int sdhci_resume_host(struct sdhci_host *host)
+{
+	int ret;
+
+	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+		mmc_hostname(host->mmc), host);
+	if (ret)
+		return ret;
+
+	sdhci_init(host);
+	mmiowb();
+
+	return mmc_resume_host(host->mmc);
+}
+
+EXPORT_SYMBOL_GPL(sdhci_resume_host);
+
+#endif /* CONFIG_PM */
+
+/*****************************************************************************\
+ * *
+ * Device allocation/registration *
+ * *
+\*****************************************************************************/
+
+/*
+ * Allocate an mmc_host with an embedded sdhci_host plus @priv_size
+ * bytes of caller-private data.  Returns ERR_PTR(-ENOMEM) when the
+ * allocation fails.
+ */
+struct sdhci_host *sdhci_alloc_host(struct device *dev,
+	size_t priv_size)
+{
+	struct sdhci_host *host;
+	struct mmc_host *mmc;
+
+	WARN_ON(dev == NULL);
+
+	mmc = mmc_alloc_host(sizeof(*host) + priv_size, dev);
+	if (mmc == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	host = mmc_priv(mmc);
+	host->mmc = mmc;
+	return host;
+}
+
+//EXPORT_SYMBOL_GPL(sdhci_alloc_host);
+
+/*
+ * Register an allocated sdhci_host with the MMC core: read the
+ * capability register, configure DMA/ADMA usage, derive clock and
+ * transfer limits, set up tasklets, the watchdog timer and the IRQ
+ * handler, then publish the host.
+ *
+ * Returns 0 on success or a negative errno.  Fix vs. original: the
+ * ADMA descriptor/alignment buffers were leaked on every error path
+ * after their allocation; all error exits now funnel through a
+ * cleanup chain that frees them.
+ */
+int sdhci_add_host(struct sdhci_host *host)
+{
+	struct mmc_host *mmc;
+	unsigned int caps, temp;
+	int ret;
+
+	WARN_ON(host == NULL);
+	if (host == NULL)
+		return -EINVAL;
+#if 0
+	//TODO
+//Both ports's capabilities are 0, software needs to reset SDIO
+#define SDIO000	0x1e740000
+#define SDIO004	0x1e740004
+
+#define SDIO_ALL_SOFTWARE_RESET	0x01
+
+	if ((*(unsigned int*)(IO_ADDRESS(0x1E740140)) == 0) && (*(unsigned int*)(IO_ADDRESS(0x1E740240)) == 0)) {
+		temp = *(unsigned int*)(IO_ADDRESS(SDIO000));
+		*(unsigned int*)(IO_ADDRESS(SDIO000)) = temp | SDIO_ALL_SOFTWARE_RESET;
+		barrier();
+		do {
+			temp = (*(unsigned int*)(IO_ADDRESS(SDIO000)) & SDIO_ALL_SOFTWARE_RESET);
+		} while (temp == SDIO_ALL_SOFTWARE_RESET);
+	}
+	//Card detect debounce timing
+	*(unsigned int*)(IO_ADDRESS(SDIO004)) = 0x1000;
+#endif
+	///////////////////////////////////////////////////////////////////
+
+	mmc = host->mmc;
+
+	/* Module parameter can override the quirk set for debugging. */
+	if (debug_quirks)
+		host->quirks = debug_quirks;
+
+	sdhci_reset(host, SDHCI_RESET_ALL);
+
+	host->version = readw(host->ioaddr + SDHCI_HOST_VERSION);
+	host->version = (host->version & SDHCI_SPEC_VER_MASK)
+				>> SDHCI_SPEC_VER_SHIFT;
+	if (host->version > SDHCI_SPEC_200) {
+		printk(KERN_ERR "%s: Unknown controller version (%d). "
+			"You may experience problems.\n", mmc_hostname(mmc),
+			host->version);
+	}
+
+	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
+
+	//Ryan Add for timeout
+	host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+//	host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
+	/* Decide whether to use DMA at all... */
+	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
+		host->flags |= SDHCI_USE_DMA;
+	else if (!(caps & SDHCI_CAN_DO_DMA))
+		DBG("Controller doesn't have DMA capability\n");
+	else
+		host->flags |= SDHCI_USE_DMA;
+
+	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
+		(host->flags & SDHCI_USE_DMA)) {
+		DBG("Disabling DMA as it is marked broken\n");
+		host->flags &= ~SDHCI_USE_DMA;
+	}
+
+	/* ...and whether the controller can upgrade to ADMA. */
+	if (host->flags & SDHCI_USE_DMA) {
+		if ((host->version >= SDHCI_SPEC_200) &&
+				(caps & SDHCI_CAN_DO_ADMA2))
+			host->flags |= SDHCI_USE_ADMA;
+	}
+
+	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
+		(host->flags & SDHCI_USE_ADMA)) {
+		DBG("Disabling ADMA as it is marked broken\n");
+		host->flags &= ~SDHCI_USE_ADMA;
+	}
+
+	if (host->flags & SDHCI_USE_DMA) {
+/*
+		if (host->ops->enable_dma) {
+			if (host->ops->enable_dma(host)) {
+				printk(KERN_WARNING "%s: No suitable DMA "
+					"available. Falling back to PIO.\n",
+					mmc_hostname(mmc));
+				host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
+			}
+		}
+*/
+	}
+
+	if (host->flags & SDHCI_USE_ADMA) {
+		/*
+		 * We need to allocate descriptors for all sg entries
+		 * (128) and potentially one alignment transfer for
+		 * each of those entries.
+		 */
+		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
+		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+		if (!host->adma_desc || !host->align_buffer) {
+			kfree(host->adma_desc);
+			kfree(host->align_buffer);
+			printk(KERN_WARNING "%s: Unable to allocate ADMA "
+				"buffers. Falling back to standard DMA.\n",
+				mmc_hostname(mmc));
+			host->flags &= ~SDHCI_USE_ADMA;
+			host->adma_desc = NULL;
+			host->align_buffer = NULL;
+		}
+	}
+
+	/*
+	 * If we use DMA, then it's up to the caller to set the DMA
+	 * mask, but PIO does not need the hw shim so we set a new
+	 * mask here in that case.
+	 */
+	if (!(host->flags & SDHCI_USE_DMA)) {
+		host->dma_mask = DMA_BIT_MASK(64);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+	}
+
+	/* Base clock comes from the platform callback, not the caps
+	 * register (the ASPEED caps field does not report it). */
+	host->max_clk = ast_sdhc_info->sd_clock_src_get()/1000000;
+
+	if (host->max_clk == 0) {
+		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
+			"frequency.\n", mmc_hostname(mmc));
+		ret = -ENODEV;
+		goto free_adma;
+	}
+	host->max_clk *= 1000000;
+
+	//Ryan modify for calc timeout issue
+	host->timeout_clk = ast_sdhc_info->sd_clock_src_get()/1000000;
+	if (host->timeout_clk == 0) {
+		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
+			"frequency.\n", mmc_hostname(mmc));
+		ret = -ENODEV;
+		goto free_adma;
+	}
+	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
+		host->timeout_clk *= 1000;
+
+	/*
+	 * Set host parameters.
+	 */
+	mmc->ops = &sdhci_ops;
+	mmc->f_min = host->max_clk / 256;
+	mmc->f_max = host->max_clk;
+	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+
+	if ((caps & SDHCI_CAN_DO_HISPD) ||
+		(host->quirks & SDHCI_QUIRK_FORCE_HIGHSPEED))
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+	mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+
+	/* Advertise whichever voltages the capabilities report. */
+	mmc->ocr_avail = 0;
+	if (caps & SDHCI_CAN_VDD_330)
+		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
+	if (caps & SDHCI_CAN_VDD_300)
+		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
+	if (caps & SDHCI_CAN_VDD_180)
+		mmc->ocr_avail |= MMC_VDD_165_195;
+
+	if (mmc->ocr_avail == 0) {
+		printk(KERN_ERR "%s: Hardware doesn't report any "
+			"support voltages.\n", mmc_hostname(mmc));
+		ret = -ENODEV;
+		goto free_adma;
+	}
+
+	spin_lock_init(&host->lock);
+
+	/*
+	 * Maximum number of segments. Depends on if the hardware
+	 * can do scatter/gather or not.
+	 */
+	if (host->flags & SDHCI_USE_ADMA)
+		mmc->max_hw_segs = 128;
+	else if (host->flags & SDHCI_USE_DMA)
+		mmc->max_hw_segs = 1;
+	else /* PIO */
+		mmc->max_hw_segs = 128;
+	mmc->max_phys_segs = 128;
+
+	/*
+	 * Maximum number of sectors in one transfer. Limited by DMA boundary
+	 * size (512KiB).
+	 */
+	mmc->max_req_size = 524288;
+
+	/*
+	 * Maximum segment size. Could be one segment with the maximum number
+	 * of bytes. When doing hardware scatter/gather, each entry cannot
+	 * be larger than 64 KiB though.
+	 */
+	if (host->flags & SDHCI_USE_ADMA)
+		mmc->max_seg_size = 65536;
+	else
+		mmc->max_seg_size = mmc->max_req_size;
+
+	/*
+	 * Maximum block size. This varies from controller to controller and
+	 * is specified in the capabilities register.
+	 */
+	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
+	if (mmc->max_blk_size >= 3) {
+		printk(KERN_WARNING "%s: Invalid maximum block size, "
+			"assuming 512 bytes\n", mmc_hostname(mmc));
+		mmc->max_blk_size = 512;
+	} else
+		mmc->max_blk_size = 512 << mmc->max_blk_size;
+
+	/*
+	 * Maximum block count.
+	 */
+	mmc->max_blk_count = 65535;
+
+	/*
+	 * Init tasklets.
+	 */
+	tasklet_init(&host->card_tasklet,
+		sdhci_tasklet_card, (unsigned long)host);
+	tasklet_init(&host->finish_tasklet,
+		sdhci_tasklet_finish, (unsigned long)host);
+
+	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
+
+	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+		mmc_hostname(mmc), host);
+	if (ret)
+		goto untasklet;
+
+	sdhci_init(host);
+
+#ifdef CONFIG_MMC_DEBUG
+	sdhci_dumpregs(host);
+#endif
+
+#ifdef CONFIG_LEDS_CLASS
+	snprintf(host->led_name, sizeof(host->led_name),
+		"%s::", mmc_hostname(mmc));
+	host->led.name = host->led_name;
+	host->led.brightness = LED_OFF;
+	host->led.default_trigger = mmc_hostname(mmc);
+	host->led.brightness_set = sdhci_led_control;
+
+	ret = led_classdev_register(mmc_dev(mmc), &host->led);
+	if (ret)
+		goto reset;
+#endif
+
+	mmiowb();
+
+	mmc_add_host(mmc);
+
+	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
+		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+		(host->flags & SDHCI_USE_ADMA)?"A":"",
+		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
+
+	return 0;
+
+#ifdef CONFIG_LEDS_CLASS
+reset:
+	sdhci_reset(host, SDHCI_RESET_ALL);
+	free_irq(host->irq, host);
+#endif
+untasklet:
+	tasklet_kill(&host->card_tasklet);
+	tasklet_kill(&host->finish_tasklet);
+free_adma:
+	/* kfree(NULL) is a no-op, so this is safe when ADMA was never
+	 * enabled.  Previously these buffers leaked on every error. */
+	kfree(host->adma_desc);
+	kfree(host->align_buffer);
+	host->adma_desc = NULL;
+	host->align_buffer = NULL;
+
+	return ret;
+}
+
+//EXPORT_SYMBOL_GPL(sdhci_add_host);
+
+/*
+ * Platform probe: map the controller registers, fetch the IRQ and
+ * register the host with the MMC core.
+ *
+ * Fixes vs. original: the sdhci_alloc_host() result (an ERR_PTR on
+ * failure) was never checked, and every error exit leaked the host
+ * allocation, the reserved memory region and/or the ioremap mapping.
+ * All paths now unwind properly; drvdata is also set so later
+ * remove/suspend hooks can find the host.
+ */
+static int sdhci_probe(struct platform_device *pdev)
+{
+	struct sdhci_host *host;
+	struct resource *res;
+	int ret;
+
+	ast_sdhc_info = pdev->dev.platform_data;
+
+	host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_host));
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (NULL == res) {
+		dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n");
+		ret = -ENOENT;
+		goto err_free_host;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), res->name)) {
+		dev_err(&pdev->dev, "cannot reserved region\n");
+		ret = -ENXIO;
+		goto err_free_host;
+	}
+
+	host->ioaddr = ioremap(res->start, resource_size(res));
+	if (!host->ioaddr) {
+		ret = -EIO;
+		goto err_release_mem;
+	}
+
+	host->hw_name = res->name;
+	host->irq = platform_get_irq(pdev, 0);
+	if (host->irq < 0) {
+		dev_err(&pdev->dev, "no irq specified\n");
+		ret = -ENOENT;
+		goto err_unmap;
+	}
+
+	platform_set_drvdata(pdev, host);
+
+	ret = sdhci_add_host(host);
+	if (ret)
+		goto err_unmap;
+
+	return 0;
+
+err_unmap:
+	iounmap(host->ioaddr);
+err_release_mem:
+	release_mem_region(res->start, resource_size(res));
+err_free_host:
+	sdhci_free_host(host);
+	return ret;
+}
+
+/*
+ * Unregister the host.  @dead is non-zero when the hardware is known
+ * to be gone (e.g. surprise removal): in that case any in-flight
+ * request is failed up front and the final reset is skipped, since
+ * the registers can no longer be trusted.
+ */
+void sdhci_remove_host(struct sdhci_host *host, int dead)
+{
+	unsigned long flags;
+
+	if (dead) {
+		spin_lock_irqsave(&host->lock, flags);
+
+		host->flags |= SDHCI_DEVICE_DEAD;
+
+		if (host->mrq) {
+			printk(KERN_ERR "%s: Controller removed during "
+				" transfer!\n", mmc_hostname(host->mmc));
+
+			host->mrq->cmd->error = -ENOMEDIUM;
+			tasklet_schedule(&host->finish_tasklet);
+		}
+
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+
+	mmc_remove_host(host->mmc);
+
+#ifdef CONFIG_LEDS_CLASS
+	led_classdev_unregister(&host->led);
+#endif
+
+	/* Only touch the hardware if it is still there. */
+	if (!dead)
+		sdhci_reset(host, SDHCI_RESET_ALL);
+
+	free_irq(host->irq, host);
+
+	del_timer_sync(&host->timer);
+
+	tasklet_kill(&host->card_tasklet);
+	tasklet_kill(&host->finish_tasklet);
+
+	kfree(host->adma_desc);
+	kfree(host->align_buffer);
+
+	host->adma_desc = NULL;
+	host->align_buffer = NULL;
+}
+
+EXPORT_SYMBOL_GPL(sdhci_remove_host);
+
+/* Release the mmc_host (and the embedded sdhci_host) allocated by
+ * sdhci_alloc_host(). */
+void sdhci_free_host(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	mmc_free_host(mmc);
+}
+
+EXPORT_SYMBOL_GPL(sdhci_free_host);
+
+/*****************************************************************************\
+ * *
+ * Driver init/exit *
+ * *
+\*****************************************************************************/
+
+/*
+ * Platform driver glue.
+ *
+ * NOTE(review): .remove/.suspend/.resume are wired directly to
+ * sdhci_remove_host(struct sdhci_host *, int),
+ * sdhci_suspend_host(struct sdhci_host *, pm_message_t) and
+ * sdhci_resume_host(struct sdhci_host *), but the platform bus
+ * invokes these callbacks with a struct platform_device * as the
+ * first argument.  The pointer types do not match, so these hooks
+ * would dereference the wrong object if ever called — wrapper
+ * functions that derive the host from the platform device are
+ * needed.  TODO confirm against the platform_driver callback types.
+ */
+static struct platform_driver ast_sdhci_driver = {
+	.driver.name	= "ast_sdhci",
+	.driver.owner	= THIS_MODULE,
+	.probe		= sdhci_probe,
+	.remove		= __exit_p(sdhci_remove_host),
+#ifdef CONFIG_PM
+	.resume		= sdhci_resume_host,
+	.suspend	= sdhci_suspend_host,
+#endif
+};
+
+/* Module entry point: register the platform driver. */
+static int __init ast_sdhci_drv_init(void)
+{
+	return platform_driver_register(&ast_sdhci_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit ast_sdhci_drv_exit(void)
+{
+	platform_driver_unregister(&ast_sdhci_driver);
+}
+
+module_init(ast_sdhci_drv_init);
+module_exit(ast_sdhci_drv_exit);
+
+/* Read-only module parameter to force a specific quirk mask. */
+module_param(debug_quirks, uint, 0444);
+
+MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx> & River Huang");
+MODULE_DESCRIPTION("ASPEED Secure Digital Host Controller Interface core driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 6659b2275c0c..45cb05ad8f3d 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -503,6 +503,15 @@ static struct flash_info __devinitdata m25p_data [] = { { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, }, { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, + /* Macronix */ + { "mx25l4005a", 0xc22013, 0, 64 * 1024, 8, SECT_4K }, + { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, 0 }, + { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, 0 }, + { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, 0 }, + + { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, 0 }, + { "mx25l25635e", 0xc22019, 0, 64 * 1024, 512, 0 }, + /* Spansion -- single (large) sector size only, at least * for the chips listed here (without boot sectors). */ @@ -511,7 +520,7 @@ static struct flash_info __devinitdata m25p_data [] = { { "s25sl016a", 0x010214, 0, 64 * 1024, 32, }, { "s25sl032a", 0x010215, 0, 64 * 1024, 64, }, { "s25sl064a", 0x010216, 0, 64 * 1024, 128, }, - { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, + { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, }, /* SST -- large erase sizes are "overlays", "sectors" are 4K */ diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 5ea169362164..bafa60cb0d96 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -351,6 +351,10 @@ config MTD_ARM_INTEGRATOR tristate "CFI Flash device mapped on ARM Integrator/P720T" depends on ARM && MTD_CFI +config MTD_AST + tristate "CFI Flash device mapped on ASPEED" + depends on ARCH_ASPEED && MTD_CFI + config MTD_CDB89712 tristate "Cirrus CDB89712 evaluation board mappings" depends on MTD_CFI && ARCH_CDB89712 diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 6d9ba35caf11..3d8b1f2024e8 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -9,6 +9,7 @@ endif # Chip mappings 
obj-$(CONFIG_MTD_CDB89712) += cdb89712.o obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o +obj-$(CONFIG_MTD_AST) += ast-nor.o obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o obj-$(CONFIG_MTD_DC21285) += dc21285.o obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o diff --git a/drivers/mtd/maps/ast-nor.c b/drivers/mtd/maps/ast-nor.c new file mode 100644 index 000000000000..7cc474156642 --- /dev/null +++ b/drivers/mtd/maps/ast-nor.c @@ -0,0 +1,221 @@ +/*====================================================================== + + drivers/mtd/maps/ast-nor.c: ASPEED flash map driver + + Copyright (C) 2012-2020 ASPEED Technology Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + This is access code for flashes using ARM's flash partitioning + standards. 
+ +* History: +* 2012.10.11: Initial version [Ryan Chen] + +======================================================================*/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/init.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/map.h> +#include <linux/mtd/partitions.h> + +#include <asm/mach/flash.h> +#include <mach/hardware.h> +#include <asm/io.h> +#include <asm/system.h> +#include <mach/platform.h> + +#define PFX "ast-flash: " + +struct ast_flash_info { + struct mtd_info *mtd; + struct map_info map; + struct resource *res; + struct mtd_partition *partitions; +// struct flash_platform_data *plat; +}; + +static int ast_flash_probe(struct platform_device *pdev) +{ + struct ast_flash_info *info; + struct flash_platform_data *pdata = pdev->dev.platform_data; + struct resource *res = NULL; + + int ret; + static int no_partitions; + info = kzalloc(sizeof(struct ast_flash_info), GFP_KERNEL); + if (info == NULL) { + printk(KERN_ERR PFX "no memory for flash info\n"); + return -ENOMEM; + } + + /* request register map resource & check */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "register resources unusable\n"); + ret = -ENXIO; + goto free_dev; + } + + if (!request_mem_region(res->start, res->end - res->start + 1, pdev->name)) { + ret = -EBUSY; + goto free_something_1; + } + + info->map.virt = ioremap(res->start, (res->end - res->start) + 1); + if (!info->map.virt) { + dev_err(&pdev->dev, "cannot map ast_flash_info registers\n"); + ret = -ENOMEM; + goto release_mem; + } + info->map.phys = res->start; + info->map.size = res->end - res->start + 1; + info->map.name = (char *) pdata->map_name; +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 + info->map.bankwidth = 1; +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2 + info->map.bankwidth = 2; +#endif + platform_set_drvdata(pdev, info); + + printk("%s 
%x, %x, %s \n",__FUNCTION__,res->start,res->end,pdev->dev.bus_id); + + printk("%s: area %08lx, size %lx\n", __FUNCTION__, info->map.phys, info->map.size); + + printk("%s: virt at %08x, res->start %x \n", __FUNCTION__, (int)info->map.virt,res->start); + + info->partitions = pdata->parts; + + simple_map_init(&info->map); + + /* probe for the device(s) */ + + info->mtd = do_map_probe(pdata->map_name, &info->map); + if (!info->mtd) { + ret = -EIO; + goto reset_drvdata; + } + + /* mark ourselves as the owner */ + info->mtd->owner = THIS_MODULE; + + no_partitions = pdata->nr_parts;//ARRAY_SIZE(nor_partitions); + ret = add_mtd_partitions(info->mtd, info->partitions, no_partitions); + if (ret){ + printk(KERN_ERR PFX "cannot add/parse partitions\n"); + goto free_something_2; + } + + return 0; + + /* fall through to exit error */ + +free_something_2: + del_mtd_partitions(info->mtd); + map_destroy(info->mtd); +reset_drvdata: + //kfree(info->partitions); + platform_set_drvdata(pdev, NULL); +//unmap_regs: + iounmap(info->map.virt); +release_mem: + release_mem_region(res->start, (res->end - res->start) + 1); +free_something_1: + +free_dev: + kfree(info); + + return ret; +} + +static int ast_flash_remove(struct platform_device *pdev) +{ + struct ast_flash_info *info = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if(info) { + if (info->mtd) { + del_mtd_partitions(info->mtd); + map_destroy(info->mtd); + } + platform_set_drvdata(pdev, NULL); + iounmap(info->map.virt); + + release_mem_region(res->start, (res->end - res->start) + 1); + kfree(info); + } + + return 0; +} + + +#ifdef CONFIG_PM +static int +ast_flash_suspend(struct platform_device *pdev, pm_message_t msg) +{ + pr_debug("ast_flash_suspend\n"); + + return 0; +} + +static int +ast_flash_resume(struct platform_device *pdev) +{ + pr_debug("ast_flash_resume\n"); + + return 0; +} +#else +#define ast_flash_suspend NULL +#define ast_flash_resume NULL +#endif + +static 
struct platform_driver ast_flash_driver = { + .probe = ast_flash_probe, + .remove = ast_flash_remove, + .suspend = ast_flash_suspend, + .resume = ast_flash_resume, + .driver = { + .name = "ast-nor", + .owner = THIS_MODULE, + }, +}; + +static int __init ast_flash_init(void) +{ + printk("ASPEED NOR-Flash Driver, (c) 2012 Aspeed Tech \n"); + return platform_driver_register(&ast_flash_driver); +} + +static void __exit ast_flash_exit(void) +{ + platform_driver_unregister(&ast_flash_driver); +} + +module_init(ast_flash_init); +module_exit(ast_flash_exit); + +MODULE_AUTHOR("Ryan Chen"); +MODULE_DESCRIPTION("ASPEED AST CFI map driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:astflash"); diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 1c2e9450d663..e1ab76b3b90e 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -56,6 +56,12 @@ config MTD_NAND_H1900 help This enables the driver for the iPAQ h1900 flash. +config MTD_NAND_AST + tristate "AST NAND flash" + depends on MTD_NAND && ARCH_ASPEED && MTD_PARTITIONS + help + This enables the driver for the ASPEED NAND flash. 
+ config MTD_NAND_GPIO tristate "GPIO NAND Flash driver" depends on GENERIC_GPIO && ARM diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index b661586afbfc..129dfd3de2f5 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -36,5 +36,6 @@ obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o +obj-$(CONFIG_MTD_NAND_AST) += ast_nand.o nand-objs := nand_base.o nand_bbt.o diff --git a/drivers/mtd/nand/ast_nand.c b/drivers/mtd/nand/ast_nand.c new file mode 100755 index 000000000000..240a832a54ef --- /dev/null +++ b/drivers/mtd/nand/ast_nand.c @@ -0,0 +1,197 @@ +/******************************************************************************** +* File Name : drivers/mtd/nand/aspeed_nand.c +* Author : Ryan Chen +* Description : ASPEED NAND driver +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +* Overview: +* This is a device driver for the NAND flash device found on the +* ASPEED board which utilizes the Samsung K9F2808 part. This is +* a 128Mibit (16MiB x 8 bits) NAND flash device. + +* History : +* 1. 
2012/10/20 Ryan Chen create this file +* +********************************************************************************/ +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/partitions.h> + +struct ast_nand_data { + struct nand_chip chip; + struct mtd_info mtd; + void __iomem *io_base; +#ifdef CONFIG_MTD_PARTITIONS + int nr_parts; + struct mtd_partition *parts; +#endif +}; + +static struct nand_ecclayout aspeed_nand_hw_eccoob = { + .eccbytes = 24, + .eccpos = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}, +}; + +/* hardware specific access to control-lines */ +static void +ast_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ + struct nand_chip *chip = mtd->priv; + + if (cmd != NAND_CMD_NONE) { + //writeb(cmd, chip->IO_ADDR_W + ((ctrl & 0x6) << 11)); + //user mode cmd addr[13:12] + if(ctrl & NAND_CLE) + writeb(cmd, chip->IO_ADDR_W + (0x1 << 12)); + else if (ctrl & NAND_ALE) + writeb(cmd, chip->IO_ADDR_W + (0x2 << 12)); + else + writeb(cmd, chip->IO_ADDR_W + (1 << 12)); + } +} + +/* + * Main initialization routine + */ +static int __init +ast_nand_probe(struct platform_device *pdev) +{ + struct ast_nand_data *data; + struct platform_nand_data *pdata = pdev->dev.platform_data; + int res = 0; + + /* Allocate memory for the device structure (and zero it) */ + data = kzalloc(sizeof(struct ast_nand_data), GFP_KERNEL); + if (!data) { + dev_err(&pdev->dev, "failed to allocate device structure.\n"); + return -ENOMEM; + } + + data->io_base = ioremap(pdev->resource[0].start, + pdev->resource[0].end - pdev->resource[0].start ); + if (data->io_base == NULL) { + dev_err(&pdev->dev, "ioremap failed\n"); + kfree(data); + return -EIO; + } + + data->chip.priv = &data; + data->mtd.priv = &data->chip; + data->mtd.owner = THIS_MODULE; + data->mtd.name = pdev->dev.bus_id; + + 
data->chip.IO_ADDR_R = data->io_base; + data->chip.IO_ADDR_W = data->io_base; + data->chip.cmd_ctrl = ast_hwcontrol; + data->chip.dev_ready = pdata->ctrl.dev_ready; + data->chip.select_chip = pdata->ctrl.select_chip; + data->chip.chip_delay = pdata->chip.chip_delay; + data->chip.options |= pdata->chip.options; + + data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; + data->chip.ecc.calculate = pdata->ctrl.calculate; + data->chip.ecc.correct = pdata->ctrl.correct; +// data->chip.ecc.layout = pdata->chip.ecclayout; + data->chip.ecc.layout = &aspeed_nand_hw_eccoob; + + data->chip.ecc.bytes = 6; + data->chip.ecc.size = 512; + data->chip.ecc.mode = NAND_ECC_HW; //NAND_ECC_SOFT; + + platform_set_drvdata(pdev, data); + + /* Scan to find existance of the device */ + if (nand_scan(&data->mtd, 1)) { + res = -ENXIO; + goto out; + } +#ifdef CONFIG_MTD_PARTITIONS + if (pdata->chip.part_probe_types) { + res = parse_mtd_partitions(&data->mtd, + pdata->chip.part_probe_types, + &data->parts, 0); + if (res > 0) { + add_mtd_partitions(&data->mtd, data->parts, res); + return 0; + } + } + if (pdata->chip.partitions) { + data->parts = pdata->chip.partitions; + res = add_mtd_partitions(&data->mtd, data->parts, + pdata->chip.nr_partitions); + } else +#endif + res = add_mtd_device(&data->mtd); + + if (!res) + return res; + + nand_release(&data->mtd); +out: + platform_set_drvdata(pdev, NULL); + iounmap(data->io_base); + kfree(data); + + return res; + +} + +static int __devexit ast_nand_remove(struct platform_device *pdev) +{ + struct ast_nand_data *data = platform_get_drvdata(pdev); +#ifdef CONFIG_MTD_PARTITIONS + struct platform_nand_data *pdata = pdev->dev.platform_data; +#endif + + nand_release(&data->mtd); +#ifdef CONFIG_MTD_PARTITIONS + if (data->parts && data->parts != pdata->chip.partitions) + kfree(data->parts); +#endif + iounmap(data->io_base); + kfree(data); + + return 0; +} + +static struct platform_driver ast_nand_driver = { + .probe = ast_nand_probe, + .remove = ast_nand_remove, + 
.driver = { + .name = "ast-nand", + .owner = THIS_MODULE, + }, +}; + +static int __init ast_nand_init(void) +{ + return platform_driver_register(&ast_nand_driver); +} + +static void __exit ast_nand_exit(void) +{ + platform_driver_unregister(&ast_nand_driver); +} + +module_init(ast_nand_init); +module_exit(ast_nand_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ryan Chen"); +MODULE_DESCRIPTION("NAND flash driver for ASPEED"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 231eeaf1d552..e017c36280da 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1889,6 +1889,9 @@ menuconfig NETDEV_1000 if NETDEV_1000 +config ASPEEDMAC + tristate "ASPEED MAC support" + config ACENIC tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 017383ad5ec6..3c04c0bc627a 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -85,6 +85,7 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o obj-$(CONFIG_RIONET) += rionet.o obj-$(CONFIG_SH_ETH) += sh_eth.o +obj-$(CONFIG_ASPEEDMAC) += ftgmac100_26.o # # end link order section diff --git a/drivers/net/ftgmac100_26.c b/drivers/net/ftgmac100_26.c new file mode 100644 index 000000000000..8575293015e7 --- /dev/null +++ b/drivers/net/ftgmac100_26.c @@ -0,0 +1,1883 @@ +/******************************************************************************** +* File Name : ftgmac100_26.c +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +********************************************************************************/ +//----------------------------------------------------------------------------- +// "ASPEED MAC Driver, (Linux Kernel 2.6.15.7) 10/02/07 - by ASPEED\n" +// Further improvements: +// +// -- Assume MAC1 has a PHY chip. Read the chip type and handle Marvell +// or Broadcom, else don't touch PHY chip (if present). +// +// -- If MAC2 is on, check if U-Boot enabled the MII2DC+MII2DIO pins. +// If yes, handle Marvell or Broadcom PHY. If no, assume sideband RMII +// interface with no PHY chip. +// 1.12/27/07 - by river@aspeed +// Workaround for the gigabit hash function +// 2.12/27/07 - by river@aspeed +// Synchronize the EDORR bit with document, D[30], D[15] both are EDORR +// 3.12/31/07 - by river@aspeed +// Add aspeed_i2c_init and aspeed_i2c_read function for DHCP +// 4.04/10/2008 - by river@aspeed +// Synchronize the EDOTR bit with document, D[30] is EDOTR +// 5.04/10/2008 - by river@aspeed +// Remove the workaround for multicast hash function in A2 chip +// SDK 0.19 +// 6.05/15/2008 - by river@aspeed +// Fix bug of free sk_buff in wrong routine +// 7.05/16/2008 - by river@aspeed +// Fix bug of skb_over_panic() +// 8.05/22/2008 - by river@aspeed +// Support NCSI Feature +// SDK 0.20 +// 9.07/02/2008 - by river@aspeed +// Fix TX will drop packet bug +// SDK 0.21 +//10.08/06/2008 - by river@aspeed +// Add the netif_carrier_on() and netif_carrier_off() +//11.08/06/2008 - by river@aspeed +// Fix the timer did not work after device closed +// SDK0.22 +//12.08/12/2008 - by river@aspeed +// Support different PHY configuration +// SDK0.23 +//13.10/14/2008 - by river@aspeed +// Support Realtek RTL8211BN Gigabit PHY +//14.11/17/2008 - by 
river@aspeed +// Modify the allocate buffer to alignment to IP header +// SDK0.26 +//15.07/28/2009 - by river@aspeed +// Fix memory leakage problem in using multicast +//16.07/28/2009 - by river@aspeed +// tx_free field in the local structure should be integer +// +// +// +//AST2300 SDK 0.12 +//17.03/30/2010 - by river@aspeed +// Modify for AST2300's hardware CLOCK/RESET/MULTI-PIN configuration +//18.03/30/2010 - by river@aspeed +// Fix does not report netif_carrier_on() and netif_carrier_off() when use MARVELL PHY +//AST2300 SDK 0.13 +//17.06/10/2010 - by river@aspeed +// Support AST2300 A0 +//18.06/10/2010 - by river@aspeed +// EEPROM is at I2C channel 4 on AST2300 A0 EVB +//AST2300 SDK 0.14 +//19.09/13/2010 - by river@aspeed +// Support Realtek RTL8201EL 10/100M PHY +//AST2400 +//20.06/25/2013 - by CC@aspeed +// Support BCM54612E 10/100/1000M PHY +//----------------------------------------------------------------------------- + +#include <linux/module.h> +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/in.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <asm/bitops.h> +#include <asm/io.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <mach/ftgmac100_drv.h> + +#include <linux/skbuff.h> + +#include "ftgmac100_26.h" + +#if defined(CONFIG_ARM) +#include <mach/hardware.h> +#include <asm/cacheflush.h> + +#elif defined(CONFIG_COLDFIRE) +#include <asm/astsim.h> + +#else +#err "Not define include for GMAC" +#endif + +/*------------------------------------------------------------------------ + . + . Configuration options, for the experienced user to change. + . 
+ -------------------------------------------------------------------------*/ + +/* + . DEBUGGING LEVELS + . + . 0 for normal operation + . 1 for slightly more details + . >2 for various levels of increasingly useless information + . 2 for interrupt tracking, status flags + . 3 for packet info + . 4 for complete packet dumps +*/ + +#define DO_PRINT(args...) printk(": " args) + +#define FTMAC100_DEBUG 1 + +#if (FTMAC100_DEBUG > 2 ) +#define PRINTK3(args...) DO_PRINT(args) +#else +#define PRINTK3(args...) +#endif + +#if FTMAC100_DEBUG > 1 +#define PRINTK2(args...) DO_PRINT(args) +#else +#define PRINTK2(args...) +#endif + +#ifdef FTMAC100_DEBUG +#define PRINTK(args...) DO_PRINT(args) +#else +#define PRINTK(args...) +#endif + +/* + . A rather simple routine to print out a packet for debugging purposes. +*/ +#if FTMAC100_DEBUG > 2 +static void print_packet( u8 *, int ); +#endif + +static int ftgmac100_wait_to_send_packet(struct sk_buff * skb, struct net_device * dev); + +static volatile int trans_busy = 0; + + +void ftgmac100_phy_rw_waiting(unsigned int ioaddr) +{ + unsigned int tmp; + + do { + mdelay(10); + tmp =inl(ioaddr + PHYCR_REG); + } while ((tmp&(PHY_READ_bit|PHY_WRITE_bit)) > 0); +} + + +/*------------------------------------------------------------ + . Reads a register from the MII Management serial interface + .-------------------------------------------------------------*/ +static u16 ftgmac100_read_phy_register(unsigned int ioaddr, u8 phyaddr, u8 phyreg) +{ + unsigned int tmp; + + if (phyaddr > 0x1f) // MII chip IDs are 5 bits long + return 0xffff; + + tmp = inl(ioaddr + PHYCR_REG); + tmp &= 0x3000003F; + tmp |=(phyaddr<<16); + tmp |=(phyreg<<(16+5)); + tmp |=PHY_READ_bit; + + outl( tmp, ioaddr + PHYCR_REG ); + ftgmac100_phy_rw_waiting(ioaddr); + + return (inl(ioaddr + PHYDATA_REG)>>16); +} + + +/*------------------------------------------------------------ + . 
Writes a register to the MII Management serial interface + .-------------------------------------------------------------*/ +static void ftgmac100_write_phy_register(unsigned int ioaddr, + u8 phyaddr, u8 phyreg, u16 phydata) +{ + unsigned int tmp; + + if (phyaddr > 0x1f) // MII chip IDs are 5 bits long + return; + + tmp = inl(ioaddr + PHYCR_REG); + tmp &= 0x3000003F; + tmp |=(phyaddr<<16); + tmp |=(phyreg<<(16+5)); + tmp |=PHY_WRITE_bit; + + outl( phydata, ioaddr + PHYDATA_REG ); + outl( tmp, ioaddr + PHYCR_REG ); + ftgmac100_phy_rw_waiting(ioaddr); +} + +static void ast_gmac_set_mac(struct ftgmac100_priv *priv, const unsigned char *mac) +{ + unsigned int maddr = mac[0] << 8 | mac[1]; + unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; + + iowrite32(maddr, priv->netdev->base_addr + MAC_MADR_REG); + iowrite32(laddr, priv->netdev->base_addr + MAC_LADR_REG); +} + +/* + * MAC1 always has MII MDC+MDIO pins to access PHY registers. We assume MAC1 + * always has a PHY chip, if MAC1 is enabled. + * U-Boot can enable MAC2 MDC+MDIO pins for a 2nd PHY, or MAC2 might be + * disabled (only one port), or it's sideband-RMII which has no PHY chip. + * + * Return miiPhyId==0 if the MAC cannot be accessed. + * Return miiPhyId==1 if the MAC registers are OK but it cannot carry traffic. + * Return miiPhyId==2 if the MAC can send/receive but it has no PHY chip. + * Else return the PHY 22-bit vendor ID, 6-bit model and 4-bit revision. + */ +static void getMacHwConfig( struct net_device* dev, struct AstMacHwConfig* out ) +{ + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + +// out->macId = dev->dev_id; +//.. getMacAndPhy(dev, out); + out->miiPhyId = 0; + + // We assume the Clock Stop register does not disable the MAC1 or MAC2 clock + // unless Reset Control also holds the MAC in reset. + // For now, we only support a PHY chip on the MAC's own MDC+MDIO bus. 
+ if (out->phyAddr > 0x1f) { +no_phy_access: + out->phyAddr = 0xff; + return; + } + + if (priv->NCSI_support == 0) { + out->miiPhyId = ftgmac100_read_phy_register(dev->base_addr, out->phyAddr, 0x02); + if (out->miiPhyId == 0xFFFF) { //Realtek PHY at address 1 + out->phyAddr = 1; + } + if (out->miiPhyId == 0x0362) { + out->phyAddr = 1; + } + out->miiPhyId = ftgmac100_read_phy_register(dev->base_addr, out->phyAddr, 0x02); + out->miiPhyId = (out->miiPhyId & 0xffff) << 16; + out->miiPhyId |= ftgmac100_read_phy_register(dev->base_addr, out->phyAddr, 0x03) & 0xffff; + + switch (out->miiPhyId >> 16) { + case 0x0040: // Broadcom + case 0x0141: // Marvell + case 0x001c: // Realtek + case 0x0362: // BCM54612 + break; + + default: + // Leave miiPhyId for DO_PRINT(), but reset phyAddr. + // out->miiPhyId = 2; + goto no_phy_access; + break; + } + } + return; +} + + +static void ftgmac100_reset( struct net_device* dev ) +{ + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + struct AstMacHwConfig* ids = &priv->ids; + unsigned int tmp, speed, duplex; + + getMacHwConfig(dev, ids); + PRINTK("%s:ftgmac100_reset, phyAddr=0x%x, miiPhyId=0x%04x_%04x\n", + dev->name, ids->phyAddr, (ids->miiPhyId >> 16), (ids->miiPhyId & 0xffff)); + + if (ids->miiPhyId < 1) + return; // Cannot access MAC registers + + // Check the link speed and duplex. + // They are not valid until auto-neg is resolved, which is reg.1 bit[5], + // or the link is up, which is reg.1 bit[2]. + + if (ids->phyAddr < 0xff) + tmp = ftgmac100_read_phy_register(dev->base_addr, ids->phyAddr, 0x1); + else tmp = 0; + + if (0==(tmp & (1u<<5 | 1u<<2)) || ids->phyAddr >= 0xff) { + // No PHY chip, or link has not negotiated. + speed = PHY_SPEED_100M; + duplex = 1; + netif_carrier_off(dev); + } + else if (((ids->miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8201EL)) { + tmp = ftgmac100_read_phy_register(dev->base_addr, priv->ids.phyAddr, 0x00); + duplex = (tmp & 0x0100) ? 1 : 0; + speed = (tmp & 0x2000) ? 
PHY_SPEED_100M : PHY_SPEED_10M; + } + else if (((ids->miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_MARVELL) || + ((ids->miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8211)) { + // Use reg.17_0.bit[15:13] for {speed[1:0], duplex}. + tmp = ftgmac100_read_phy_register(dev->base_addr, ids->phyAddr, 0x11); + duplex = (tmp & PHY_DUPLEX_mask)>>13; + speed = (tmp & PHY_SPEED_mask)>>14; + netif_carrier_on(dev); + } + else if (priv->ids.miiPhyId == PHYID_BCM54612E) { + // Get link status + // First Switch shadow register selector + ftgmac100_write_phy_register(dev->base_addr, priv->ids.phyAddr, 0x1C, 0x2000); + tmp = ftgmac100_read_phy_register(dev->base_addr, priv->ids.phyAddr, 0x1C); + if ( (tmp & 0x0080) == 0x0080 ) + duplex = 0; + else + duplex = 1; + + switch(tmp & 0x0018) { + case 0x0000: + speed = PHY_SPEED_1G; break; + case 0x0008: + speed = PHY_SPEED_100M; break; + case 0x0010: + speed = PHY_SPEED_10M; break; + default: + speed = PHY_SPEED_100M; + } + } + else { + // Assume Broadcom BCM5221. Use reg.18 bits [1:0] for {100Mb/s, fdx}. + tmp = ftgmac100_read_phy_register(dev->base_addr, ids->phyAddr, 0x18); + duplex = (tmp & 0x0001); + speed = (tmp & 0x0002) ? PHY_SPEED_100M : PHY_SPEED_10M; + } + + if (speed == PHY_SPEED_1G) { + // Set SPEED_100_bit too, for consistency. 
+ priv->maccr_val |= GMAC_MODE_bit | SPEED_100_bit; + tmp = inl( dev->base_addr + MACCR_REG ); + tmp |= GMAC_MODE_bit | SPEED_100_bit; + outl(tmp, dev->base_addr + MACCR_REG ); + } else { + priv->maccr_val &= ~(GMAC_MODE_bit | SPEED_100_bit); + tmp = inl( dev->base_addr + MACCR_REG ); + tmp &= ~(GMAC_MODE_bit | SPEED_100_bit); + if (speed == PHY_SPEED_100M) { + priv->maccr_val |= SPEED_100_bit; + tmp |= SPEED_100_bit; + } + outl(tmp, dev->base_addr + MACCR_REG ); + } + if (duplex) + priv->maccr_val |= FULLDUP_bit; + else + priv->maccr_val &= ~FULLDUP_bit; + + outl( SW_RST_bit, dev->base_addr + MACCR_REG ); + +#ifdef not_complete_yet + /* Setup for fast accesses if requested */ + /* If the card/system can't handle it then there will */ + /* be no recovery except for a hard reset or power cycle */ + if (dev->dma) + { + outw( inw( dev->base_addr + CONFIG_REG ) | CONFIG_NO_WAIT, + dev->base_addr + CONFIG_REG ); + } +#endif /* end_of_not */ + + /* this should pause enough for the chip to be happy */ + for (; (inl( dev->base_addr + MACCR_REG ) & SW_RST_bit) != 0; ) + { + mdelay(10); + PRINTK3("RESET: reset not complete yet\n" ); + } + + outl( 0, dev->base_addr + IER_REG ); /* Disable all interrupts */ +} + +static void ftgmac100_enable( struct net_device *dev ) +{ + int i; + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + unsigned int tmp_rsize; //Richard + unsigned int rfifo_rsize; //Richard + unsigned int tfifo_rsize; //Richard + unsigned int rxbuf_size; + + rxbuf_size = RX_BUF_SIZE & 0x3fff; + outl( rxbuf_size , dev->base_addr + RBSR_REG); //for NC Body + + for (i=0; i<RXDES_NUM; ++i) + priv->rx_descs[i].RXPKT_RDY = RX_OWNBY_FTGMAC100; // owned by FTMAC100 + + priv->rx_idx = 0; + + for (i=0; i<TXDES_NUM; ++i) { + priv->tx_descs[i].TXDMA_OWN = TX_OWNBY_SOFTWARE; // owned by software + priv->tx_skbuff[i] = 0; + } + + priv->tx_idx = 0; + priv->old_tx = 0; + priv->tx_free=TXDES_NUM; + + /* Set the MAC address */ + ast_gmac_set_mac(priv, dev->dev_addr); 
+ + outl( priv->rx_descs_dma, dev->base_addr + RXR_BADR_REG); + outl( priv->tx_descs_dma, dev->base_addr + TXR_BADR_REG); + outl( 0x00001010, dev->base_addr + ITC_REG); + + outl( (0UL<<TXPOLL_CNT)|(0x1<<RXPOLL_CNT), dev->base_addr + APTC_REG); + outl( 0x44f97, dev->base_addr + DBLAC_REG ); + + /// outl( inl(FCR_REG)|0x1, ioaddr + FCR_REG ); // enable flow control + /// outl( inl(BPR_REG)|0x1, ioaddr + BPR_REG ); // enable back pressure register + + // +++++ Richard +++++ // + tmp_rsize = inl( dev->base_addr + FEAR_REG ); + rfifo_rsize = tmp_rsize & 0x00000007; + tfifo_rsize = (tmp_rsize >> 3)& 0x00000007; + + tmp_rsize = inl( dev->base_addr + TPAFCR_REG ); + tmp_rsize &= ~0x3f000000; + tmp_rsize |= (tfifo_rsize << 27); + tmp_rsize |= (rfifo_rsize << 24); + + outl(tmp_rsize, dev->base_addr + TPAFCR_REG); + // ----- Richard ----- // + +//river set MAHT0, MAHT1 + if (priv->maccr_val & GMAC_MODE_bit) { + outl (priv->GigaBit_MAHT0, dev->base_addr + MAHT0_REG); + outl (priv->GigaBit_MAHT1, dev->base_addr + MAHT1_REG); + } + else { + outl (priv->Not_GigaBit_MAHT0, dev->base_addr + MAHT0_REG); + outl (priv->Not_GigaBit_MAHT1, dev->base_addr + MAHT1_REG); + } + + /// enable trans/recv,... 
+ outl(priv->maccr_val, dev->base_addr + MACCR_REG ); +#if 0 +//NCSI Start +//DeSelect Package/ Select Package + if ((priv->NCSI_support == 1) || (priv->INTEL_NCSI_EVA_support == 1)) { + NCSI_Struct_Initialize(dev); + for (i = 0; i < 4; i++) { + DeSelect_Package (dev, i); + Package_Found = Select_Package (dev, i); + if (Package_Found == 1) { +//AST2100/AST2050/AST1100 supports 1 slave only + priv->NCSI_Cap.Package_ID = i; + break; + } + } + if (Package_Found != 0) { +//Initiali State + for (i = 0; i < 2; i++) { //Suppose 2 channels in current version, You could modify it to 0x1F to support 31 channels + Channel_Found = Clear_Initial_State(dev, i); + if (Channel_Found == 1) { + priv->NCSI_Cap.Channel_ID = i; + printk ("Found NCSI Network Controller at (%d, %d)\n", priv->NCSI_Cap.Package_ID, priv->NCSI_Cap.Channel_ID); +//Get Version and Capabilities + Get_Version_ID(dev); + Get_Capabilities(dev); +//Configuration + Select_Active_Package(dev); +//Set MAC Address + Enable_Set_MAC_Address(dev); +//Enable Broadcast Filter + Enable_Broadcast_Filter(dev); +//Disable VLAN + Disable_VLAN(dev); +//Enable AEN + Enable_AEN(dev); +//Get Parameters + Get_Parameters(dev); +//Enable TX + Enable_Network_TX(dev); +//Enable Channel + Enable_Channel(dev); +//Get Link Status +Re_Get_Link_Status: + Link_Status = Get_Link_Status(dev); + if (Link_Status == LINK_UP) { + printk ("Using NCSI Network Controller (%d, %d)\n", priv->NCSI_Cap.Package_ID, priv->NCSI_Cap.Channel_ID); + netif_carrier_on(dev); + break; + } + else if ((Link_Status == LINK_DOWN) && (Re_Send < 2)) { + Re_Send++; + netif_carrier_off(dev); + goto Re_Get_Link_Status; + } +//Disable TX + Disable_Network_TX(dev); +//Disable Channel +// Disable_Channel(dev); + Re_Send = 0; + Channel_Found = 0; + } + } + } + } + /* now, enable interrupts */ +#endif + if (((priv->ids.miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_MARVELL) || + ((priv->ids.miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8211)) { + outl( + PHYSTS_CHG_bit | + 
AHB_ERR_bit | + TPKT_LOST_bit | + TPKT2E_bit | + RXBUF_UNAVA_bit | + RPKT2B_bit + ,dev->base_addr + IER_REG + ); + } + else if (((priv->ids.miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_BROADCOM) || + ((priv->ids.miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8201EL)) { + outl( + AHB_ERR_bit | + TPKT_LOST_bit | + TPKT2E_bit | + RXBUF_UNAVA_bit | + RPKT2B_bit + ,dev->base_addr + IER_REG + ); + } + else if (priv->ids.miiPhyId == PHYID_BCM54612E) { + outl( +// no link PHY link status pin PHYSTS_CHG_bit | + AHB_ERR_bit | + TPKT_LOST_bit | + TPKT2E_bit | + RXBUF_UNAVA_bit | + RPKT2B_bit + ,dev->base_addr + IER_REG + ); + } else { + outl( +// no link PHY link status pin PHYSTS_CHG_bit | + AHB_ERR_bit | + TPKT_LOST_bit | + TPKT2E_bit | + RXBUF_UNAVA_bit | + RPKT2B_bit + ,dev->base_addr + IER_REG + ); + } +} + +static void aspeed_mac_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + unsigned int status, tmp, speed, duplex, macSpeed; + +#ifdef CONFIG_ARCH_AST2300 + //Fix issue for tx/rx arbiter lock + outl( 0xffffffff, dev->base_addr + TXPD_REG); +#endif + status = ftgmac100_read_phy_register(dev->base_addr, priv->ids.phyAddr, 0x01); + + if (status & LINK_STATUS) { // Bit[2], Link Status, link is up + priv->timer.expires = jiffies + 10 * HZ; + + if ((priv->ids.miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_BROADCOM) { + tmp = ftgmac100_read_phy_register(dev->base_addr, priv->ids.phyAddr, 0x18); + duplex = (tmp & 0x0001); + speed = (tmp & 0x0002) ? PHY_SPEED_100M : PHY_SPEED_10M; + } + else if ((priv->ids.miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8201EL) { + tmp = ftgmac100_read_phy_register(dev->base_addr, priv->ids.phyAddr, 0x00); + duplex = (tmp & 0x0100) ? 1 : 0; + speed = (tmp & 0x2000) ? 
PHY_SPEED_100M : PHY_SPEED_10M; + } + else if (((priv->ids.miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_MARVELL) || + ((priv->ids.miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8211)) { + tmp = ftgmac100_read_phy_register(dev->base_addr, priv->ids.phyAddr, 0x11); + duplex = (tmp & PHY_DUPLEX_mask)>>13; + speed = (tmp & PHY_SPEED_mask)>>14; + } + else if (priv->ids.miiPhyId == PHYID_BCM54612E) { + // Get link status + // First Switch shadow register selector + ftgmac100_write_phy_register(dev->base_addr, priv->ids.phyAddr, 0x1C, 0x2000); + tmp = ftgmac100_read_phy_register(dev->base_addr, priv->ids.phyAddr, 0x1C); + if ( (tmp & 0x0080) == 0x0080 ) + duplex = 0; + else + duplex = 1; + + switch(tmp & 0x0018) { + case 0x0000: + speed = PHY_SPEED_1G; + + break; + case 0x0008: + speed = PHY_SPEED_100M; + + break; + case 0x0010: + speed = PHY_SPEED_10M; + + break; + default: + speed = PHY_SPEED_100M; + } + } + else { + duplex = 1; speed = PHY_SPEED_100M; + } + + macSpeed = ((priv->maccr_val & GMAC_MODE_bit)>>8 // Move bit[9] to bit[1] + | (priv->maccr_val & SPEED_100_bit)>>19); // bit[19] to bit[0] + // The MAC hardware ignores SPEED_100_bit if GMAC_MODE_bit is set. + if (macSpeed > PHY_SPEED_1G) macSpeed = PHY_SPEED_1G; // 0x3 --> 0x2 + + if ( ((priv->maccr_val & FULLDUP_bit)!=0) != duplex + || macSpeed != speed ) + { + PRINTK("%s:aspeed_mac_timer, priv->maccr_val=0x%05x, PHY {speed,duplex}=%d,%d\n", + dev->name, priv->maccr_val, speed, duplex); + ftgmac100_reset(dev); + ftgmac100_enable(dev); + } + netif_carrier_on(dev); + } + else { + netif_carrier_off(dev); + priv->timer.expires = jiffies + 1 * HZ; + } + add_timer(&priv->timer); +} + +/* + . Function: ftgmac100_shutdown + . Purpose: closes down the SMC91xxx chip. + . Method: + . 1. zero the interrupt mask + . 2. clear the enable receive flag + . 3. clear the enable xmit flags + . + . TODO: + . (1) maybe utilize power down mode. + . Why not yet? Because while the chip will go into power down mode, + . 
the manual says that it will wake up in response to any I/O requests + . in the register space. Empirical results do not show this working. +*/ +static void ftgmac100_shutdown( unsigned int ioaddr ) +{ + ///interrupt mask register + outl( 0, ioaddr + IER_REG ); + /* enable trans/recv,... */ + outl( 0, ioaddr + MACCR_REG ); +} + +/* + . Function: ftgmac100_wait_to_send_packet( struct sk_buff * skb, struct device * ) + . Purpose: + . Attempt to allocate memory for a packet, if chip-memory is not + . available, then tell the card to generate an interrupt when it + . is available. + . + . Algorithm: + . + . o if the saved_skb is not currently null, then drop this packet + . on the floor. This should never happen, because of TBUSY. + . o if the saved_skb is null, then replace it with the current packet, + . o See if I can sending it now. + . o (NO): Enable interrupts and let the interrupt handler deal with it. + . o (YES):Send it now. +*/ +static int ftgmac100_wait_to_send_packet( struct sk_buff * skb, struct net_device * dev ) +{ + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + unsigned long ioaddr = dev->base_addr; + volatile TX_DESC *cur_desc; + int length; + unsigned long flags; + + spin_lock_irqsave(&priv->tx_lock,flags); + + if (skb==NULL) + { + DO_PRINT("%s(%d): NULL skb???\n", __FILE__,__LINE__); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return 0; + } + + PRINTK3("%s:ftgmac100_wait_to_send_packet, skb=%x\n", dev->name, skb); + cur_desc = &priv->tx_descs[priv->tx_idx]; + +#ifdef not_complete_yet + if (cur_desc->TXDMA_OWN != TX_OWNBY_SOFTWARE) /// no empty transmit descriptor + { + DO_PRINT("no empty transmit descriptor\n"); + DO_PRINT("jiffies = %d\n", jiffies); + priv->stats.tx_dropped++; + netif_stop_queue(dev); /// waiting to do: + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return 1; + } +#endif /* end_of_not */ + + if (cur_desc->TXDMA_OWN != TX_OWNBY_SOFTWARE) /// no empty transmit descriptor + { + DO_PRINT("no empty TX 
descriptor:0x%x:0x%x\n", + (unsigned int)cur_desc,((unsigned int *)cur_desc)[0]); + priv->stats.tx_dropped++; + netif_stop_queue(dev); /// waiting to do: + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return 1; + } + priv->tx_skbuff[priv->tx_idx] = skb; + length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; + length = min(length, TX_BUF_SIZE); + +#if FTMAC100_DEBUG > 2 + DO_PRINT("Transmitting Packet at 0x%x, skb->data = %x, len = %x\n", + (unsigned int)cur_desc->VIR_TXBUF_BADR, skb->data, length); + print_packet( skb->data, length ); +#endif + + cur_desc->VIR_TXBUF_BADR = (unsigned long)skb->data; + cur_desc->TXBUF_BADR = virt_to_phys(skb->data); +#ifndef CONFIG_CPU_FA52x_DCE + dmac_clean_range((void *)skb->data, (void *)(skb->data + length)); +#endif + + //clean_dcache_range(skb->data, (char*)(skb->data + length)); + + cur_desc->TXBUF_Size = length; + cur_desc->LTS = 1; + cur_desc->FTS = 1; + + cur_desc->TX2FIC = 0; + cur_desc->TXIC = 0; + + cur_desc->TXDMA_OWN = TX_OWNBY_FTGMAC100; + + outl( 0xffffffff, ioaddr + TXPD_REG); + + priv->tx_idx = (priv->tx_idx + 1) % TXDES_NUM; + priv->stats.tx_packets++; + priv->tx_free--; + + if (priv->tx_free <= 0) { + netif_stop_queue(dev); + + } + + + dev->trans_start = jiffies; + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return 0; +} + +static int ftgmac100_ringbuf_alloc(struct ftgmac100_priv *priv) +{ + int i; + struct sk_buff *skb; + + priv->rx_descs = dma_alloc_coherent(priv->dev, + sizeof(RX_DESC)*RXDES_NUM, + &priv->rx_descs_dma, GFP_KERNEL); + + if(!priv->rx_descs) + return -ENOMEM; + + memset(priv->rx_descs, 0, sizeof(RX_DESC)*RXDES_NUM); + priv->rx_descs[RXDES_NUM-1].EDORR = 1; + + for (i=0; i<RXDES_NUM; i++) { + dma_addr_t mapping; + skb = dev_alloc_skb(RX_BUF_SIZE + NET_IP_ALIGN); + skb_reserve(skb, NET_IP_ALIGN); + + priv->rx_skbuff[i] = skb; + if (skb == NULL) { + printk ("alloc_list: allocate Rx buffer error! 
"); + break; + } + mapping = dma_map_single(priv->dev, skb->data, skb->len, DMA_FROM_DEVICE); + skb->dev = priv->netdev; /* Mark as being used by this device. */ + priv->rx_descs[i].RXBUF_BADR = mapping; + priv->rx_descs[i].VIR_RXBUF_BADR = skb->data; + } + + priv->tx_descs = dma_alloc_coherent(priv->dev, + sizeof(TX_DESC)*TXDES_NUM, + &priv->tx_descs_dma ,GFP_KERNEL); + + if(!priv->tx_descs) + return -ENOMEM; + + memset((void*)priv->tx_descs, 0, sizeof(TX_DESC)*TXDES_NUM); + priv->tx_descs[TXDES_NUM-1].EDOTR = 1; // is last descriptor + +} + +#if FTMAC100_DEBUG > 2 +static void print_packet( u8 * buf, int length ) +{ +#if 1 +#if FTMAC100_DEBUG > 3 + int i; + int remainder; + int lines; +#endif + + +#if FTMAC100_DEBUG > 3 + lines = length / 16; + remainder = length % 16; + + for ( i = 0; i < lines ; i ++ ) { + int cur; + + for ( cur = 0; cur < 8; cur ++ ) { + u8 a, b; + + a = *(buf ++ ); + b = *(buf ++ ); + DO_PRINT("%02x%02x ", a, b ); + } + DO_PRINT("\n"); + } + for ( i = 0; i < remainder/2 ; i++ ) { + u8 a, b; + + a = *(buf ++ ); + b = *(buf ++ ); + DO_PRINT("%02x%02x ", a, b ); + } + DO_PRINT("\n"); +#endif +#endif +} +#endif + +/*------------------------------------------------------------ + . Configures the specified PHY using Autonegotiation. 
+ .-------------------------------------------------------------*/ +static void ftgmac100_phy_configure(struct net_device* dev) +{ + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + unsigned long ioaddr = dev->base_addr; + u32 tmp; +// printk("priv->ids.miiPhyId = %x \n",priv->ids.miiPhyId); + switch (priv->ids.miiPhyId & PHYID_VENDOR_MASK) { + case PHYID_VENDOR_MARVELL: + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x12, 0x4400); + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x13 ); + break; + case PHYID_VENDOR_REALTEK: + switch (priv->ids.miiPhyId) { + case PHYID_RTL8211: + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x12, 0x4400); + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x13 ); + break; + case PHYID_RTL8201EL: + break; + case PHYID_RTL8201F: + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x1f, 0x0007); + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x13 ); + tmp &= ~(0x0030); + tmp |= 0x0008; + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x13, (u16) tmp); + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x11); + tmp &= ~(0x0fff); + tmp |= 0x0008; + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x11, (u16) tmp); + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x1f, 0x0000); + break; + } + break; + case PHYID_VENDOR_BROADCOM: + switch (priv->ids.miiPhyId) { + case PHYID_BCM54612E: + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x1C, 0x8C00); // Disable GTXCLK Clock Delay Enable + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x18, 0xF0E7); // Disable RGMII RXD to RXC Skew + break; + case PHYID_BCM5221A4: + default: + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x1b); + tmp |= 0x0004; + ftgmac100_write_phy_register(ioaddr, priv->ids.phyAddr, 0x1b, (u16) tmp); + break; + } + break; + } +} + + +/*-------------------------------------------------------- + . 
Called by the kernel to send a packet out into the void + . of the net. This routine is largely based on + . skeleton.c, from Becker. + .-------------------------------------------------------- +*/ +static void ftgmac100_timeout (struct net_device *dev) +{ + /* If we get here, some higher level has decided we are broken. + There should really be a "kick me" function call instead. */ + DO_PRINT(KERN_WARNING "%s: transmit timed out? (jiffies=%ld)\n", + dev->name, jiffies); + /* "kick" the adaptor */ + ftgmac100_reset( dev ); + ftgmac100_enable( dev ); + + /* Reconfigure the PHY */ + ftgmac100_phy_configure(dev); + + netif_wake_queue(dev); + dev->trans_start = jiffies; +} + + +static void ftgmac100_free_tx (struct net_device *dev) +{ + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + int entry = priv->old_tx % TXDES_NUM; + unsigned long flags = 0; + + spin_lock_irqsave(&priv->tx_lock,flags); + + /* Free used tx skbuffs */ + + while ((priv->tx_descs[entry].TXDMA_OWN == TX_OWNBY_SOFTWARE) && (priv->tx_skbuff[entry] != NULL)) { + struct sk_buff *skb; + + skb = priv->tx_skbuff[entry]; + dev_kfree_skb_any (skb); + priv->tx_skbuff[entry] = 0; + entry = (entry + 1) % TXDES_NUM; + priv->tx_free++; + } + + spin_unlock_irqrestore(&priv->tx_lock, flags); + priv->old_tx = entry; + if ((netif_queue_stopped(dev)) && (priv->tx_free > 0)) { + netif_wake_queue (dev); + } +} + + +/*------------------------------------------------------------- + . + . ftgmac100_rcv - receive a packet from the card + . + . There is ( at least ) a packet waiting to be read from + . chip-memory. + . + . o Read the status + . o If an error, record it + . 
 . o otherwise, read in the packet
 --------------------------------------------------------------
*/
// extern dce_dcache_invalidate_range(unsigned int start, unsigned int end);

/*
 * RX handler: drain completed receive descriptors, hand good frames to
 * the network stack, refill the consumed ring slots, and — if the
 * RX-buffer-unavailable interrupt was masked earlier (trans_busy) —
 * re-enable reception.  Descriptor bit semantics (FRS, DF, VDBC, …)
 * come from the FTGMAC100 datasheet; behavior kept exactly as written.
 */
static void ftgmac100_rcv(struct net_device *dev)
{
	struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv;
	unsigned long ioaddr = dev->base_addr;
	int packet_length;
	int rcv_cnt;
	volatile RX_DESC *cur_desc;
	int cur_idx;
	int have_package;
	int have_frs;
	int start_idx;
	int count = 0;
	int packet_full = 0;
	int data_not_fragment = 1;

	start_idx = priv->rx_idx;

	/* at most one full ring's worth of frames per invocation */
	for (rcv_cnt=0; rcv_cnt<RXDES_NUM ; ++rcv_cnt)
	{
		packet_length = 0;
		cur_idx = priv->rx_idx;

		have_package = 0;
		have_frs = 0;

		/* consume descriptors the hardware has handed back */
		for (; (cur_desc = &priv->rx_descs[priv->rx_idx])->RXPKT_RDY==RX_OWNBY_SOFTWARE; )
		{
			have_package = 1;
			priv->rx_idx = (priv->rx_idx+1)%RXDES_NUM;
			count++;
			if (count == RXDES_NUM) {
				packet_full = 1;	/* consumed every slot */
			}
			//DF_support: remember whether every frame so far was unfragmented
			if (data_not_fragment == 1) {
				if (!(cur_desc->DF)) {
					data_not_fragment = 0;
				}
			}

			if (cur_desc->FRS)	/* first segment of a frame */
			{
				have_frs = 1;
				/* drop frames with hardware-reported errors */
				if (cur_desc->RX_ERR || cur_desc->CRC_ERR || cur_desc->FTL ||
				    cur_desc->RUNT || cur_desc->RX_ODD_NB
				    // cur_desc->IPCS_FAIL || cur_desc->UDPCS_FAIL || cur_desc->TCPCS_FAIL
				   )
				{
					if (cur_desc->RX_ERR)
					{
						DO_PRINT("err: RX_ERR\n");
					}
					if (cur_desc->CRC_ERR)
					{
						// DO_PRINT("err: CRC_ERR\n");
					}
					if (cur_desc->FTL)
					{
						DO_PRINT("err: FTL\n");
					}
					if (cur_desc->RX_ODD_NB)
					{
						// DO_PRINT("err: RX_ODD_NB\n");
					}
					priv->stats.rx_errors++;	// error frame....
					break;
				}
				//DF_support: checksum-offload result is only valid for unfragmented frames
				if (cur_desc->DF) {
					if (cur_desc->IPCS_FAIL || cur_desc->UDPCS_FAIL || cur_desc->TCPCS_FAIL)
					{
						DO_PRINT("err: CS FAIL\n");
						priv->stats.rx_errors++;	// error frame....
						break;
					}
				}

				if (cur_desc->MULTICAST)
				{
					priv->stats.multicast++;
				}
				/* NC-SI AEN packets force a full MAC reset */
				if ((priv->NCSI_support == 1) || (priv->INTEL_NCSI_EVA_support == 1)) {
					if (cur_desc->BROADCAST) {
						if (*(unsigned short *)(cur_desc->VIR_RXBUF_BADR + 12) == NCSI_HEADER) {
							printk ("AEN PACKET ARRIVED\n");
							ftgmac100_reset(dev);
							ftgmac100_enable(dev);
							return;
						}
					}
				}
			}

			packet_length += cur_desc->VDBC;	/* bytes in this descriptor */

//			if ( cur_desc->LRS )	// packet's last frame
//			{
				break;	/* one descriptor per frame in this configuration */
//			}
		}
		if (have_package==0)
		{
			goto done;
		}
		if (!have_frs)
		{
			DO_PRINT("error, loss first\n");
			priv->stats.rx_over_errors++;
		}

		if (packet_length > 0)
		{
			struct sk_buff * skb;
			u8 * data = 0; if (data) { }	/* dead debug variable, kept as-is */

			packet_length -= 4;	/* strip trailing FCS/CRC */

			skb_put (skb = priv->rx_skbuff[cur_idx], packet_length);

//			Rx Offload DF_support
			if (data_not_fragment) {
				/* hardware already verified the checksums */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				data_not_fragment = 1;
			}

#if FTMAC100_DEBUG > 2
			DO_PRINT("Receiving Packet at 0x%x, packet len = %x\n",(unsigned int)data, packet_length);
			print_packet( data, packet_length );
#endif

			skb->protocol = eth_type_trans(skb, dev );
			netif_rx(skb);
			priv->stats.rx_packets++;
			priv->rx_skbuff[cur_idx] = NULL;	/* slot must be refilled below */
		}
		if (packet_full) {
//			DO_PRINT ("RX Buffer full before driver entered ISR\n");
			goto done;
		}
	}

done:

	if (packet_full) {
		/* every slot was consumed: refill the whole ring */
		struct sk_buff *skb;

		for (cur_idx = 0; cur_idx < RXDES_NUM; cur_idx++)
		{
			if (priv->rx_skbuff[cur_idx] == NULL) {
				skb = dev_alloc_skb (RX_BUF_SIZE + 16);
				if (skb == NULL) {
					printk (KERN_INFO
						"%s: receive_packet: "
						"Unable to re-allocate Rx skbuff.#%d\n",
						dev->name, cur_idx);
				}
				/* NOTE(review): skb is used below even when the
				 * allocation above failed — kept as written */
				priv->rx_skbuff[cur_idx] = skb;
				skb->dev = dev;
				// ASPEED: See earlier skb_reserve() cache alignment
				skb_reserve (skb, 2);
				dmac_inv_range ((void *)skb->data, (void *)skb->data + RX_BUF_SIZE);
				priv->rx_descs[cur_idx].RXBUF_BADR = cpu_to_le32(virt_to_phys(skb->tail));
				priv->rx_descs[cur_idx].VIR_RXBUF_BADR = cpu_to_le32((u32)skb->tail);
			}
			priv->rx_descs[cur_idx].RXPKT_RDY = RX_OWNBY_FTGMAC100;	/* give back to MAC */
		}
		packet_full = 0;

	}
	else {
		/* refill only the slots consumed this pass: (start_idx..rx_idx) */
		if (start_idx != priv->rx_idx) {
			struct sk_buff *skb;

			for (cur_idx = (start_idx+1)%RXDES_NUM; cur_idx != priv->rx_idx; cur_idx = (cur_idx+1)%RXDES_NUM)
			{
				/* Dropped packets don't need to re-allocate */
				if (priv->rx_skbuff[cur_idx] == NULL) {
					skb = dev_alloc_skb (RX_BUF_SIZE + 16);
					if (skb == NULL) {
						printk (KERN_INFO
							"%s: receive_packet: "
							"Unable to re-allocate Rx skbuff.#%d\n",
							dev->name, cur_idx);
						break;
					}
					priv->rx_skbuff[cur_idx] = skb;
					skb->dev = dev;
					/* 16 byte align the IP header */
					skb_reserve (skb, 2);
					dmac_inv_range ((void *)skb->data,
							(void *)skb->data + RX_BUF_SIZE);
					priv->rx_descs[cur_idx].RXBUF_BADR = cpu_to_le32(virt_to_phys(skb->tail));
					priv->rx_descs[cur_idx].VIR_RXBUF_BADR = cpu_to_le32((u32)skb->tail);
				}

				priv->rx_descs[cur_idx].RXPKT_RDY = RX_OWNBY_FTGMAC100;
			}

			/* start_idx itself is handled separately */
			/* Dropped packets don't need to re-allocate */
			if (priv->rx_skbuff[start_idx] == NULL) {
				skb = dev_alloc_skb (RX_BUF_SIZE + 16);
				if (skb == NULL) {
					printk (KERN_INFO
						"%s: receive_packet: "
						"Unable to re-allocate Rx skbuff.#%d\n",
						dev->name, start_idx);
				}
				priv->rx_skbuff[start_idx] = skb;
				skb->dev = dev;
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				dmac_inv_range ((void *)skb->data,
						(void *)skb->data + RX_BUF_SIZE);
				priv->rx_descs[start_idx].RXBUF_BADR = cpu_to_le32(virt_to_phys(skb->tail));
				priv->rx_descs[start_idx].VIR_RXBUF_BADR = cpu_to_le32((u32)skb->tail);
			}

			priv->rx_descs[start_idx].RXPKT_RDY = RX_OWNBY_FTGMAC100;
		}
	}
	/* RX had been throttled because no buffers were available:
	 * re-enable the receiver and unmask the interrupt */
	if (trans_busy == 1)
	{
		///	priv->maccr_val |= RXMAC_EN_bit;
		outl( priv->maccr_val, ioaddr + MACCR_REG );
		outl( inl(ioaddr + IER_REG) | RXBUF_UNAVA_bit, ioaddr + IER_REG);
	}
	return;
}

/*--------------------------------------------------------------------
 .
+ . This is the main routine of the driver, to handle the net_device when + . it needs some attention. + . + . So: + . first, save state of the chipset + . branch off into routines to handle each case, and acknowledge + . each to the interrupt register + . and finally restore state. + . + ---------------------------------------------------------------------*/ +static irqreturn_t ftgmac100_interrupt(int irq, void * dev_id, struct pt_regs * regs) +{ + struct net_device *dev = dev_id; + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + unsigned long ioaddr = dev->base_addr; + int timeout; + unsigned int tmp; + unsigned int mask; // interrupt mask + unsigned int status; // interrupt status + +// PRINTK3("%s: ftgmac100 interrupt started \n", dev->name); + + if (dev == NULL) { + DO_PRINT(KERN_WARNING "%s: irq %d for unknown device.\n", dev->name, irq); + return IRQ_HANDLED; + } + + /* read the interrupt status register */ + mask = inl( ioaddr + IER_REG ); + + /* set a timeout value, so I don't stay here forever */ + + for (timeout=1; timeout>0; --timeout) + { + /* read the status flag, and mask it */ + status = inl( ioaddr + ISR_REG ) & mask; + + outl(status, ioaddr + ISR_REG ); //Richard, write to clear + + if (!status ) + { + break; + } + + if (status & PHYSTS_CHG_bit) { + DO_PRINT("PHYSTS_CHG \n"); + // Is this interrupt for changes of the PHYLINK pin? + // Note: PHYLINK is optional; not all boards connect it. 
+ if (((priv->ids.miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_MARVELL) || + ((priv->ids.miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8211)) + { + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x13); + PRINTK("%s: PHY interrupt status, read_phy_reg(0x13) = 0x%04x\n", + dev->name, tmp); + tmp &= (PHY_SPEED_CHG_bit | PHY_DUPLEX_CHG_bit | PHY_LINK_CHG_bit); + } + else if ((priv->ids.miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_BROADCOM) + { + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x1a); + PRINTK("%s: PHY interrupt status, read_phy_reg(0x1a) = 0x%04x\n", + dev->name, tmp); + // Bits [3:1] are {duplex, speed, link} change interrupts. + tmp &= 0x000e; + } + else if (priv->ids.miiPhyId == PHYID_BCM54612E) { + tmp = ftgmac100_read_phy_register(ioaddr, priv->ids.phyAddr, 0x1A); + PRINTK("%s: PHY interrupt status, read_phy_reg(0x1A) = 0x%04x\n", + dev->name, tmp); + tmp &= 0x000E; + } + else tmp = 0; + + if (tmp) { + ftgmac100_reset(dev); + ftgmac100_enable(dev); + } + } + +#ifdef not_complete_yet + if (status & AHB_ERR_bit) + { + DO_PRINT("AHB_ERR \n"); + } + + if (status & RPKT_LOST_bit) + { + DO_PRINT("RPKT_LOST "); + } + if (status & RPKT2F_bit) + { + PRINTK2("RPKT_SAV "); + } + + if (status & TPKT_LOST_bit) + { + PRINTK("XPKT_LOST "); + } + if (status & TPKT2E_bit) + { + PRINTK("XPKT_OK "); + } + if (status & NPTXBUF_UNAVA_bit) + { + PRINTK("NOTXBUF "); + } + if (status & TPKT2F_bit) + { + PRINTK("XPKT_FINISH "); + } + + if (status & RPKT2B_bit) + { + DO_PRINT("RPKT_FINISH "); + } + PRINTK2("\n"); +#endif /* end_of_not */ + +// PRINTK3(KERN_WARNING "%s: Handling interrupt status %x \n", dev->name, status); + + if ( status & (TPKT2E_bit|TPKT_LOST_bit)) + { + //free tx skb buf + ftgmac100_free_tx(dev); + + } + + if ( status & RPKT2B_bit ) + { + ftgmac100_rcv(dev); //Richard + } + else if (status & RXBUF_UNAVA_bit) + { + outl( mask & ~RXBUF_UNAVA_bit, ioaddr + IER_REG); + trans_busy = 1; + /* + rcv_tq.sync = 0; + rcv_tq.routine = 
ftgmac100_rcv; + rcv_tq.data = dev; + queue_task(&rcv_tq, &tq_timer); + */ + + } else if (status & AHB_ERR_bit) + { + DO_PRINT("AHB ERR \n"); + } + } + +// PRINTK3("%s: Interrupt done\n", dev->name); + return IRQ_HANDLED; +} + +/*------------------------------------------------------------ + . Get the current statistics. + . This may be called with the card open or closed. + .-------------------------------------------------------------*/ +static struct net_device_stats* ftgmac100_query_statistics(struct net_device *dev) +{ + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + + return &priv->stats; +} + +#ifdef HAVE_MULTICAST + +// -------------------------------------------------------------------- +// Finds the CRC32 of a set of bytes. +// Again, from Peter Cammaert's code. +// -------------------------------------------------------------------- +static int crc32( char * s, int length ) +{ + /* indices */ + int perByte; + int perBit; + /* crc polynomial for Ethernet */ + const u32 poly = 0xedb88320; + /* crc value - preinitialized to all 1's */ + u32 crc_value = 0xffffffff; + + for ( perByte = 0; perByte < length; perByte ++ ) { + unsigned char c; + + c = *(s++); + for ( perBit = 0; perBit < 8; perBit++ ) { + crc_value = (crc_value>>1)^ + (((crc_value^c)&0x01)?poly:0); + c >>= 1; + } + } + return crc_value; +} + +/* + . Function: ftgmac100_setmulticast( struct net_device *dev, int count, struct dev_mc_list * addrs ) + . Purpose: + . This sets the internal hardware table to filter out unwanted multicast + . packets before they take up memory. 
*/

/*
 * Program the two 32-bit multicast hash-table registers (MAHT0/MAHT1)
 * for every address in the list.  The hash is written twice — once
 * over 5 address bytes, once over 6 — presumably because gigabit and
 * 10/100 modes hash differently on A0/A1 silicon; TODO confirm against
 * the FTGMAC100 datasheet.
 */
static void ftgmac100_setmulticast( struct net_device *dev, int count, struct dev_mc_list * addrs )
{
	struct dev_mc_list * cur_addr;
	int crc_val;
	unsigned int ioaddr = dev->base_addr;
	struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv;
	struct AstMacHwConfig* ids = &priv->ids;
	unsigned long Combined_Channel_ID, i;
	struct sk_buff * skb;
	cur_addr = addrs;

//TX
#if 0
	/* disabled: push the filter to the NC-SI sideband controller instead */
	if (priv->NCSI_support == 1) {
		skb = dev_alloc_skb (TX_BUF_SIZE + 16);
		priv->InstanceID++;
		priv->NCSI_Request.IID = priv->InstanceID;
		priv->NCSI_Request.Command = SET_MAC_ADDRESS;
		Combined_Channel_ID = (priv->NCSI_Cap.Package_ID << 5) + priv->NCSI_Cap.Channel_ID;
		priv->NCSI_Request.Channel_ID = Combined_Channel_ID;
		priv->NCSI_Request.Payload_Length = (8 << 8);
		memcpy ((unsigned char *)skb->data, &priv->NCSI_Request, 30);
		priv->NCSI_Request.Payload_Length = 8;
		for (i = 0; i < 6; i++) {
			priv->Payload_Data[i] = cur_addr->dmi_addr[i];
		}
		priv->Payload_Data[6] = 2; //MAC Address Num = 1 --> address filter 1, fixed in sample code
		priv->Payload_Data[7] = MULTICAST_ADDRESS + 0 + ENABLE_MAC_ADDRESS_FILTER; //AT + Reserved + E
		copy_data (dev, skb, priv->NCSI_Request.Payload_Length);
		skb->len = 30 + priv->NCSI_Request.Payload_Length + 4;
		ftgmac100_wait_to_send_packet(skb, dev);
	}
#endif
	for (cur_addr = addrs ; cur_addr!=NULL ; cur_addr = cur_addr->next )
	{
		/* make sure this is a multicast address - shouldn't this be a given if we have it here ? */
		if ( !( *cur_addr->dmi_addr & 1 ) )
		{
			continue;
		}
#if 1
//A0, A1
		/* gigabit hash: CRC over the first 5 address bytes */
		crc_val = crc32( cur_addr->dmi_addr, 5 );
		crc_val = (~(crc_val>>2)) & 0x3f;	/* fold to a 6-bit table index */
		if (crc_val >= 32)
		{
			outl(inl(ioaddr+MAHT1_REG) | (1UL<<(crc_val-32)), ioaddr+MAHT1_REG);
			priv->GigaBit_MAHT1 = inl (ioaddr + MAHT1_REG);
		}
		else
		{
			outl(inl(ioaddr+MAHT0_REG) | (1UL<<crc_val), ioaddr+MAHT0_REG);
			priv->GigaBit_MAHT0 = inl (ioaddr + MAHT0_REG);
		}
//10/100M
		/* 10/100 hash: CRC over all 6 address bytes */
		crc_val = crc32( cur_addr->dmi_addr, 6 );
		crc_val = (~(crc_val>>2)) & 0x3f;
		if (crc_val >= 32)
		{
			outl(inl(ioaddr+MAHT1_REG) | (1UL<<(crc_val-32)), ioaddr+MAHT1_REG);
			priv->Not_GigaBit_MAHT1 = inl (ioaddr + MAHT1_REG);
		}
		else
		{
			outl(inl(ioaddr+MAHT0_REG) | (1UL<<crc_val), ioaddr+MAHT0_REG);
			priv->Not_GigaBit_MAHT0 = inl (ioaddr + MAHT0_REG);
		}
#else
//A2
		/* A2 silicon: one 6-byte hash serves both speeds */
		crc_val = crc32( cur_addr->dmi_addr, 6 );
		crc_val = (~(crc_val>>2)) & 0x3f;
		if (crc_val >= 32)
		{
			outl(inl(ioaddr+MAHT1_REG) | (1UL<<(crc_val-32)), ioaddr+MAHT1_REG);
			priv->Not_GigaBit_MAHT1 = inl (ioaddr + MAHT1_REG);
			priv->GigaBit_MAHT1 = inl (ioaddr + MAHT1_REG);
		}
		else
		{
			outl(inl(ioaddr+MAHT0_REG) | (1UL<<crc_val), ioaddr+MAHT0_REG);
			priv->Not_GigaBit_MAHT0 = inl (ioaddr + MAHT0_REG);
			priv->GigaBit_MAHT0 = inl (ioaddr + MAHT0_REG);
		}
#endif
	}
}

/*-----------------------------------------------------------
 . ftgmac100_set_multicast_list
 .
 . This routine will, depending on the values passed to it,
 . either make it accept multicast packets, go into
 . promiscuous mode ( for TCPDUMP and cousins ) or accept
 .
a select set of multicast packets +*/ +static void ftgmac100_set_multicast_list(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + + PRINTK2("%s:ftgmac100_set_multicast_list\n", dev->name); + + if (dev->flags & IFF_PROMISC) + priv->maccr_val |= RX_ALLADR_bit; + else + priv->maccr_val &= ~RX_ALLADR_bit; + + if (dev->flags & IFF_ALLMULTI) + priv->maccr_val |= RX_MULTIPKT_bit; + else + priv->maccr_val &= ~RX_MULTIPKT_bit; + + if (dev->mc_count) + { +// PRINTK("set multicast\n"); + priv->maccr_val |= RX_HT_EN_bit; + ftgmac100_setmulticast( dev, dev->mc_count, dev->mc_list ); + } + else + { + priv->maccr_val &= ~RX_HT_EN_bit; + } + + outl( priv->maccr_val, ioaddr + MACCR_REG ); + +} +#endif + +static int ast_gmac_stop(struct net_device *dev) +{ + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + + netif_stop_queue(dev); + + /* clear everything */ + ftgmac100_shutdown(dev->base_addr); + free_irq(dev->irq, dev); + + if (priv->timer.function != NULL) { + del_timer_sync(&priv->timer); + } + + if (priv->rx_descs) + dma_free_coherent( NULL, sizeof(RX_DESC)*RXDES_NUM, (void*)priv->rx_descs, (dma_addr_t)priv->rx_descs_dma ); + if (priv->tx_descs) + dma_free_coherent( NULL, sizeof(TX_DESC)*TXDES_NUM, (void*)priv->tx_descs, (dma_addr_t)priv->tx_descs_dma ); + if (priv->tx_buf) + dma_free_coherent( NULL, TX_BUF_SIZE*TXDES_NUM, (void*)priv->tx_buf, (dma_addr_t)priv->tx_buf_dma ); + priv->rx_descs = NULL; priv->rx_descs_dma = 0; + priv->tx_descs = NULL; priv->tx_descs_dma = 0; + priv->tx_buf = NULL; priv->tx_buf_dma = 0; + + + return 0; +} + +static struct proc_dir_entry *proc_ftgmac100; + +static int ftgmac100_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct net_device *dev = (struct net_device *)data; + struct ftgmac100_priv *priv = (struct ftgmac100_priv *)dev->priv; + int num; + int i; + + num = sprintf(page, "priv->rx_idx = %d\n", 
priv->rx_idx); + for (i=0; i<RXDES_NUM; ++i) + { + num += sprintf(page + num, "[%d].RXDMA_OWN = %d\n", i, priv->rx_descs[i].RXPKT_RDY); + } + return num; +} + +static int ftgmac100_open(struct net_device *netdev) +{ + struct ftgmac100_priv *priv = netdev_priv(netdev); + int err; + + DO_PRINT("%s:ftgmac100_open\n", netdev->name); + + priv->maccr_val = (CRC_APD_bit | RXMAC_EN_bit | TXMAC_EN_bit | RXDMA_EN_bit + | TXDMA_EN_bit | CRC_CHK_bit | RX_BROADPKT_bit | SPEED_100_bit | FULLDUP_bit); + + ftgmac100_ringbuf_alloc(priv); + + + /* Grab the IRQ next. Beyond this, we will free the IRQ. */ + err = request_irq(netdev->irq, (void *)&ftgmac100_interrupt, + IRQF_DISABLED, netdev->name, netdev); + if (err) + { + DO_PRINT("%s: unable to get IRQ %d (retval=%d).\n", + netdev->name, netdev->irq, err); + kfree(netdev->priv); + netdev->priv = NULL; + return err; + } + + + netif_start_queue(netdev); + + /* reset the hardware */ + ftgmac100_reset(netdev); + ftgmac100_enable(netdev); + + if (((priv->ids.miiPhyId & PHYID_VENDOR_MASK) == PHYID_VENDOR_BROADCOM) || + ((priv->ids.miiPhyId & PHYID_VENDOR_MODEL_MASK) == PHYID_RTL8201EL) || + (priv->ids.miiPhyId == PHYID_BCM54612E)) { + + init_timer(&priv->timer); + priv->timer.data = (unsigned long)netdev; + priv->timer.function = aspeed_mac_timer; + priv->timer.expires = jiffies + 1 * HZ; + add_timer (&priv->timer); + } + + /* Configure the PHY */ + ftgmac100_phy_configure(netdev); + + netif_start_queue(netdev); + return 0; +} + +static int __init ast_gmac_probe(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *netdev; + struct ftgmac100_priv *priv; + struct ftgmac100_eth_data *ast_eth_data = pdev->dev.platform_data;; + int err; + + if (!pdev) + return -ENODEV; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + /* setup net_device */ + netdev = alloc_etherdev(sizeof(*priv)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + netdev->irq = 
platform_get_irq(pdev, 0); + if (netdev->irq < 0) { + err = -ENXIO; + goto err_netdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + +// SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) + netdev->netdev_ops = &ftgmac100_netdev_ops; +#else + printk("ast_gmac_probe 5\n"); + + ether_setup(netdev); + + netdev->open = ftgmac100_open; + netdev->stop = ast_gmac_stop; + netdev->hard_start_xmit = ftgmac100_wait_to_send_packet; + netdev->tx_timeout = ftgmac100_timeout; + netdev->get_stats = ftgmac100_query_statistics; +//#ifdef HAVE_MULTICAST +#if 0 + netdev->set_multicast_list = &ftgmac100_set_multicast_list; +#endif + +#endif + + +#ifdef CONFIG_AST_NPAI +// netdev->features = NETIF_F_GRO; +// netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; +#endif + + platform_set_drvdata(pdev, netdev); + + /* setup private data */ + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dev = &pdev->dev; + + + priv->ids.macId = pdev->id; + + priv->NCSI_support = ast_eth_data->NCSI_support; + priv->INTEL_NCSI_EVA_support= ast_eth_data->INTEL_NCSI_EVA_support; + spin_lock_init(&priv->tx_lock); + +#if 0 + /* initialize NAPI */ + netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); +#endif + /* map io memory */ + res = request_mem_region(res->start, resource_size(res), + dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "Could not reserve memory region\n"); + err = -ENOMEM; + goto err_req_mem; + } + + netdev->base_addr = (u32)ioremap(res->start, resource_size(res)); + + if (!netdev->base_addr) { + dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); + err = -EIO; + goto err_ioremap; + } + +// priv->irq = irq; +#if 0//CONFIG_AST_MDIO + /* initialize mdio bus */ + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + err = -EIO; + goto err_alloc_mdiobus; + } + + priv->mii_bus->name = "ftgmac100_mdio"; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii.%d",pdev->id); + + priv->mii_bus->priv = 
netdev; + priv->mii_bus->read = ftgmac100_mdiobus_read; + priv->mii_bus->write = ftgmac100_mdiobus_write; + priv->mii_bus->reset = ftgmac100_mdiobus_reset; + priv->mii_bus->irq = priv->phy_irq; + + for (i = 0; i < PHY_MAX_ADDR; i++) + priv->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(priv->mii_bus); + if (err) { + dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + goto err_register_mdiobus; + } + + err = ftgmac100_mii_probe(priv); + if (err) { + dev_err(&pdev->dev, "MII Probe failed!\n"); + goto err_mii_probe; + } +#endif + /* register network device */ + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto err_alloc_mdiobus; + } + +// printk("irq %d, mapped at %x\n", netdev->irq, (u32)netdev->base_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + random_ether_addr(netdev->dev_addr); + printk("generated random MAC address %pM\n", + netdev->dev_addr); + } +#if 0 + if ((proc_ftgmac100 = create_proc_entry( dev->name, 0, 0 ))) + { + proc_ftgmac100->read_proc = ftgmac100_read_proc; + proc_ftgmac100->data = dev; + proc_ftgmac100->owner = THIS_MODULE; + } +#endif + return 0; + +//err_register_netdev: +// phy_disconnect(priv->phydev); +//err_mii_probe: +// mdiobus_unregister(priv->mii_bus); +//err_register_mdiobus: +// mdiobus_free(priv->mii_bus); +err_alloc_mdiobus: + iounmap((void __iomem *)netdev->base_addr); +err_ioremap: + release_resource(res); +err_req_mem: +// netif_napi_del(&priv->napi); + platform_set_drvdata(pdev, NULL); +err_netdev: + free_netdev(netdev); +err_alloc_etherdev: + return err; + +} + +static int __devexit ast_gmac_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); +// struct ftgmac100_priv *priv = netdev_priv(dev); + +// remove_proc_entry(dev->name, 0); + + unregister_netdev(dev); + +#ifdef CONFIG_MII_PHY + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mii_bus); + mdiobus_free(priv->mii_bus); +#endif + + iounmap((void 
__iomem *)dev->base_addr); + +#ifdef CONFIG_AST_NPAI + netif_napi_del(&priv->napi); +#endif + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; +} + +static struct platform_driver ast_gmac_driver = { + .remove = __devexit_p(ast_gmac_remove), + .driver = { + .name = "ast_gmac", + .owner = THIS_MODULE, + }, +}; + +static int __init ast_gmac_init(void) +{ + return platform_driver_probe(&ast_gmac_driver, ast_gmac_probe); +} + +static void __exit ast_gmac_exit(void) +{ + platform_driver_unregister(&ast_gmac_driver); +} + +module_init(ast_gmac_init) +module_exit(ast_gmac_exit) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("ASPEED Technology Inc."); +MODULE_DESCRIPTION("NIC driver for AST Series"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ftgmac100_26.h b/drivers/net/ftgmac100_26.h new file mode 100644 index 000000000000..f145b05a4d43 --- /dev/null +++ b/drivers/net/ftgmac100_26.h @@ -0,0 +1,580 @@ +/******************************************************************************** +* File Name : ftgmac100_26.h +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
********************************************************************************/
// --------------------------------------------------------------------

#ifndef FTMAC100_H
#define FTMAC100_H

#define HAVE_MULTICAST

/* MAC register offsets (relative to the device base address) */
#define ISR_REG			0x00	// interrupt status register (typo fixed: was "interrups")
#define IER_REG			0x04	// interrupt mask register (typo fixed: was "maks")
#define MAC_MADR_REG		0x08	// MAC address (Most significant)
#define MAC_LADR_REG		0x0c	// MAC address (Least significant)

#define MAHT0_REG		0x10	// Multicast Address Hash Table 0 register
#define MAHT1_REG		0x14	// Multicast Address Hash Table 1 register
#define TXPD_REG		0x18	// Transmit Poll Demand register
#define RXPD_REG		0x1c	// Receive Poll Demand register
#define TXR_BADR_REG		0x20	// Transmit Ring Base Address register
#define RXR_BADR_REG		0x24	// Receive Ring Base Address register

#define HPTXPD_REG		0x28	// high-priority transmit poll demand
#define HPTXR_BADR_REG		0x2c	// high-priority transmit ring base address

#define ITC_REG			0x30	// interrupt timer control register
#define APTC_REG		0x34	// Automatic Polling Timer control register
#define DBLAC_REG		0x38	// DMA Burst Length and Arbitration control register

#define DMAFIFOS_REG		0x3c	// DMA/FIFO state
#define FEAR_REG		0x44	// feature register
#define TPAFCR_REG		0x48	// transmit priority arbitration / FIFO control
#define RBSR_REG		0x4c	// for NC Body
#define MACCR_REG		0x50	// MAC control register
#define MACSR_REG		0x54	// MAC status register
#define PHYCR_REG		0x60	// PHY control register
#define PHYDATA_REG		0x64	// PHY Write Data register
#define FCR_REG			0x68	// Flow Control register
#define BPR_REG			0x6c	// back pressure register
#define WOLCR_REG		0x70	// Wake-On-Lan control register
#define WOLSR_REG		0x74	// Wake-On-Lan status register
#define WFCRC_REG		0x78	// Wake-up Frame CRC register
#define WFBM1_REG		0x80	// wake-up frame byte mask 1st double word register
#define WFBM2_REG		0x84	// wake-up frame byte mask 2nd double word register
#define WFBM3_REG		0x88	// wake-up frame byte mask 3rd double word register
#define WFBM4_REG		0x8c	// wake-up frame byte mask 4th double word register

#define NPTXR_PTR_REG		0x90	// normal-priority TX ring pointer
#define HPTXR_PTR_REG		0x94	// high-priority TX ring pointer
#define RXR_PTR_REG		0x98	// RX ring pointer


// --------------------------------------------------------------------
// ISR_REG and IER_REG bit definitions (mojibake "¤Î" repaired)
// --------------------------------------------------------------------
#define HPTXBUF_UNAVA_bit	(1UL<<10)
#define PHYSTS_CHG_bit		(1UL<<9)
#define AHB_ERR_bit		(1UL<<8)
#define TPKT_LOST_bit		(1UL<<7)
#define NPTXBUF_UNAVA_bit	(1UL<<6)
#define TPKT2F_bit		(1UL<<5)
#define TPKT2E_bit		(1UL<<4)
#define RPKT_LOST_bit		(1UL<<3)
#define RXBUF_UNAVA_bit		(1UL<<2)
#define RPKT2F_bit		(1UL<<1)
#define RPKT2B_bit		(1UL<<0)


// --------------------------------------------------------------------
// APTC_REG — automatic polling timer fields
// --------------------------------------------------------------------
typedef struct
{
	u32 RXPOLL_CNT:4;
	u32 RXPOLL_TIME_SEL:1;
	u32 Reserved1:3;
	u32 TXPOLL_CNT:4;
	u32 TXPOLL_TIME_SEL:1;
	u32 Reserved2:19;
} FTGMAC100_APTCR_Status;

// --------------------------------------------------------------------
// PHYCR_REG — MDIO access control bits
// --------------------------------------------------------------------
#define PHY_RE_AUTO_bit		(1UL<<9)
#define PHY_READ_bit		(1UL<<26)
#define PHY_WRITE_bit		(1UL<<27)
// --------------------------------------------------------------------
// PHY status
// --------------------------------------------------------------------
#define PHY_AUTO_OK_bit		(1UL<<5)
// --------------------------------------------------------------------
// PHY INT_STAT_REG — latched PHY interrupt causes
// --------------------------------------------------------------------
#define PHY_SPEED_CHG_bit	(1UL<<14)
#define PHY_DUPLEX_CHG_bit	(1UL<<13)
#define PHY_LINK_CHG_bit	(1UL<<10)
#define PHY_AUTO_COMP_bit	(1UL<<11)
// --------------------------------------------------------------------
// PHY SPE_STAT_REG — resolved speed/duplex status
// --------------------------------------------------------------------
#define PHY_RESOLVED_bit	(1UL<<11)
#define PHY_SPEED_mask		0xC000
#define PHY_SPEED_10M		0x0
#define PHY_SPEED_100M		0x1
#define PHY_SPEED_1G		0x2
#define PHY_DUPLEX_mask		0x2000
//#define PHY_FULLDUPLEX	0x1
#define PHY_SPEED_DUPLEX_MASK	0x01E0
#define PHY_100M_DUPLEX		0x0100
#define PHY_100M_HALF		0x0080
#define PHY_10M_DUPLEX		0x0040
#define PHY_10M_HALF		0x0020
#define LINK_STATUS		0x04


// --------------------------------------------------------------------
// MACCR_REG — MAC control bits
// --------------------------------------------------------------------
#define SW_RST_bit		(1UL<<31)	// software reset/
#define DIRPATH_bit		(1UL<<21)
#define RX_IPCS_FAIL_bit	(1UL<<20)	// accept frames with IP checksum errors
#define SPEED_100_bit		(1UL<<19)	// 100 Mbit mode
#define RX_UDPCS_FAIL_bit	(1UL<<18)	// accept frames with UDP checksum errors
#define RX_BROADPKT_bit		(1UL<<17)	// Receiving broadcast packet
#define RX_MULTIPKT_bit		(1UL<<16)	// receiving multicast packet
#define RX_HT_EN_bit		(1UL<<15)	// enable multicast hash-table filtering
#define RX_ALLADR_bit		(1UL<<14)	// not check incoming packet's destination address
#define JUMBO_LF_bit		(1UL<<13)	// jumbo/long frame support
#define RX_RUNT_bit		(1UL<<12)	// Store incoming packet even its length is les than 64 byte
#define CRC_CHK_bit		(1UL<<11)	// check received frame CRC
#define CRC_APD_bit		(1UL<<10)	// append crc to transmit packet
#define GMAC_MODE_bit		(1UL<<9)	// gigabit MAC mode
#define FULLDUP_bit		(1UL<<8)	// full duplex
#define ENRX_IN_HALFTX_bit	(1UL<<7)	// enable RX while transmitting in half duplex
#define LOOP_EN_bit		(1UL<<6)	// Internal loop-back
#define HPTXR_EN_bit		(1UL<<5)	// enable high-priority TX ring
#define REMOVE_VLAN_bit		(1UL<<4)	// strip VLAN tags on receive
//#define MDC_SEL_bit		(1UL<<13)	// set MDC as TX_CK/10
//#define RX_FTL_bit		(1UL<<11)	// Store incoming packet even its length is great than 1518 byte
#define RXMAC_EN_bit		(1UL<<3)	// receiver enable
#define TXMAC_EN_bit		(1UL<<2)	// transmitter enable
#define RXDMA_EN_bit		(1UL<<1)	// enable DMA receiving channel
#define TXDMA_EN_bit		(1UL<<0)	// enable DMA transmitting channel


// --------------------------------------------------------------------
// SCU_REG — system control unit offsets/bits used by this driver
// --------------------------------------------------------------------
#define SCU_PROTECT_KEY_REG		0x0
#define SCU_PROT_KEY_MAGIC		0x1688a8a8
#define SCU_RESET_CONTROL_REG		0x04
#define SCU_RESET_MAC1			(1u << 11)
#define SCU_RESET_MAC2			(1u << 12)

#define SCU_HARDWARE_TRAPPING_REG	0x70
#define SCU_HT_MAC_INTF_LSBIT		6
#define SCU_HT_MAC_INTERFACE		(0x7u << SCU_HT_MAC_INTF_LSBIT)
#define MAC_INTF_SINGLE_PORT_MODES	(1u<<0/*GMII*/ | 1u<<3/*MII_ONLY*/ | 1u<<4/*RMII_ONLY*/)
#define SCU_HT_MAC_GMII			0x0u
// MII and MII mode
#define SCU_HT_MAC_MII_MII		0x1u
#define SCU_HT_MAC_MII_ONLY		0x3u
#define SCU_HT_MAC_RMII_ONLY		0x4u

/*
SCU88 D[31]: MAC1 MDIO
SCU88 D[30]: MAC1 MDC
SCU90 D[2]:  MAC2 MDC/MDIO
SCU80 D[0]:  MAC1 Link
SCU80 D[1]:  MAC2 Link
*/
#define SCU_MULTIFUNCTION_PIN_REG	0x74
#define SCU_MULTIFUNCTION_PIN_CTL1_REG	0x80
#define SCU_MULTIFUNCTION_PIN_CTL3_REG	0x88
#define SCU_MULTIFUNCTION_PIN_CTL5_REG	0x90
#define SCU_MFP_MAC2_PHYLINK		(1u << 1)
#define SCU_MFP_MAC1_PHYLINK		(1u << 0)
#define SCU_MFP_MAC2_MII_INTF		(1u << 21)
#define SCU_MFP_MAC2_MDC_MDIO		(1u << 2)
#define SCU_MFP_MAC1_MDIO		(1u << 31)
#define SCU_MFP_MAC1_MDC		(1u << 30)
#define SCU_SILICON_REVISION_REG	0x7C
#define SCU_SCRATCH_REG			0x40



// --------------------------------------------------------------------
// NCSI — DMTF NC-SI command packet layout
// --------------------------------------------------------------------

//NCSI define & structure
//NC-SI Command Packet
/* NOTE: this struct continues past the end of the visible chunk */
typedef struct {
//Ethernet Header
	unsigned char DA[6];
	unsigned char SA[6];
	unsigned short EtherType;		//DMTF NC-SI
//NC-SI Control Packet
	unsigned char MC_ID;			//Management Controller should set this field to 0x00
	unsigned char Header_Revision;		//For NC-SI 1.0 spec, this field has to set 0x01
	unsigned char Reserved_1;		//Reserved has to set to 0x00
	unsigned char IID;			//Instance ID
	unsigned char Command;
	unsigned char Channel_ID;
unsigned short Payload_Length; //Payload Length = 12 bits, 4 bits are reserved + unsigned long Reserved_2; + unsigned long Reserved_3; +} NCSI_Command_Packet; + +//Command and Response Type +#define CLEAR_INITIAL_STATE 0x00 //M +#define SELECT_PACKAGE 0x01 //M +#define DESELECT_PACKAGE 0x02 //M +#define ENABLE_CHANNEL 0x03 //M +#define DISABLE_CHANNEL 0x04 //M +#define RESET_CHANNEL 0x05 //M +#define ENABLE_CHANNEL_NETWORK_TX 0x06 //M +#define DISABLE_CHANNEL_NETWORK_TX 0x07 //M +#define AEN_ENABLE 0x08 +#define SET_LINK 0x09 //M +#define GET_LINK_STATUS 0x0A //M +#define SET_VLAN_FILTER 0x0B //M +#define ENABLE_VLAN 0x0C //M +#define DISABLE_VLAN 0x0D //M +#define SET_MAC_ADDRESS 0x0E //M +#define ENABLE_BROADCAST_FILTERING 0x10 //M +#define DISABLE_BROADCAST_FILTERING 0x11 //M +#define ENABLE_GLOBAL_MULTICAST_FILTERING 0x12 +#define DISABLE_GLOBAL_MULTICAST_FILTERING 0x13 +#define SET_NCSI_FLOW_CONTROL 0x14 +#define GET_VERSION_ID 0x15 //M +#define GET_CAPABILITIES 0x16 //M +#define GET_PARAMETERS 0x17 //M +#define GET_CONTROLLER_PACKET_STATISTICS 0x18 +#define GET_NCSI_STATISTICS 0x19 +#define GET_NCSI_PASS_THROUGH_STATISTICS 0x1A + +//NC-SI Response Packet +typedef struct { + unsigned char DA[6]; + unsigned char SA[6]; + unsigned short EtherType; //DMTF NC-SI +//NC-SI Control Packet + unsigned char MC_ID; //Management Controller should set this field to 0x00 + unsigned char Header_Revision; //For NC-SI 1.0 spec, this field has to set 0x01 + unsigned char Reserved_1; //Reserved has to set to 0x00 + unsigned char IID; //Instance ID + unsigned char Command; + unsigned char Channel_ID; + unsigned short Payload_Length; //Payload Length = 12 bits, 4 bits are reserved + unsigned short Reserved_2; + unsigned short Reserved_3; + unsigned short Reserved_4; + unsigned short Reserved_5; + unsigned short Response_Code; + unsigned short Reason_Code; + unsigned char Payload_Data[64]; +} NCSI_Response_Packet; + +//Standard Response Code +#define COMMAND_COMPLETED 0x00 +#define 
COMMAND_FAILED 0x01 +#define COMMAND_UNAVAILABLE 0x02 +#define COMMAND_UNSUPPORTED 0x03 + +//Standard Reason Code +#define NO_ERROR 0x0000 +#define INTERFACE_INITIALIZATION_REQUIRED 0x0001 +#define PARAMETER_IS_INVALID 0x0002 +#define CHANNEL_NOT_READY 0x0003 +#define PACKAGE_NOT_READY 0x0004 +#define INVALID_PAYLOAD_LENGTH 0x0005 +#define UNKNOWN_COMMAND_TYPE 0x7FFF + + +struct AEN_Packet { +//Ethernet Header + unsigned char DA[6]; + unsigned char SA[6]; //Network Controller SA = FF:FF:FF:FF:FF:FF + unsigned short EtherType; //DMTF NC-SI +//AEN Packet Format + unsigned char MC_ID; //Network Controller should set this field to 0x00 + unsigned char Header_Revision; //For NC-SI 1.0 spec, this field has to set 0x01 + unsigned char Reserved_1; //Reserved has to set to 0x00 +// unsigned char IID = 0x00; //Instance ID = 0 in Network Controller +// unsigned char Command = 0xFF; //AEN = 0xFF + unsigned char Channel_ID; +// unsigned short Payload_Length = 0x04; //Payload Length = 4 in Network Controller AEN Packet + unsigned long Reserved_2; + unsigned long Reserved_3; + unsigned char AEN_Type; +// unsigned char Reserved_4[3] = {0x00, 0x00, 0x00}; + unsigned long Optional_AEN_Data; + unsigned long Payload_Checksum; +}; + +//AEN Type +#define LINK_STATUS_CHANGE 0x0 +#define CONFIGURATION_REQUIRED 0x1 +#define HOST_NC_DRIVER_STATUS_CHANGE 0x2 + +typedef struct { + unsigned char Package_ID; + unsigned char Channel_ID; + unsigned long Capabilities_Flags; + unsigned long Broadcast_Packet_Filter_Capabilities; + unsigned long Multicast_Packet_Filter_Capabilities; + unsigned long Buffering_Capabilities; + unsigned long AEN_Control_Support; +} NCSI_Capability; +NCSI_Capability NCSI_Cap; + +//SET_MAC_ADDRESS +#define UNICAST (0x00 << 5) +#define MULTICAST_ADDRESS (0x01 << 5) +#define DISABLE_MAC_ADDRESS_FILTER 0x00 +#define ENABLE_MAC_ADDRESS_FILTER 0x01 + +//GET_LINK_STATUS +#define LINK_DOWN 0 +#define LINK_UP 1 + +#define NCSI_LOOP 1500000 +#define RETRY_COUNT 1 + +#define 
NCSI_HEADER 0xF888 //Reversed because of 0x88 is low byte, 0xF8 is high byte in memory + +// -------------------------------------------------------------------- +// Receive Ring descriptor structure +// -------------------------------------------------------------------- + +typedef struct +{ + // RXDES0 + u32 VDBC:14;//0~10 + u32 Reserved1:1; //11~15 + u32 Reserved3:1; + u32 MULTICAST:1; //16 + u32 BROADCAST:1; //17 + u32 RX_ERR:1; //18 + u32 CRC_ERR:1; //19 + u32 FTL:1; + u32 RUNT:1; + u32 RX_ODD_NB:1; + u32 FIFO_FULL:1; + u32 PAUSE_OPCODE:1; + u32 PAUSE_FRAME:1; + u32 Reserved2:2; + u32 LRS:1; + u32 FRS:1; + u32 EDORR:1; + u32 RXPKT_RDY:1; // 1 ==> owned by FTMAC100, 0 ==> owned by software + + // RXDES1 + u32 VLAN_TAGC:16; + u32 Reserved4:4; + u32 PROTL_TYPE:2; + u32 LLC_PKT:1; + u32 DF:1; + u32 VLAN_AVA:1; + u32 TCPCS_FAIL:1; + u32 UDPCS_FAIL:1; + u32 IPCS_FAIL:1; + u32 Reserved5:4; + + // RXDES2 + u32 Reserved6:32; + + // RXDES3 + u32 RXBUF_BADR; + + u32 VIR_RXBUF_BADR; // not defined, the virtual address of receive buffer is placed here + + u32 RESERVED; + u32 RESERVED1; + u32 RESERVED2; +}RX_DESC; + + +typedef struct +{ + // TXDES0 + u32 TXBUF_Size:14; + u32 Reserved1:1; + u32 Reserved2:1; + u32 Reserved3:3; + u32 CRC_ERR:1; + u32 Reserved4:8; + u32 LTS:1; + u32 FTS:1; + u32 EDOTR:1; + u32 TXDMA_OWN:1; + + // TXDES1 + u32 VLAN_TAGC:16; + u32 INS_VLAN:1; + u32 TCPCS_EN:1; + u32 UDPCS_EN:1; + u32 IPCS_EN:1; + u32 Reserved5:2; + u32 LLC_PKT:1; + u32 Reserved6:7; + u32 TX2FIC:1; + u32 TXIC:1; + + // TXDES2 + u32 Reserved7:32; + + // TXDES3 + u32 TXBUF_BADR; + + u32 VIR_TXBUF_BADR; // Reserve, the virtual address of transmit buffer is placed here + + u32 RESERVED; + u32 RESERVED1; + u32 RESERVED2; + +}TX_DESC; + + + +// waiting to do: +#define TXPOLL_CNT 8 +#define RXPOLL_CNT 0 + +#define TX_OWNBY_SOFTWARE 0 +#define TX_OWNBY_FTGMAC100 1 + + +#define RX_OWNBY_SOFTWARE 1 +#define RX_OWNBY_FTGMAC100 0 + +// 
-------------------------------------------------------------------- +// driver related definition +// -------------------------------------------------------------------- + + +//#define RXDES_NUM 64//64 // we defined 32 descriptor for OTG issue +#define RXDES_NUM 32 + +#define RX_BUF_SIZE 1536 + +#define TXDES_NUM 32 +#define TX_BUF_SIZE 1536 + +#define PHYID_VENDOR_MASK 0xfffffc00 +#define PHYID_VENDOR_MODEL_MASK 0xfffffff0 +#define PHYID_MODEL_MASK 0x000003f0 +#define PHYID_REVISION_MASK 0x0000000f +#define PHYID_VENDOR_MARVELL 0x01410c00 +#define PHYID_VENDOR_BROADCOM 0x00406000 +#define PHYID_VENDOR_REALTEK 0x001cc800 + +#define PHYID_BCM5221A4 0x004061e4 +//#define PHYID_RTL8201EL 0x001cc815 +#define PHYID_RTL8201EL 0x001cc810 +#define PHYID_RTL8201F 0x001cc816 +#define PHYID_RTL8211 0x001cc910 +#define PHYID_RTL8211E 0x001cc915 +#define PHYID_BCM54612E 0x03625E6A + + +/* store this information for the driver.. */ + +struct AstMacHwConfig { + unsigned char phyAddr; // See IP_phy_addr[] encoding + unsigned char macId; + unsigned char isRevA0; + unsigned char isRevA2; + unsigned char pad[1]; + unsigned int miiPhyId; +}; + +struct ftgmac100_priv { + + // these are things that the kernel wants me to keep, so users + // can find out semi-useless statistics of how well the card is + // performing + struct net_device_stats stats; + + struct AstMacHwConfig ids; + + struct net_device *netdev; + struct device *dev; + + // Set to true during the auto-negotiation sequence + int autoneg_active; + + // Last contents of PHY Register 18 + u32 lastPhy18; + + spinlock_t tx_lock; + + //RX .. + volatile RX_DESC *rx_descs; // receive ring base address + struct sk_buff *rx_skbuff[RXDES_NUM]; + u32 rx_descs_dma; // receive ring physical base address + int rx_idx; // receive descriptor + + //TX .. 
+ volatile TX_DESC *tx_descs; + u32 tx_descs_dma; + char *tx_buf; + int tx_buf_dma; + int tx_idx; + int old_tx; + struct sk_buff *tx_skbuff[TXDES_NUM]; + + int maccr_val; + struct timer_list timer; + u32 GigaBit_MAHT0; + u32 GigaBit_MAHT1; + u32 Not_GigaBit_MAHT0; + u32 Not_GigaBit_MAHT1; + NCSI_Command_Packet NCSI_Request; + NCSI_Response_Packet NCSI_Respond; + NCSI_Capability NCSI_Cap; + unsigned int InstanceID; + unsigned int Retry; + unsigned char Payload_Data[16]; + unsigned char Payload_Pad[4]; + unsigned long Payload_Checksum; + int tx_free; + unsigned long NCSI_support; + unsigned long INTEL_NCSI_EVA_support; +}; + + +#define FTGMAC100_STROBE_TIME (10*HZ) +///#define FTMAC100_STROBE_TIME 1 + +//I2C define for EEPROM +#define AC_TIMING 0x77743335 +#define ALL_CLEAR 0xFFFFFFFF +#define MASTER_ENABLE 0x01 +#define SLAVE_ENABLE 0x02 +#define LOOP_COUNT 0x100000 + + +#define I2C_BASE 0x1e78A000 +#define I2C_FUNCTION_CONTROL_REGISTER 0x00 +#define I2C_AC_TIMING_REGISTER_1 0x04 +#define I2C_AC_TIMING_REGISTER_2 0x08 +#define I2C_INTERRUPT_CONTROL_REGISTER 0x0C +#define I2C_INTERRUPT_STATUS_REGISTER 0x10 +#define I2C_COMMAND_REGISTER 0x14 +#define I2C_BYTE_BUFFER_REGISTER 0x20 + + +#define MASTER_START_COMMAND (1 << 0) +#define MASTER_TX_COMMAND (1 << 1) +#define MASTER_RX_COMMAND (1 << 3) +#define RX_COMMAND_LIST (1 << 4) +#define MASTER_STOP_COMMAND (1 << 5) + +#define TX_ACK (1 << 0) +#define TX_NACK (1 << 1) +#define RX_DONE (1 << 2) +#define STOP_DONE (1 << 4) + + + +#endif /* _SMC_91111_H_ */ + + diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 123092d8a984..f60078c7f8ea 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -679,4 +679,10 @@ config RTC_DRV_STARFIRE If you say Y here you will get support for the RTC found on Starfire systems. +config RTC_DRV_ASPEED + bool "ASPEED RTC" + depends on ARM + help + RTC driver for ASPEED chips. 
+ endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 6e79c912bf9e..7a16fed80ce6 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -70,3 +70,4 @@ obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o +obj-$(CONFIG_RTC_DRV_ASPEED) += rtc-aspeed.o diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c new file mode 100755 index 000000000000..477032e986ce --- /dev/null +++ b/drivers/rtc/rtc-aspeed.c @@ -0,0 +1,495 @@ +/******************************************************************************** +* File Name : drivers/rtc/rtc-ast.c +* Author : Ryan chen +* Description : ASPEED Real Time Clock Driver (RTC) +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +* History : +* 1. 
2012/09/21 ryan chen create this file +* +********************************************************************************/ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/rtc.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <asm/io.h> + +#include <plat/regs-rtc.h> + +struct ast_rtc { + void __iomem *base; + int irq; + struct resource *res; + struct rtc_device *rtc_dev; + spinlock_t lock; +}; + +//static char banner[] = "ASPEED RTC, (C) ASPEED Technology Inc.\n"; +//#define CONFIG_RTC_DEBUG + + +static inline u32 +rtc_read(void __iomem *base, u32 reg) +{ +#ifdef CONFIG_RTC_DEBUG + int val = readl(base + reg); + pr_debug("base = 0x%p, offset = 0x%08x, value = 0x%08x\n", base, reg, val); + return val; +#else + return readl(base + reg); +#endif +} + +static inline void +rtc_write(void __iomem * base, u32 val, u32 reg) +{ + pr_debug("base = 0x%p, offset = 0x%08x, data = 0x%08x\n", base, reg, val); + writel(val, base + reg); +} + +static int +ast_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + struct ast_rtc *ast_rtc = dev_get_drvdata(dev); + pr_debug("cmd = 0x%08x, arg = 0x%08lx\n", cmd, arg); + + switch (cmd) { + case RTC_AIE_ON: /* alarm on */ + { + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL) | ENABLE_ALL_ALARM, RTC_CONTROL); + return 0; + } + + case RTC_AIE_OFF: /* alarm off */ + { + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL) &~ENABLE_ALL_ALARM, RTC_CONTROL); + return 0; + } + case RTC_UIE_ON: /* update on */ + { + pr_debug("no such function \n"); + return 0; + } + case RTC_UIE_OFF: /* update off */ + { + pr_debug("no such function \n"); + return 0; + } + case RTC_PIE_OFF: /* periodic off */ + { + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL) | ENABLE_SEC_INTERRUPT, RTC_CONTROL); + + return 0; + } + case RTC_PIE_ON: /* periodic on */ + { + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL) & 
~ENABLE_SEC_INTERRUPT, RTC_CONTROL); + + return 0; + } + default: + return -ENOTTY; + } + + return 0; +} + + +/* Time read/write */ +static int +ast_rtc_get_time(struct device *dev, struct rtc_time *rtc_tm) +{ + struct ast_rtc *ast_rtc = dev_get_drvdata(dev); + unsigned long flags; + u32 reg_time, reg_date; + + spin_lock_irqsave(&ast_rtc->lock, flags); + + reg_time = rtc_read(ast_rtc->base, RTC_CNTR_STS_1); + reg_date = rtc_read(ast_rtc->base, RTC_CNTR_STS_2); + + spin_unlock_irqrestore(&ast_rtc->lock, flags); + + rtc_tm->tm_year = GET_CENT_VAL(reg_date)*1000 | GET_YEAR_VAL(reg_date); + rtc_tm->tm_mon = GET_MON_VAL(reg_date); + + rtc_tm->tm_mday = GET_DAY_VAL(reg_time); + rtc_tm->tm_hour = GET_HOUR_VAL(reg_time); + rtc_tm->tm_min = GET_MIN_VAL(reg_time); + rtc_tm->tm_sec = GET_SEC_VAL(reg_time); + + pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n", + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, + rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); + return 0; + +} + +static int +ast_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct ast_rtc *ast_rtc = dev_get_drvdata(dev); + unsigned long flags; + u32 reg_time, reg_date; + + pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n", + tm->tm_year, tm->tm_mon, tm->tm_mday, + tm->tm_hour, tm->tm_min, tm->tm_sec); + + spin_lock_irqsave(&ast_rtc->lock, flags); + + /* set hours */ + reg_time = SET_DAY_VAL(tm->tm_mday) | SET_HOUR_VAL(tm->tm_hour) | SET_MIN_VAL(tm->tm_min) | SET_SEC_VAL(tm->tm_sec); + + /* set century */ + /* set mon */ + reg_date = SET_CENT_VAL(tm->tm_year / 1000) | SET_YEAR_VAL(tm->tm_year % 1000) | SET_MON_VAL(tm->tm_mon); + + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL) | RTC_LOCK, RTC_CONTROL); + + rtc_write(ast_rtc->base, reg_time, RTC_CNTR_STS_1); + rtc_write(ast_rtc->base, reg_date, RTC_CNTR_STS_2); + + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL) &~RTC_LOCK , RTC_CONTROL); + + spin_unlock_irqrestore(&ast_rtc->lock, flags); + + return 0; +} +static 
int +ast_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ast_rtc *ast_rtc = dev_get_drvdata(dev); + unsigned long flags; + struct rtc_time *alm_tm = &alarm->time; + u32 alarm_reg; + + spin_lock_irqsave(&ast_rtc->lock, flags); + alarm_reg = rtc_read(ast_rtc->base, RTC_ALARM); + spin_unlock_irqrestore(&ast_rtc->lock, flags); + +//DAY + alm_tm->tm_mday = GET_DAY_VAL(alarm_reg); + +//HR + alm_tm->tm_hour = GET_HOUR_VAL(alarm_reg); + +//MIN + alm_tm->tm_min= GET_MIN_VAL(alarm_reg); + +//SEC + alm_tm->tm_sec= GET_SEC_VAL(alarm_reg); + + pr_debug("ast_rtc_read_alarm: %d, %02x %02x.%02x.%02x\n", + alarm->enabled, + alm_tm->tm_mday & 0xff, alm_tm->tm_hour & 0xff, alm_tm->tm_min & 0xff, alm_tm->tm_sec); + + return 0; + + +} + +static int +ast_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ast_rtc *ast_rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alarm->time; + unsigned long flags; + u32 reg_alarm = 0; + + pr_debug("ast_rtc_setalarm: %d, %02x %02x.%02x.%02x\n", + alarm->enabled, + tm->tm_mday & 0xff, tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); + +//DAY + /* set day of week */ + if (tm->tm_mday <= 31 && tm->tm_mday >= 1) { + reg_alarm |= SET_DAY_VAL(tm->tm_mday); + } + +//HR + /* set ten hours */ + if (tm->tm_hour <= 23 && tm->tm_hour >= 0) { + reg_alarm |= SET_HOUR_VAL(tm->tm_hour); + } + +//MIN + /* set ten minutes */ + if (tm->tm_min <= 59 && tm->tm_min >= 0) { + reg_alarm |= SET_MIN_VAL(tm->tm_min); + } + +//SEC + /* set ten secondss */ + if (tm->tm_sec <= 59 && tm->tm_sec >= 0) { + reg_alarm |= SET_SEC_VAL(tm->tm_sec); + } + + pr_debug("ast_rtc_set alarm reg: %x \n", reg_alarm); + + spin_lock_irqsave(&ast_rtc->lock, flags); + + rtc_write(ast_rtc->base, reg_alarm, RTC_ALARM); + + if(alarm->enabled) + rtc_write(ast_rtc->base, reg_alarm, RTC_CONTROL); + else + rtc_write(ast_rtc->base, reg_alarm, RTC_CONTROL); + + spin_unlock_irqrestore(&ast_rtc->lock, flags); + return 0; + +} +static int +ast_rtc_proc(struct 
device *dev, struct seq_file *seq) +{ + struct ast_rtc *ast_rtc = dev_get_drvdata(dev); + u32 ctrl_reg; + + ctrl_reg = rtc_read(ast_rtc->base, RTC_CONTROL); + + pr_debug("ctrl_reg = 0x%08x\n", ctrl_reg); + + seq_printf(seq, "periodic_IRQ\t: %s\n", + (ctrl_reg & ENABLE_SEC_INTERRUPT) ? "yes" : "no" ); + + return 0; +} + +static int +ast_rtc_irq_set_freq(struct device *dev, int freq) +{ + struct ast_rtc *ast_rtc = dev_get_drvdata(dev); + pr_debug("freq = %d\n", freq); + + spin_lock_irq(&ast_rtc->lock); + + if(freq == 0) + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL)&~ENABLE_SEC_INTERRUPT, RTC_CONTROL); + else + rtc_write(ast_rtc->base, rtc_read(ast_rtc->base, RTC_CONTROL)|ENABLE_SEC_INTERRUPT, RTC_CONTROL); + + spin_unlock_irq(&ast_rtc->lock); + + return 0; +} + +static irqreturn_t +ast_rtc_interrupt(int irq, void *dev_id) +{ + struct ast_rtc *ast_rtc = dev_id; + + unsigned int status = rtc_read(ast_rtc->base, RTC_ALARM_STS); + rtc_write(ast_rtc->base, status, RTC_ALARM_STS); + + if (status & SEC_INTERRUPT_STATUS) { + printk("RTC Alarm SEC_INTERRUPT_STATUS!!\n"); + } + + if (status & DAY_ALARM_STATUS) { + printk("RTC Alarm DAY_ALARM_STATUS!!\n"); + } + + if (status & HOUR_ALARM_STATUS) { + printk("RTC Alarm HOUR_ALARM_STATUS!!\n"); + } + + if (status & MIN_ALARM_STATUS) { + printk("RTC Alarm MIN_ALARM_STATUS!!\n"); + } + + if (status & SEC_ALARM_STATUS) { + printk("RTC Alarm SEC_ALARM_STATUS!!\n"); + } + + rtc_update_irq(ast_rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); + + return (IRQ_HANDLED); +} + +static struct rtc_class_ops ast_rtcops = { + .ioctl = ast_rtc_ioctl, + .read_time = ast_rtc_get_time, + .set_time = ast_rtc_set_time, + .read_alarm = ast_rtc_read_alarm, + .set_alarm = ast_rtc_set_alarm, + .proc = ast_rtc_proc, + .irq_set_freq = ast_rtc_irq_set_freq, +}; + +/* + * Initialize and install RTC driver + */ +static int __init ast_rtc_probe(struct platform_device *pdev) +{ + struct ast_rtc *ast_rtc; + struct rtc_device *rtc_dev; + struct resource 
*res; + int ret; + + pr_debug("%s: probe=%p\n", __func__, pdev); + + ast_rtc = kzalloc(sizeof *ast_rtc, GFP_KERNEL); + if (!ast_rtc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "register resources unusable\n"); + ret = -ENXIO; + goto free_rtc; + } + + ast_rtc->irq = platform_get_irq(pdev, 0); + if (ast_rtc->irq < 0) { + dev_err(&pdev->dev, "unable to get irq\n"); + ret = -ENXIO; + goto free_rtc; + } + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + ret = -EBUSY; + goto free_rtc; + } + + ast_rtc->base = ioremap(res->start, resource_size(res)); + if (!ast_rtc->base) { + dev_err(&pdev->dev, "cannot map SocleDev registers\n"); + ret = -ENOMEM; + goto release_mem; + } + + pr_debug("base = 0x%p, irq = %d\n", ast_rtc->base, ast_rtc->irq); + + rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &ast_rtcops, THIS_MODULE); + if (IS_ERR(rtc_dev)) { + ret = PTR_ERR(rtc_dev); + goto unmap; + } + + ast_rtc->res = res; + ast_rtc->rtc_dev = rtc_dev; + spin_lock_init(&ast_rtc->lock); + + platform_set_drvdata(pdev, ast_rtc); + +// ast_rtc_irq_set_freq(&pdev->dev, 1); + + /* start the RTC from dddd:hh:mm:ss = 0000:00:00:00 */ + spin_lock_irq(&ast_rtc->lock); + if(!(rtc_read(ast_rtc->base, RTC_CONTROL) & RTC_ENABLE)) { + //combination mode + rtc_write(ast_rtc->base, ALARM_MODE_SELECT | RTC_LOCK | RTC_ENABLE, RTC_CONTROL); + + rtc_write(ast_rtc->base, 0, RTC_CNTR_STS_1); + + rtc_write(ast_rtc->base, 0, RTC_CNTR_STS_2); + + rtc_write(ast_rtc->base, 0, RTC_ALARM); + rtc_write(ast_rtc->base, ~RTC_LOCK & rtc_read(ast_rtc->base, RTC_CONTROL), RTC_CONTROL); + } else + printk("no need to enable RTC \n"); + + spin_unlock_irq(&ast_rtc->lock); + + /* register ISR */ + ret = request_irq(ast_rtc->irq, ast_rtc_interrupt, IRQF_DISABLED, dev_name(&rtc_dev->dev), ast_rtc); + if (ret) { + printk(KERN_ERR "ast_rtc: IRQ %d already in use.\n", + ast_rtc->irq); + goto unregister; + } + + return 0; + 
+unregister: + rtc_device_unregister(rtc_dev); + platform_set_drvdata(pdev, NULL); +unmap: + iounmap(ast_rtc->base); +release_mem: + release_mem_region(res->start, resource_size(res)); +free_rtc: + kfree(ast_rtc); + return ret; + +} + +/* + * Disable and remove the RTC driver + */ +static int __exit ast_rtc_remove(struct platform_device *pdev) +{ + struct ast_rtc *ast_rtc = platform_get_drvdata(pdev); + + free_irq(IRQ_RTC, pdev); + rtc_device_unregister(ast_rtc->rtc_dev); + platform_set_drvdata(pdev, NULL); + iounmap(ast_rtc->base); + release_resource(ast_rtc->res); + kfree(ast_rtc); + + return 0; +} + +#ifdef CONFIG_PM + +/* ASPEED RTC Power management control */ +static int ast_rtc_suspend(struct platform_device *pdev, pm_message_t state) +{ + return 0; +} + +static int ast_rtc_resume(struct platform_device *pdev) +{ + return 0; +} +#else +#define ast_rtc_suspend NULL +#define ast_rtc_resume NULL +#endif + +static struct platform_driver ast_rtc_driver = { + .probe = ast_rtc_probe, + .remove = __exit_p(ast_rtc_remove), + .suspend = ast_rtc_suspend, + .resume = ast_rtc_resume, + .driver = { + .name = "ast_rtc", + .owner = THIS_MODULE, + }, +}; + +static int __init ast_rtc_init(void) +{ + return platform_driver_register(&ast_rtc_driver); +} + +static void __exit ast_rtc_exit(void) +{ + platform_driver_unregister(&ast_rtc_driver); +} + +module_init(ast_rtc_init); +module_exit(ast_rtc_exit); + +MODULE_AUTHOR("Ryan Chen"); +MODULE_DESCRIPTION("RTC driver for ASPEED AST "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ast_rtc"); + diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 579d63a81aa2..5666583e199c 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -137,6 +137,34 @@ config SERIAL_8250_RUNTIME_UARTS with the module parameter "nr_uarts", or boot-time parameter 8250.nr_uarts +config SERIAL_AST_DMA_UART + tristate "AST UART driver with DMA" + depends on ARCH_ASPEED + select SERIAL_CORE + help + The ASPEED UART driver with DMA 
supporting. The device node is /dev/ttyDMA + +config AST_NR_DMA_UARTS + int "Maximum number of ast1070 uart dma serial ports" + depends on SERIAL_AST_DMA_UART + default "4" + help + Set this to the number of serial ports you want the driver + to support. This includes any ports discovered via ACPI or + PCI enumeration and any ports that may be added at run-time + via hot-plug, or any ISA multi-port serial cards. + +config AST_RUNTIME_DMA_UARTS + int "Number of ast1070 uart dma serial ports to register at runtime" + depends on SERIAL_AST_DMA_UART + range 0 AST_NR_DMA_UARTS + default "4" + help + Set this to the maximum number of serial ports you want + the kernel to register at boot time. This can be overridden + with the module parameter "nr_uarts", or boot-time parameter + 8250.nr_uarts + config SERIAL_8250_EXTENDED bool "Extended 8250/16550 serial driver options" depends on SERIAL_8250 @@ -510,6 +538,39 @@ config SERIAL_S3C2440 +config SERIAL_AST + tristate "ASPEED serial port support" + depends on ARCH_ASPEED + select SERIAL_CORE + help + Support for the on-chip UARTs on the ASPEED chips, + providing /dev/ttySAC0, 1 and 2 (note, some machines may not + provide all of these ports, depending on how the serial port + pins are configured. + +config SERIAL_ASPEED_CONSOLE + bool "Support for console on ASPEED serial port" + depends on SERIAL_AST=y + select SERIAL_CORE_CONSOLE + help + Allow selection of the ASPEED on-board serial ports for use as + an virtual console. + + Even if you say Y here, the currently visible virtual console + (/dev/ttyS0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttySx". (Try "man bootparam" or see the documentation of + your boot loader about how to pass options to the kernel at + boot time.) 
+ +config SERIAL_ASPEED_CONSOLE_BAUD + int "ASPEED serial port baud" + depends on SERIAL_ASPEED_CONSOLE=y + default "115200" + help + Select the ASPEED console baud rate. + This value is only used if the bootloader doesn't pass in the + config SERIAL_DZ bool "DECstation DZ serial driver" depends on MACH_DECSTATION && 32BIT diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index 0c17c8ddb19d..9a0059f416a0 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o +obj-$(CONFIG_SERIAL_AST) += ast_serial.o obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o @@ -28,6 +29,7 @@ obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o +obj-$(CONFIG_SERIAL_AST_DMA_UART) += ast1070_dma_uart.o obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o diff --git a/drivers/serial/ast1070_dma_uart.c b/drivers/serial/ast1070_dma_uart.c new file mode 100644 index 000000000000..16eb3c5f2a29 --- /dev/null +++ b/drivers/serial/ast1070_dma_uart.c @@ -0,0 +1,1511 @@ +/******************************************************************************** +* File Name : ast1070_dma_uart.c +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +********************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial_reg.h> +#include <linux/serial_core.h> +#include <linux/serial.h> +#include <linux/serial_8250.h> +#include <linux/nmi.h> +#include <linux/mutex.h> + +#include <asm/io.h> +#include <asm/irq.h> + +#include "8250.h" +#include <linux/dma-mapping.h> +#include <linux/miscdevice.h> +#include <plat/regs-uart-dma.h> +#include <mach/ast-uart-dma.h> + +//#define CONFIG_UART_DMA_DEBUG + +#ifdef CONFIG_UART_DMA_DEBUG + #define DBG(fmt, args...) printk("%s() " fmt, __FUNCTION__, ## args) +#else + #define DBG(fmt, args...) +#endif + +/* + * Configuration: + * share_irqs - whether we pass IRQF_SHARED to request_irq(). This option + * is unsafe when used on edge-triggered interrupts. + */ +static unsigned int share_irqs = SERIAL8250_SHARE_IRQS; + +static unsigned int nr_uarts = CONFIG_AST_RUNTIME_DMA_UARTS; + +/* + * Debugging. + */ +#if 0 +#define DEBUG_AUTOCONF(fmt...) printk(fmt) +#else +#define DEBUG_AUTOCONF(fmt...) do { } while (0) +#endif + +#if 0 +#define DEBUG_INTR(fmt...) printk(fmt) +#else +#define DEBUG_INTR(fmt...) 
do { } while (0) +#endif + +#define PASS_LIMIT 256 + +#include <asm/serial.h> + + +#define UART_DMA_NR CONFIG_AST_NR_DMA_UARTS + +struct ast_uart_port { + struct uart_port port; + unsigned short capabilities; /* port capabilities */ + unsigned short bugs; /* port bugs */ + unsigned int tx_loadsz; /* transmit fifo load size */ + unsigned char acr; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char mcr_mask; /* mask of user bits */ + unsigned char mcr_force; /* mask of forced bits */ + struct circ_buf rx_dma_buf; + struct circ_buf tx_dma_buf; + dma_addr_t dma_rx_addr; /* Mapped ADMA descr. table */ + dma_addr_t dma_tx_addr; /* Mapped ADMA descr. table */ + unsigned int dma_buf_size; //total allocation dma size .. + struct tasklet_struct rx_tasklet; + int rx_tasklet_done; + struct tasklet_struct tx_tasklet; + spinlock_t lock; + int tx_done; + int tx_count; + /* + * Some bits in registers are cleared on a read, so they must + * be saved whenever the register is read but the bits will not + * be immediately processed. + */ +#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS + unsigned char lsr_saved_flags; +#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA + unsigned char msr_saved_flags; + + /* + * We provide a per-port pm hook. + */ + void (*pm)(struct uart_port *port, + unsigned int state, unsigned int old); +}; + +static struct ast_uart_port ast_uart_ports[UART_DMA_NR]; + +static inline struct ast_uart_port * +to_ast_dma_uart_port(struct uart_port *uart) +{ + return container_of(uart, struct ast_uart_port, port); +} + +struct irq_info { + spinlock_t lock; + struct ast_uart_port *up; +}; + +static struct irq_info ast_uart_irq[1]; +static DEFINE_MUTEX(ast_uart_mutex); + +/* + * Here we define the default xmit fifo size used for each type of UART. 
+ */ +static const struct serial8250_config uart_config[] = { + [PORT_UNKNOWN] = { + .name = "unknown", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_8250] = { + .name = "8250", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16450] = { + .name = "16450", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16550] = { + .name = "16550", + .fifo_size = 1, + .tx_loadsz = 1, + }, + [PORT_16550A] = { + .name = "16550A", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | UART_FCR_DMA_SELECT, + .flags = UART_CAP_FIFO, + }, +}; + +/* sane hardware needs no mapping */ +#define map_8250_in_reg(up, offset) (offset) +#define map_8250_out_reg(up, offset) (offset) + +void ast_uart_unregister_port(int line); +int ast_uart_register_port(struct uart_port *port); + +static unsigned int serial_in(struct ast_uart_port *up, int offset) +{ + offset = map_8250_in_reg(up, offset) << up->port.regshift; + + return readb(up->port.membase + offset); +} + +static void +serial_out(struct ast_uart_port *up, int offset, int value) +{ + /* Save the offset before it's remapped */ + offset = map_8250_out_reg(up, offset) << up->port.regshift; + + writeb(value, up->port.membase + offset); +} + + +/* + * We used to support using pause I/O for certain machines. We + * haven't supported this for a while, but just in case it's badly + * needed for certain old 386 machines, I've left these #define's + * in.... 
+ */ +#define serial_inp(up, offset) serial_in(up, offset) +#define serial_outp(up, offset, value) serial_out(up, offset, value) + +/* Uart divisor latch read */ +static inline int _serial_dl_read(struct ast_uart_port *up) +{ + return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8; +} + +/* Uart divisor latch write */ +static inline void _serial_dl_write(struct ast_uart_port *up, int value) +{ + serial_outp(up, UART_DLL, value & 0xff); + serial_outp(up, UART_DLM, value >> 8 & 0xff); +} + +#define serial_dl_read(up) _serial_dl_read(up) +#define serial_dl_write(up, value) _serial_dl_write(up, value) + +static void ast_uart_tx_tasklet_func(unsigned long data) +{ + struct ast_uart_port *up = to_ast_dma_uart_port((struct uart_port *)data); + struct circ_buf *xmit = &up->port.info->xmit; + struct ast_uart_dma_data *uart_dma_data = up->port.private_data; + + up->tx_done = 0; + DBG("line [%d], xmit->head =%d, xmit->tail = %d\n",up->port.line,xmit->head, xmit->tail); + + if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { + up->tx_count = 0; + up->tx_done = 1; + return; + } + + if (up->port.x_char) { + serial_outp(up, UART_TX, up->port.x_char); + up->port.icount.tx++; + up->port.x_char = 0; + return; + } + + up->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); + + if (up->tx_count > (UART_XMIT_SIZE - xmit->tail)) { + up->tx_count = UART_XMIT_SIZE - xmit->tail; + } + + if (up->tx_count > 4095) { + printk("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
TODO ....\n"); + up->tx_count = 4095; + } + + ast_uart_tx_dma_ctrl(uart_dma_data->chip_no, + uart_dma_data->dma_ch, AST_UART_DMAOP_STOP); + + ast_uart_tx_dma_enqueue(uart_dma_data->chip_no, + uart_dma_data->dma_ch, up->dma_tx_addr, up->tx_count); + + dma_sync_single_for_device(up->port.dev, + up->dma_tx_addr, + up->tx_count, + DMA_TO_DEVICE); + + ast_uart_tx_dma_ctrl(uart_dma_data->chip_no, + uart_dma_data->dma_ch, AST_UART_DMAOP_TRIGGER); + + +} + +static void ast_uart_tx_buffdone(struct ast1070_dma_ch *dma_ch, void *dev_id, u16 len) +{ + struct ast_uart_port *up = (struct ast_uart_port *) dev_id; + struct circ_buf *xmit = &up->port.info->xmit; + + DBG("line [%d] : tx len = %d \n", up->port.line, len); + + spin_lock(&up->port.lock); +//TODO .....................................len ----> + xmit->tail = (xmit->tail + up->tx_count) & (UART_XMIT_SIZE - 1); + up->port.icount.tx += up->tx_count; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + tasklet_schedule(&up->tx_tasklet); + + spin_unlock(&up->port.lock); +} + +static void ast_uart_rx_tasklet_func(unsigned long data) +{ + struct ast_uart_port *up = to_ast_dma_uart_port((struct uart_port *)data); + struct circ_buf *rx_ring = &up->rx_dma_buf; + struct tty_struct *tty = up->port.info->port.tty; + char flag; + DBG("line [%d]\n",up->port.line); + DBG("rx_ring->head = %d, rx_ring->tail = %d , buff addr = %x \n",rx_ring->head, rx_ring->tail, rx_ring->buf); + + spin_lock_irq(&up->lock); +#if 1 + DBG("\n rx data : -- >"); + + while (rx_ring->head != rx_ring->tail) { + DBG(" %x ",rx_ring->buf[rx_ring->tail]); + flag = TTY_NORMAL; + uart_insert_char(&up->port, 0, UART_LSR_OE, \ + rx_ring->buf[rx_ring->tail], flag); + +// tty_insert_flip_string + + rx_ring->tail++; + if (rx_ring->tail == up->dma_buf_size) + rx_ring->tail = 0; + } + DBG("\n"); +#else + + tty_insert_flip_string(tty, rx_ring->buf + rx_ring->tail, (rx_ring->head - rx_ring->tail)); + rx_ring->tail = rx_ring->head; +#endif + 
spin_unlock_irq(&up->lock); + + spin_unlock(&up->port.lock); + tty_flip_buffer_push(tty); + spin_lock(&up->port.lock); + + +} + +static void ast_uart_rx_buffdone(struct ast1070_dma_ch *dma_ch, + void *dev_id, u16 len) +{ + struct ast_uart_port *up = (struct ast_uart_port *)dev_id; +// struct tty_struct *tty = up->port.info->port.tty; + struct circ_buf *rx_ring = &up->rx_dma_buf; + struct ast_uart_dma_data *uart_dma_data = up->port.private_data; + u16 remain_size; + + DBG("line [%d]\n",up->port.line); +#if 0 + int i; + printk("Buff virt addr = %x \n",rx_ring->buf); + for(i=0;i<len;i++) + printk("Buff [%x] \n", rx_ring->buf[up->rx_dma_buf.head + i]); +#endif + DBG("head = %d, len : %d\n",up->rx_dma_buf.head, len); + + + //FOR NEXT ...... + rx_ring->head += len; + + if (rx_ring->head == up->dma_buf_size) { + rx_ring->head = 0; + } + + remain_size = up->dma_buf_size - rx_ring->head; + + //Trigger Next RX dma + DBG("trigger next size = %d \n",remain_size); + + ast_uart_rx_dma_ctrl(uart_dma_data->chip_no, + uart_dma_data->dma_ch, AST_UART_DMAOP_STOP); + + if(remain_size > DMA_BUFF_SIZE) + printk("Please check ---> \n"); + + if(remain_size != 0) { + ast_uart_rx_dma_enqueue(uart_dma_data->chip_no, + uart_dma_data->dma_ch, up->dma_rx_addr + up->rx_dma_buf.head, remain_size); + } + ast_uart_rx_dma_ctrl(uart_dma_data->chip_no, + uart_dma_data->dma_ch, AST_UART_DMAOP_TRIGGER); + + tasklet_schedule(&up->rx_tasklet); + +} + +/* + * FIFO support. + */ +static inline void serial8250_clear_fifos(struct ast_uart_port *p) +{ + serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + serial_outp(p, UART_FCR, 0); +} + + +/* + * This routine is called by rs_init() to initialize a specific serial + * port. 
+ */ +static void autoconfig(struct ast_uart_port *up, unsigned int probeflags) +{ + unsigned long flags; + + if (!up->port.iobase && !up->port.mapbase && !up->port.membase) + return; + + DEBUG_AUTOCONF("ttyDMA%d: autoconf (0x%04x, 0x%p): ", + up->port.line, up->port.iobase, up->port.membase); + + spin_lock_irqsave(&up->port.lock, flags); + + up->capabilities = 0; + up->bugs = 0; + + up->port.type = PORT_16550A; + up->capabilities |= UART_CAP_FIFO; + + up->port.fifosize = uart_config[up->port.type].fifo_size; + up->capabilities = uart_config[up->port.type].flags; + up->tx_loadsz = uart_config[up->port.type].tx_loadsz; + + if (up->port.type == PORT_UNKNOWN) + goto out; + + /* + * Reset the UART. + */ + serial8250_clear_fifos(up); + serial_in(up, UART_RX); + serial_outp(up, UART_IER, 0); + + out: + spin_unlock_irqrestore(&up->port.lock, flags); + DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name); +} + + +static inline void __stop_tx(struct ast_uart_port *p) +{ + if (p->ier & UART_IER_THRI) { + p->ier &= ~UART_IER_THRI; + serial_out(p, UART_IER, p->ier); + } +} + +static void serial8250_stop_tx(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + + __stop_tx(up); + +} + +static void transmit_chars(struct ast_uart_port *up); + +static void serial8250_start_tx(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + + DBG("line [%d] --> \n", port->line); + if (up->tx_done) + tasklet_schedule(&up->tx_tasklet); +} + +static void serial8250_stop_rx(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + + DBG("line [%d] --> \n", port->line); + up->ier &= ~UART_IER_RLSI; + up->port.read_status_mask &= ~UART_LSR_DR; + serial_out(up, UART_IER, up->ier); +} + +static void serial8250_enable_ms(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + + up->ier |= UART_IER_MSI; + serial_out(up, UART_IER, up->ier); +} + +static void 
transmit_chars(struct ast_uart_port *up) +{ + struct circ_buf *xmit = &up->port.info->xmit; + int count; + + if (up->port.x_char) { + serial_outp(up, UART_TX, up->port.x_char); + up->port.icount.tx++; + up->port.x_char = 0; + return; + } + if (uart_tx_stopped(&up->port)) { + serial8250_stop_tx(&up->port); + return; + } + if (uart_circ_empty(xmit)) { + __stop_tx(up); + return; + } + +// printk("uart_circ_chars_pending=%d\n",uart_circ_chars_pending(xmit)); + + count = up->tx_loadsz; + do { +//printk("TX : buf = 0x%x\n", xmit->buf[xmit->tail]); + serial_out(up, UART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + up->port.icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&up->port); + + DEBUG_INTR("THRE..."); + + if (uart_circ_empty(xmit)) + __stop_tx(up); +} + +static unsigned int check_modem_status(struct ast_uart_port *up) +{ + unsigned int status = serial_in(up, UART_MSR); + + status |= up->msr_saved_flags; + up->msr_saved_flags = 0; + if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI && + up->port.info != NULL) { + if (status & UART_MSR_TERI) + up->port.icount.rng++; + if (status & UART_MSR_DDSR) + up->port.icount.dsr++; + if (status & UART_MSR_DDCD) + uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); + if (status & UART_MSR_DCTS) + uart_handle_cts_change(&up->port, status & UART_MSR_CTS); + + wake_up_interruptible(&up->port.info->delta_msr_wait); + } + + return status; +} + +/* + * This handles the interrupt from one port. 
+ */ +static inline void +serial8250_handle_port(struct ast_uart_port *up) +{ + unsigned int status; + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + DEBUG_INTR("serial8250_handle_port \n"); + + status = serial_inp(up, UART_LSR); + + DEBUG_INTR("status = %x...", status); + + check_modem_status(up); + if (status & UART_LSR_THRE) + transmit_chars(up); + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +/* + * This is the serial driver's interrupt routine. + */ +static irqreturn_t ast_uart_interrupt(int irq, void *dev_id) +{ + struct irq_info *i = dev_id; + int pass_counter = 0, handled = 0, end = 0; + + DEBUG_INTR("ast_uart_interrupt(%d)...", irq); + spin_lock(&i->lock); + + do { + struct ast_uart_port *up; + unsigned int iir; + + up = (struct ast_uart_port *)(i->up); + + iir = serial_in(up, UART_IIR); + DEBUG_INTR("iir %x \n", iir); + if (!(iir & UART_IIR_NO_INT)) { + printk("handle port \n"); + serial8250_handle_port(up); + handled = 1; + + } + else + end = 1; + + if (pass_counter++ > PASS_LIMIT) { + /* If we hit this, we're dead. */ + printk(KERN_ERR "ast-uart-dma: too much work for " + "irq%d\n", irq); + break; + } + } while (end); + + spin_unlock(&i->lock); + + DEBUG_INTR("end.\n"); + + return IRQ_RETVAL(handled); +} + +static unsigned int serial8250_tx_empty(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + unsigned long flags; + unsigned int lsr; + + spin_lock_irqsave(&up->port.lock, flags); + lsr = serial_in(up, UART_LSR); + up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + spin_unlock_irqrestore(&up->port.lock, flags); + + return lsr & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; +} + +static unsigned int serial8250_get_mctrl(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + unsigned int status; + unsigned int ret; + + status = check_modem_status(up); + + ret = 0; + if (status & UART_MSR_DCD) + ret |= TIOCM_CAR; + if (status & UART_MSR_RI) + ret |= TIOCM_RNG; + if (status & UART_MSR_DSR) + ret |= TIOCM_DSR; + if (status & UART_MSR_CTS) + ret |= TIOCM_CTS; + return ret; +} + +static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + unsigned char mcr = 0; + + if (mctrl & TIOCM_RTS) + mcr |= UART_MCR_RTS; + if (mctrl & TIOCM_DTR) + mcr |= UART_MCR_DTR; + if (mctrl & TIOCM_OUT1) + mcr |= UART_MCR_OUT1; + if (mctrl & TIOCM_OUT2) + mcr |= UART_MCR_OUT2; + if (mctrl & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + + mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr; + + serial_out(up, UART_MCR, mcr); +} + +static void serial8250_break_ctl(struct uart_port *port, int break_state) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + if (break_state == -1) + up->lcr |= UART_LCR_SBC; + else + up->lcr &= ~UART_LCR_SBC; + serial_out(up, UART_LCR, up->lcr); + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static int serial8250_startup(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + //TX DMA + struct circ_buf *xmit = &port->info->xmit; + struct ast_uart_dma_data *uart_dma_data = up->port.private_data; + unsigned long flags; + unsigned char lsr, iir; + int retval; + int irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0; + + DBG("line [%d] \n",port->line); + + up->capabilities = uart_config[up->port.type].flags; + up->mcr = 0; + + /* + * Clear the FIFO buffers and disable them. + * (they will be reenabled in set_termios()) + */ + serial8250_clear_fifos(up); + + /* + * Clear the interrupt registers. 
+ */ + (void) serial_inp(up, UART_LSR); + (void) serial_inp(up, UART_RX); + (void) serial_inp(up, UART_IIR); + (void) serial_inp(up, UART_MSR); + + ast_uart_irq[0].up = up; + retval = request_irq(up->port.irq, ast_uart_interrupt, + irq_flags, "ast-uart-dma", ast_uart_irq); + if (retval) + return retval; + + /* + * Now, initialize the UART + */ + serial_outp(up, UART_LCR, UART_LCR_WLEN8); + + spin_lock_irqsave(&up->port.lock, flags); + up->port.mctrl |= TIOCM_OUT2; + + serial8250_set_mctrl(&up->port, up->port.mctrl); + + /* + * Do a quick test to see if we receive an + * interrupt when we enable the TX irq. + */ + serial_outp(up, UART_IER, UART_IER_THRI); + lsr = serial_in(up, UART_LSR); + iir = serial_in(up, UART_IIR); + serial_outp(up, UART_IER, 0); + + if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) { + if (!(up->bugs & UART_BUG_TXEN)) { + up->bugs |= UART_BUG_TXEN; + printk("ttyDMA%d - enabling bad tx status \n", + port->line); + } + } else { + up->bugs &= ~UART_BUG_TXEN; + } + + spin_unlock_irqrestore(&up->port.lock, flags); + + /* + * Clear the interrupt registers again for luck, and clear the + * saved flags to avoid getting false values from polling + * routines or the previous session. 
+ */ + serial_inp(up, UART_LSR); + serial_inp(up, UART_RX); + serial_inp(up, UART_IIR); + serial_inp(up, UART_MSR); + up->lsr_saved_flags = 0; + up->msr_saved_flags = 0; + + //RX DMA + up->rx_dma_buf.head = 0; + up->rx_dma_buf.tail = 0; + up->dma_buf_size = 2048;//DMA_BUFF_SIZE -1; //4096 is dma size please check +#if 0 + up->dma_rx_addr = dma_map_single(port->dev, + up->rx_dma_buf.buf, + up->dma_buf_size, + DMA_FROM_DEVICE); +#else + up->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, + up->dma_buf_size, &up->dma_rx_addr, GFP_KERNEL); +#endif + DBG("RX buff vir = %x, phy = %x \n", up->rx_dma_buf.buf, up->dma_rx_addr); + + ast_uart_rx_dma_ctrl(uart_dma_data->chip_no, uart_dma_data->dma_ch, AST_UART_DMAOP_STOP); + + ast_uart_rx_dma_enqueue(uart_dma_data->chip_no, uart_dma_data->dma_ch, up->dma_rx_addr, up->dma_buf_size); + + up->rx_tasklet_done = 1; + ast_uart_rx_dma_ctrl(uart_dma_data->chip_no, uart_dma_data->dma_ch, AST_UART_DMAOP_TRIGGER); + + up->tx_dma_buf.head = 0; + up->tx_dma_buf.buf = xmit->buf; + up->dma_tx_addr = dma_map_single(port->dev, + up->tx_dma_buf.buf, + UART_XMIT_SIZE, + DMA_TO_DEVICE); + up->tx_done = 1; + up->tx_count = 0; + + return 0; +} + +static void serial8250_shutdown(struct uart_port *port) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + struct ast_uart_dma_data *uart_dma_data = up->port.private_data; + unsigned long flags; + //int i; + DBG("line [%d]\n",port->line); + /* + * Disable interrupts from this port + */ +#if 0 + for(i=0; i<100; i++) { + printk("tx_count_table[%d] = %d\n", i, tx_count_table[i]); + } +#endif + + up->ier = 0; + serial_outp(up, UART_IER, 0); + + spin_lock_irqsave(&up->port.lock, flags); + up->port.mctrl &= ~TIOCM_OUT2; + + serial8250_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + + /* + * Disable break condition and FIFOs + */ + serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC); + serial8250_clear_fifos(up); + + (void) 
serial_in(up, UART_RX); + + ast_uart_rx_dma_ctrl(uart_dma_data->chip_no, uart_dma_data->dma_ch, AST_UART_DMAOP_STOP); + + ast_uart_tx_dma_ctrl(uart_dma_data->chip_no, uart_dma_data->dma_ch, AST_UART_DMAOP_STOP); + //TODO .... Free ---- dma + DBG("free TX , RX buffer \n"); +#if 1 + dma_unmap_single(port->dev, up->dma_rx_addr, + up->dma_buf_size, + DMA_FROM_DEVICE); +#else + dma_free_coherent(port->dev, up->dma_buf_size, + up->rx_dma_buf.buf, up->dma_rx_addr); +#endif + + dma_unmap_single(port->dev, up->dma_tx_addr, + UART_XMIT_SIZE, + DMA_TO_DEVICE); + + + free_irq(up->port.irq, ast_uart_irq); + +} + +static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud) +{ + unsigned int quot; + + quot = uart_get_divisor(port, baud); + + return quot; +} + +static void +serial8250_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ + struct ast_uart_port *up = to_ast_dma_uart_port(port); + unsigned char cval, fcr = 0; + unsigned long flags; + unsigned int baud, quot; + + switch (termios->c_cflag & CSIZE) { + case CS5: + cval = UART_LCR_WLEN5; + break; + case CS6: + cval = UART_LCR_WLEN6; + break; + case CS7: + cval = UART_LCR_WLEN7; + break; + default: + case CS8: + cval = UART_LCR_WLEN8; + break; + } + + if (termios->c_cflag & CSTOPB) + cval |= UART_LCR_STOP; + if (termios->c_cflag & PARENB) + cval |= UART_LCR_PARITY; + if (!(termios->c_cflag & PARODD)) + cval |= UART_LCR_EPAR; +#ifdef CMSPAR + if (termios->c_cflag & CMSPAR) + cval |= UART_LCR_SPAR; +#endif + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = serial8250_get_divisor(port, baud); + + if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) { + if (baud < 2400) + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1; + else + fcr = uart_config[up->port.type].fcr; + } + + /* + * Ok, we're now changing the port state. Do it with + * interrupts disabled. 
+ */ + spin_lock_irqsave(&up->port.lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + if (termios->c_iflag & INPCK) + up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (termios->c_iflag & (BRKINT | PARMRK)) + up->port.read_status_mask |= UART_LSR_BI; + + /* + * Characteres to ignore + */ + up->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; + if (termios->c_iflag & IGNBRK) { + up->port.ignore_status_mask |= UART_LSR_BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + up->port.ignore_status_mask |= UART_LSR_OE; + } + + /* + * ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + up->port.ignore_status_mask |= UART_LSR_DR; + + /* + * CTS flow control flag and modem status interrupts + */ + up->ier &= ~UART_IER_MSI; + if (UART_ENABLE_MS(&up->port, termios->c_cflag)) + up->ier |= UART_IER_MSI; + + serial_out(up, UART_IER, up->ier); + + + serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */ + + serial_dl_write(up, quot); + + /* + * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR + * is written without DLAB set, this mode will be disabled. 
+ */ + + serial_outp(up, UART_LCR, cval); /* reset DLAB */ + up->lcr = cval; /* Save LCR */ + if (fcr & UART_FCR_ENABLE_FIFO) { + /* emulated UARTs (Lucent Venus 167x) need two steps */ + serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); + } + serial_outp(up, UART_FCR, fcr); /* set fcr */ + serial8250_set_mctrl(&up->port, up->port.mctrl); + spin_unlock_irqrestore(&up->port.lock, flags); + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static void +serial8250_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ + struct ast_uart_port *p = (struct ast_uart_port *)port; + + if (p->pm) + p->pm(port, state, oldstate); +} + +/* + * Resource handling. + */ +static int serial8250_request_std_resource(struct ast_uart_port *up) +{ + unsigned int size = 8 << up->port.regshift; + int ret = 0; + + if (!up->port.mapbase) + return ret; + + if (!request_mem_region(up->port.mapbase, size, "ast-uart-dma")) { + ret = -EBUSY; + return ret; + } + + if (up->port.flags & UPF_IOREMAP) { + up->port.membase = ioremap_nocache(up->port.mapbase, + size); + if (!up->port.membase) { + release_mem_region(up->port.mapbase, size); + ret = -ENOMEM; + return ret; + } + } + return ret; +} + +static void serial8250_release_std_resource(struct ast_uart_port *up) +{ + unsigned int size = 8 << up->port.regshift; + + if (!up->port.mapbase) + return; + + if (up->port.flags & UPF_IOREMAP) { + iounmap(up->port.membase); + up->port.membase = NULL; + } + + release_mem_region(up->port.mapbase, size); +} + + +static void serial8250_release_port(struct uart_port *port) +{ + struct ast_uart_port *up = (struct ast_uart_port *)port; + + serial8250_release_std_resource(up); +} + +static int serial8250_request_port(struct uart_port *port) +{ + struct ast_uart_port *up = (struct ast_uart_port *)port; + int ret = 0; + + ret = serial8250_request_std_resource(up); + if (ret == 0 ) + serial8250_release_std_resource(up); + + return 
ret; +} + +static void serial8250_config_port(struct uart_port *port, int flags) +{ + struct ast_uart_port *up = (struct ast_uart_port *)port; + int probeflags = PROBE_ANY; + int ret; + + /* + * Find the region that we can probe for. This in turn + * tells us whether we can probe for the type of port. + */ + ret = serial8250_request_std_resource(up); + if (ret < 0) + return; + + if (flags & UART_CONFIG_TYPE) + autoconfig(up, probeflags); + + if (up->port.type == PORT_UNKNOWN) + serial8250_release_std_resource(up); +} + +static int +serial8250_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return 0; +} + +static const char * +serial8250_type(struct uart_port *port) +{ + int type = port->type; + + if (type >= ARRAY_SIZE(uart_config)) + type = 0; + return uart_config[type].name; +} + +static struct uart_ops serial8250_pops = { + .tx_empty = serial8250_tx_empty, + .set_mctrl = serial8250_set_mctrl, + .get_mctrl = serial8250_get_mctrl, + .stop_tx = serial8250_stop_tx, + .start_tx = serial8250_start_tx, + .stop_rx = serial8250_stop_rx, + .enable_ms = serial8250_enable_ms, + .break_ctl = serial8250_break_ctl, + .startup = serial8250_startup, + .shutdown = serial8250_shutdown, + .set_termios = serial8250_set_termios, + .pm = serial8250_pm, + .type = serial8250_type, + .release_port = serial8250_release_port, + .request_port = serial8250_request_port, + .config_port = serial8250_config_port, + .verify_port = serial8250_verify_port, +}; + +static void __init serial8250_isa_init_ports(void) +{ + static int first = 1; + int i; + + if (!first) + return; + first = 0; + + for (i = 0; i < nr_uarts; i++) { + struct ast_uart_port *up = &ast_uart_ports[i]; + + up->port.line = i; + spin_lock_init(&up->port.lock); + + /* + * ALPHA_KLUDGE_MCR needs to be killed. 
+ */ + up->mcr_mask = ~ALPHA_KLUDGE_MCR; + up->mcr_force = ALPHA_KLUDGE_MCR; + + up->port.ops = &serial8250_pops; + } + +} + +static void __init +serial8250_register_ports(struct uart_driver *drv, struct device *dev) +{ + int i; + printk("serial8250_register_ports \n"); + + serial8250_isa_init_ports(); + + for (i = 0; i < nr_uarts; i++) { + struct ast_uart_port *up = &ast_uart_ports[i]; + up->port.dev = dev; + uart_add_one_port(drv, &up->port); + } +} + +#define SERIAL8250_CONSOLE NULL + +static struct uart_driver serial8250_reg = { + .owner = THIS_MODULE, + .driver_name = "ast-uart-dma", + .dev_name = "ttyDMA", +#if 0 + .major = TTY_MAJOR, + .minor = 64, +#else + .major = 204, // like atmel_serial + .minor = 155, +#endif + .nr = UART_DMA_NR, + .cons = SERIAL8250_CONSOLE, +}; + + +#if 0 +/** + * serial8250_suspend_port - suspend one serial port + * @line: serial line number + * + * Suspend one serial port. + */ +void serial8250_suspend_port(int line) +{ + uart_suspend_port(&serial8250_reg, &ast_uart_ports[line].port); +} + +/** + * serial8250_resume_port - resume one serial port + * @line: serial line number + * + * Resume one serial port. + */ +void serial8250_resume_port(int line) +{ + struct ast_uart_port *up = &ast_uart_ports[line]; + + uart_resume_port(&serial8250_reg, &up->port); +} +#endif + +/* + * Register a set of serial devices attached to a platform device. The + * list is terminated with a zero flags entry, which means we expect + * all entries to have at least UPF_BOOT_AUTOCONF set. 
+ */ +static int __devinit serial8250_probe(struct platform_device *dev) +{ + struct plat_serial8250_port *p = dev->dev.platform_data; + struct uart_port port; + struct ast_uart_dma_data *uart_dma_data; + int ret, i; + + if(UART_XMIT_SIZE > DMA_BUFF_SIZE) + printk("UART_XMIT_SIZE > DMA_BUFF_SIZE : Please Check \n"); + + memset(&port, 0, sizeof(struct uart_port)); + + for (i = 0; p && p->flags != 0; p++, i++) { + port.iobase = p->iobase; + port.membase = p->membase; + port.irq = p->irq; + port.uartclk = p->uartclk; + port.regshift = p->regshift; + port.iotype = p->iotype; + port.flags = p->flags; + port.mapbase = p->mapbase; + port.hub6 = p->hub6; + port.private_data = p->private_data; + port.dev = &dev->dev; + uart_dma_data = p->private_data; + if (share_irqs) + port.flags |= UPF_SHARE_IRQ; + ret = ast_uart_register_port(&port); + if (ret < 0) { + dev_err(&dev->dev, "unable to register port at index %d " + "(IO%lx MEM%llx IRQ%d): %d\n", i, + p->iobase, (unsigned long long)p->mapbase, + p->irq, ret); + } +// printk("TODO ...... line = %d \n",i); + ret = ast_uart_rx_dma_request(uart_dma_data->chip_no, uart_dma_data->dma_ch, ast_uart_rx_buffdone, &ast_uart_ports[i]); + if (ret < 0) { + printk("Error : failed to get rx dma channel[%d]\n", uart_dma_data->dma_ch); + goto out_ast_uart_unregister_port; + } + + ret = ast_uart_tx_dma_request(uart_dma_data->chip_no, uart_dma_data->dma_ch, ast_uart_tx_buffdone, &ast_uart_ports[i]); + if (ret < 0) { + printk("Error : failed to get tx dma channel[%d]\n", uart_dma_data->dma_ch); + return ret; + } + } + + return 0; + +out_ast_uart_unregister_port: + for (i = 0; i < nr_uarts; i++) { + struct ast_uart_port *up = &ast_uart_ports[i]; + + if (up->port.dev == &dev->dev) + ast_uart_unregister_port(i); + }; + return ret; + +} + +/* + * Remove serial ports registered against a platform device. 
+ */ +static int __devexit serial8250_remove(struct platform_device *dev) +{ + int i; + + for (i = 0; i < nr_uarts; i++) { + struct ast_uart_port *up = &ast_uart_ports[i]; + + if (up->port.dev == &dev->dev) + ast_uart_unregister_port(i); + } + //TODO .. +// pl080_dma_free(uart_dma_rx.channel, (void *)uart_dma_rx.client); +// pl080_dma_free(uart_dma_tx.channel, (void *)uart_dma_tx.client); + + return 0; +} + +static int serial8250_suspend(struct platform_device *dev, pm_message_t state) +{ + int i; + + for (i = 0; i < UART_DMA_NR; i++) { + struct ast_uart_port *up = &ast_uart_ports[i]; + + if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) + uart_suspend_port(&serial8250_reg, &up->port); + } + + return 0; +} + +static int serial8250_resume(struct platform_device *dev) +{ + int i; + + for (i = 0; i < UART_DMA_NR; i++) { + struct ast_uart_port *up = &ast_uart_ports[i]; + + if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) + serial8250_resume_port(i); + } + + return 0; +} + +static struct platform_driver serial8250_ast_dma_driver = { + .probe = serial8250_probe, + .remove = __devexit_p(serial8250_remove), + .suspend = serial8250_suspend, + .resume = serial8250_resume, + .driver = { + .name = "ast-uart-dma", + .owner = THIS_MODULE, + }, +}; + +/* + * This "device" covers _all_ ISA 8250-compatible serial devices listed + * in the table in include/asm/serial.h + */ +static struct platform_device *serial8250_isa_devs; + +/* + * serial8250_register_port and serial8250_unregister_port allows for + * 16x50 serial ports to be configured at run-time, to support PCMCIA + * modems and PCI multiport cards. + */ + +static struct ast_uart_port *serial8250_find_match_or_unused(struct uart_port *port) +{ + int i; + + /* + * First, find a port entry which matches. 
+ */ + for (i = 0; i < nr_uarts; i++) + if (uart_match_port(&ast_uart_ports[i].port, port)) + return &ast_uart_ports[i]; + + /* + * We didn't find a matching entry, so look for the first + * free entry. We look for one which hasn't been previously + * used (indicated by zero iobase). + */ + for (i = 0; i < nr_uarts; i++) + if (ast_uart_ports[i].port.type == PORT_UNKNOWN && + ast_uart_ports[i].port.iobase == 0) + return &ast_uart_ports[i]; + + /* + * That also failed. Last resort is to find any entry which + * doesn't have a real port associated with it. + */ + for (i = 0; i < nr_uarts; i++) + if (ast_uart_ports[i].port.type == PORT_UNKNOWN) + return &ast_uart_ports[i]; + + return NULL; +} + +/** + * serial8250_register_port - register a serial port + * @port: serial port template + * + * Configure the serial port specified by the request. If the + * port exists and is in use, it is hung up and unregistered + * first. + * + * The port is then probed and if necessary the IRQ is autodetected + * If this fails an error is returned. + * + * On success the port is ready to use and the line number is returned. 
+ */ +int ast_uart_register_port(struct uart_port *port) +{ + struct ast_uart_port *uart; + int ret = -ENOSPC; + + if (port->uartclk == 0) + return -EINVAL; +printk("register port line %d\n",port->line); + mutex_lock(&ast_uart_mutex); + + uart = serial8250_find_match_or_unused(port); + if (uart) { + uart_remove_one_port(&serial8250_reg, &uart->port); + + uart->port.iobase = port->iobase; + uart->port.membase = port->membase; + uart->port.irq = port->irq; + uart->port.uartclk = port->uartclk; + uart->port.fifosize = port->fifosize; + uart->port.regshift = port->regshift; + uart->port.iotype = port->iotype; + uart->port.flags = port->flags | UPF_BOOT_AUTOCONF; + uart->port.mapbase = port->mapbase; + uart->port.private_data = port->private_data; + if (port->dev) + uart->port.dev = port->dev; + + ret = uart_add_one_port(&serial8250_reg, &uart->port); + if (ret == 0) + ret = uart->port.line; + + spin_lock_init(&uart->lock); + + tasklet_init(&uart->rx_tasklet, ast_uart_rx_tasklet_func, + (unsigned long)uart); + + tasklet_init(&uart->tx_tasklet, ast_uart_tx_tasklet_func, + (unsigned long)uart); + + } + + mutex_unlock(&ast_uart_mutex); + + return ret; +} +EXPORT_SYMBOL(ast_uart_register_port); + +/** + * serial8250_unregister_port - remove a 16x50 serial port at runtime + * @line: serial line number + * + * Remove one serial port. This may not be called from interrupt + * context. We hand the port back to the our control. 
+ */ +void ast_uart_unregister_port(int line) +{ + struct ast_uart_port *uart = &ast_uart_ports[line]; + + mutex_lock(&ast_uart_mutex); + uart_remove_one_port(&serial8250_reg, &uart->port); + if (serial8250_isa_devs) { + uart->port.flags &= ~UPF_BOOT_AUTOCONF; + uart->port.type = PORT_UNKNOWN; + uart->port.dev = &serial8250_isa_devs->dev; + uart_add_one_port(&serial8250_reg, &uart->port); + } else { + uart->port.dev = NULL; + } + mutex_unlock(&ast_uart_mutex); +} +EXPORT_SYMBOL(ast_uart_unregister_port); + +static int __init ast_uart_init(void) +{ + int ret; + + if (nr_uarts > UART_DMA_NR) + nr_uarts = UART_DMA_NR; + + printk(KERN_INFO "ast-uart-dma: UART driver with DMA " + "%d ports, IRQ sharing %sabled\n", nr_uarts, + share_irqs ? "en" : "dis"); + + spin_lock_init(&ast_uart_irq[0].lock); + + ret = uart_register_driver(&serial8250_reg); + if (ret) + goto out; + + serial8250_isa_devs = platform_device_alloc("ast-uart-dma", + PLAT8250_DEV_LEGACY); + if (!serial8250_isa_devs) { + ret = -ENOMEM; + goto unreg_uart_drv; + } + + ret = platform_device_add(serial8250_isa_devs); + if (ret) + goto put_dev; + + serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev); + + ret = platform_driver_register(&serial8250_ast_dma_driver); + if (ret == 0) + goto out; + + platform_device_del(serial8250_isa_devs); + put_dev: + platform_device_put(serial8250_isa_devs); + unreg_uart_drv: + uart_unregister_driver(&serial8250_reg); + out: + return ret; +} + +static void __exit ast_uart_exit(void) +{ + struct platform_device *isa_dev = serial8250_isa_devs; + + /* + * This tells serial8250_unregister_port() not to re-register + * the ports (thereby making serial8250_ast_dma_driver permanently + * in use.) 
+ */ + serial8250_isa_devs = NULL; + + platform_driver_unregister(&serial8250_ast_dma_driver); + platform_device_unregister(isa_dev); + + uart_unregister_driver(&serial8250_reg); +} + +late_initcall(ast_uart_init); +module_exit(ast_uart_exit); + +#if 0 +EXPORT_SYMBOL(serial8250_suspend_port); +EXPORT_SYMBOL(serial8250_resume_port); +#endif + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AST DMA serial driver"); +MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); diff --git a/drivers/serial/ast_serial.c b/drivers/serial/ast_serial.c new file mode 100644 index 000000000000..d1822e4c2843 --- /dev/null +++ b/drivers/serial/ast_serial.c @@ -0,0 +1,675 @@ +/******************************************************************************** +* File Name : ast_serial.c +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +********************************************************************************/ + +#include <linux/module.h> +#include <linux/tty.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/serial.h> +#include <linux/console.h> +#include <linux/sysrq.h> + +#include <asm/io.h> +#include <asm/irq.h> + +#if defined(CONFIG_SERIAL_ASPEED_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include <linux/serial_core.h> + +#if defined(CONFIG_COLDFIRE) +#include <asm/astsim.h> +#include <asm/ast_serial.h> +#define UART_NR 1 + +#elif defined(CONFIG_ARM) +#include <mach/hardware.h> +#include <mach/aspeed_serial.h> +#define UART_NR 2 + +#else +#err "NO CONFIG CPU for Serial UART" +#endif + +#define PORT_AST 255 + + + +#define SERIAL_AST_CONSLE_NAME "ttyS" +#define SERIAL_AST_TTY_NAME "ttyS" +#define SERIAL_AST_DEVFS_NAME "tts/" +#define SERIAL_AST_MAJOR 4 +#define SERIAL_AST_MINOR 64 +#define SERIAL_AST_NR UART_NR + +#define CALLOUT_AST_NAME "cuaam" +#define CALLOUT_AST_MAJOR 4 +#define CALLOUT_AST_MINOR 65 +#define CALLOUT_AST_NR UART_NR + + +#ifdef SUPPORT_SYSRQ +static struct console ast_console; +#endif + +#define MVP2000_ISR_PASS_LIMIT 256 + + +/* + * Access macros for the UARTs + */ +#define UART_GET_CHAR(p) readl((p)->membase + UART_RBR) +#define UART_PUT_CHAR(p, v) writel((v), (p)->membase + UART_THR) +#define UART_GET_DLL(p) readl((p)->membase + UART_DLL) +#define UART_PUT_DLL(p, v) writel((v), (p)->membase + UART_DLL) +#define UART_GET_DLH(p) readl((p)->membase + UART_DLH) +#define UART_PUT_DLH(p, v) writel((v), (p)->membase + UART_DLH) +#define UART_GET_IER(p) readl((p)->membase + UART_IER) +#define UART_PUT_IER(p, v) writel((v), (p)->membase + UART_IER) +#define UART_GET_IIR(p) readl((p)->membase + 
UART_IIR) +#define UART_GET_FCR(p) readl((p)->membase + UART_FCR) +#define UART_PUT_FCR(p, v) writel((v), (p)->membase + UART_FCR) +#define UART_GET_LCR(p) readl((p)->membase + UART_LCR) +#define UART_PUT_LCR(p, v) writel((v), (p)->membase + UART_LCR) +#define UART_GET_LSR(p) readl((p)->membase + UART_LSR) + +#define UART_DUMMY_RSR_RX 256 +#define UART_PORT_SIZE 64 + +/* + * We wrap our port structure around the generic uart_port. + */ +struct uart_ast_port { + struct uart_port port; +}; + +static void ast_uart_stop_tx(struct uart_port *port) +{ + unsigned int cr; + + cr = UART_GET_IER(port); + cr &= ~UART_IER_ETEI; + UART_PUT_IER(port, cr); +} + +static void ast_uart_start_tx(struct uart_port *port) +{ + unsigned int cr; + + cr = UART_GET_IER(port); + cr |= UART_IER_ETEI; + UART_PUT_IER(port, cr); +} + +static void ast_uart_stop_rx(struct uart_port *port) +{ + unsigned int cr; + + cr = UART_GET_IER(port); + cr &= ~UART_IER_ERDI; + UART_PUT_IER(port, cr); +} + +static void ast_uart_enable_ms(struct uart_port *port) +{ + /* printk(KERN_WARNING "ASPEED UART DO NOT Support MODEM operations(emable_ms)\n"); */ +} + +static void +ast_uart_rx_chars(struct uart_port *port) +{ + struct tty_struct *tty = port->info->port.tty; + unsigned int status, ch, flag, lsr, max_count = 256; + + status = UART_GET_LSR(port);; + while ((status & UART_LSR_DR) && max_count--) { + + ch = UART_GET_CHAR(port); + flag = TTY_NORMAL; + port->icount.rx++; + + /* + * Note that the error handling code is + * out of the main execution path + */ + lsr = UART_GET_LSR(port); + if (unlikely(lsr & UART_LSR_ANY)) { + if (lsr & UART_LSR_BE) { + lsr &= ~(UART_LSR_FE | UART_LSR_PE); + port->icount.brk++; + if (uart_handle_break(port)) + goto ignore_char; + } else if (lsr & UART_LSR_PE) + port->icount.parity++; + else if (lsr & UART_LSR_FE) + port->icount.frame++; + if (lsr & UART_LSR_OE) + port->icount.overrun++; + + lsr &= port->read_status_mask; + + if (lsr & UART_LSR_BE) + flag = TTY_BREAK; + else if (lsr 
& UART_LSR_PE) + flag = TTY_PARITY; + else if (lsr & UART_LSR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(port, ch & 255)) + goto ignore_char; + + uart_insert_char(port, lsr, UART_LSR_OE, ch, flag); + + ignore_char: + status = UART_GET_LSR(port); + } + tty_flip_buffer_push(tty); + return; +} + +static void ast_uart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->info->xmit; + int count; + + if (port->x_char) { + UART_PUT_CHAR(port, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + ast_uart_stop_tx(port); + return; + } + + count = port->fifosize >> 1; + do { + UART_PUT_CHAR(port, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + ast_uart_stop_tx(port); +} + +static irqreturn_t ast_uart_int(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + unsigned int status, iir, pass_counter = MVP2000_ISR_PASS_LIMIT; + + spin_lock(&port->lock); + + status = UART_GET_LSR(port); + do { + if (status & UART_LSR_DR) + ast_uart_rx_chars(port); + + if (status & UART_LSR_THRE) { + ast_uart_tx_chars(port); + iir = UART_GET_IIR(port); + } + + if (pass_counter-- == 0) + break; + + status = UART_GET_LSR(port); + } while (status & (UART_LSR_THRE|UART_LSR_DR)); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static unsigned int ast_uart_tx_empty(struct uart_port *port) +{ + return UART_GET_LSR(port) & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; +} + +static unsigned int ast_uart_get_mctrl(struct uart_port *port) +{ + /* printk(KERN_WARNING "ASPEED UART DO NOT Support MODEM operations(get_mctrl)\n"); */ + return 0; +} + +static void ast_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* printk(KERN_WARNING "ASPEED UART DO NOT Support MODEM operations(set_mctrl)\n"); */ +} + +static void ast_uart_break_ctl(struct uart_port *port, int break_state) +{ + unsigned long flags; + unsigned int lcr; + + spin_lock_irqsave(&port->lock, flags); + lcr = UART_GET_LCR(port); + if (break_state == -1) + lcr |= UART_LCR_BRK; + else + lcr &= ~UART_LCR_BRK; + UART_PUT_LCR(port, lcr); + spin_unlock_irqrestore(&port->lock, flags); +} + +static int ast_uart_startup(struct uart_port *port) +{ + int retval; + + /* + * Allocate the IRQ + */ + retval = request_irq(port->irq, ast_uart_int, IRQF_DISABLED, "ast_serial", port); + if (retval) + { + printk("ast_uart_startup: Can't Get IRQ\n"); + return retval; + } + + /* + * Finally, enable interrupts + */ +// IRQ_SET_HIGH_LEVEL(port->irq); +// IRQ_SET_LEVEL_TRIGGER(port->irq); + UART_PUT_IER(port, UART_IER_ERDI); + + return 0; +} + +static void ast_uart_shutdown(struct uart_port *port) +{ + /* + * Free the interrupt + */ + free_irq(port->irq, port); + + /* + * disable all interrupts, disable the port + */ + UART_PUT_IER(port, 0); + + /* disable break condition and fifos */ + UART_PUT_LCR(port, UART_GET_LCR(port)&(~UART_LCR_BRK)); + UART_PUT_FCR(port, UART_GET_FCR(port)&(~UART_FCR_FIFOE)); +} + +static void +ast_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ + unsigned int lcr, fcr = 0; + unsigned long flags; + unsigned int baud, quot; + int ch, i; + + /* + * Ask the core to calculate the divisor for us. 
+ */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = port->uartclk / (16 * baud); + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr = UART_LCR_WLEN_5; + break; + case CS6: + lcr = UART_LCR_WLEN_6; + break; + case CS7: + lcr = UART_LCR_WLEN_7; + break; + default: // CS8 + lcr = UART_LCR_WLEN_8; + break; + } + if (termios->c_cflag & CSTOPB) + lcr |= UART_LCR_STOP; + if (termios->c_cflag & PARENB) { + lcr |= UART_LCR_PEN; + if (!(termios->c_cflag & PARODD)) + lcr |= UART_LCR_EPS; + } + if (port->fifosize > 1) + fcr |= (UART_FCR_XMITR|UART_FCR_RCVRR|UART_FCR_FIFOE); + + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = UART_LSR_OE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (termios->c_iflag & (BRKINT | PARMRK)) + port->read_status_mask |= UART_LSR_BE; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= UART_LSR_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= UART_LSR_OE; + } + + /* + * Ignore all characters if CREAD is not set. 
+ */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_RSR_RX; + + + /* Set baud rate */ + UART_PUT_LCR(port, UART_LCR_DLAB); /* enable Divisor Latach Address Bit */ + UART_PUT_DLH(port, ((quot >> 8) & 0xFF)); + UART_PUT_DLL(port, (quot & 0xff)); + + UART_PUT_FCR(port, fcr); + UART_PUT_LCR(port, lcr); + for (i = 0; i < 16; i++) { + ch = UART_GET_CHAR (port); /* Clear Timeout Interrupt */ + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *ast_uart_type(struct uart_port *port) +{ + return port->type == PORT_AST ? "AST UART" : NULL; +} + +/* + * Release the memory region(s) being used by 'port' + */ +static void ast_uart_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, UART_PORT_SIZE); +} + +/* + * Request the memory region(s) being used by 'port' + */ +static int ast_uart_request_port(struct uart_port *port) +{ + return request_mem_region(port->mapbase, UART_PORT_SIZE, "serial_ast") + != NULL ? 0 : -EBUSY; +} + +/* + * Configure/autoconfigure the port. + */ +static void ast_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_AST; + ast_uart_request_port(port); + } +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). 
+ */ +static int ast_uart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_AST) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +static struct uart_ops ast_pops = { + .tx_empty = ast_uart_tx_empty, + .set_mctrl = ast_uart_set_mctrl, + .get_mctrl = ast_uart_get_mctrl, + .stop_tx = ast_uart_stop_tx, + .start_tx = ast_uart_start_tx, + .stop_rx = ast_uart_stop_rx, + .enable_ms = ast_uart_enable_ms, + .break_ctl = ast_uart_break_ctl, + .startup = ast_uart_startup, + .shutdown = ast_uart_shutdown, + .set_termios = ast_set_termios, + .type = ast_uart_type, + .release_port = ast_uart_release_port, + .request_port = ast_uart_request_port, + .config_port = ast_uart_config_port, + .verify_port = ast_uart_verify_port, +}; + +#if defined(CONFIG_COLDFIRE) +static struct uart_ast_port ast_ports[UART_NR] = { + { + .port = { + .membase = (void *) AST_UART0_BASE, + .mapbase = AST_UART0_BASE, + .iotype = SERIAL_IO_MEM, + .irq = IRQ_UART0, + .uartclk = (24*1000000L), + .fifosize = 16, + .ops = &ast_pops, + .flags = ASYNC_BOOT_AUTOCONF, + .line = 0, + }, + } +}; + +#elif defined (CONFIG_ARM) +static struct uart_ast_port ast_ports[UART_NR] = { + { + .port = { + .membase = (void *) (IO_ADDRESS(AST_UART0_BASE)), + .mapbase = AST_UART0_BASE, + .iotype = SERIAL_IO_MEM, + .irq = IRQ_UART0, + .uartclk = (24*1000000L), + .fifosize = 16, + .ops = &ast_pops, + .flags = ASYNC_BOOT_AUTOCONF, + .line = 0, + }, + } +}; +#else +#err "ERROR~~~" +#endif + +#ifdef CONFIG_SERIAL_ASPEED_CONSOLE + +static void aspeed_console_putchar(struct uart_port *port, int ch) +{ + while (!(UART_GET_LSR(port) & UART_LSR_THRE)) + barrier(); + UART_PUT_CHAR(port, ch); +} + +static void ast_uart_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_port *port = &ast_ports[co->index].port; + unsigned int status, 
old_ier; + + /* + * First save the IER then disable the interrupts + */ + old_ier = UART_GET_IER(port); + UART_PUT_IER(port, 0); + + /* + * Now, do each character + */ + uart_console_write(port, s, count, aspeed_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the IER + */ + do { + status = UART_GET_LSR(port); + } while (!(status & UART_LSR_TEMT)); + UART_PUT_IER(port, old_ier); +} + +static void __init +ast_uart_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits) +{ + if (UART_GET_IER(port) & UART_IER_ERDI) { + unsigned int lcr, quot; + lcr = UART_GET_LCR(port); + + *parity = 'n'; + if (lcr & UART_LCR_PEN) { + if (lcr & UART_LCR_EPS) + *parity = 'e'; + else + *parity = 'o'; + } + + switch (lcr & UART_LCR_WLEN_MASK) { + case UART_LCR_WLEN_8: + default: + *bits = 8; + break; + case UART_LCR_WLEN_7: + *bits = 7; + break; + case UART_LCR_WLEN_6: + *bits = 6; + break; + case UART_LCR_WLEN_5: + *bits = 5; + break; + } + + UART_PUT_LCR(port, UART_LCR_DLAB); /* enable Divisor Latach Address Bit */ + quot = UART_GET_DLL(port) | (UART_GET_DLH(port) << 8); + *baud = port->uartclk / (16 * quot); + UART_PUT_LCR(port, lcr); + } +} + +static int __init ast_uart_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = CONFIG_SERIAL_ASPEED_CONSOLE_BAUD; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
+ */ + if (co->index >= UART_NR) + co->index = 0; + port = &ast_ports[co->index].port; + + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + ast_uart_console_get_options(port, &baud, &parity, &bits); + + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver ast_reg; + +static struct console ast_console = { + .name = SERIAL_AST_CONSLE_NAME, + .write = ast_uart_console_write, + .device = uart_console_device, + .setup = ast_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &ast_reg, +}; + +int __init ast_uart_console_init(void) +{ + register_console(&ast_console); + return 0; +} + +console_initcall(ast_uart_console_init); + +#define MVP2000_CONSOLE &ast_console +#else +#define MVP2000_CONSOLE NULL +#endif + + +static struct uart_driver ast_reg = { + .owner = THIS_MODULE, + .major = SERIAL_AST_MAJOR, + .minor = SERIAL_AST_MINOR, + .dev_name = SERIAL_AST_TTY_NAME, + .nr = UART_NR, + .cons = MVP2000_CONSOLE, +}; + +static int __init ast_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&ast_reg); + if (ret == 0) { + int i; + + for (i = 0; i < UART_NR; i++) + uart_add_one_port(&ast_reg, &ast_ports[i].port); + } + return ret; +} + +static void __exit ast_uart_exit(void) +{ + int i; + + for (i = 0; i < UART_NR; i++) + uart_remove_one_port(&ast_reg, &ast_ports[i].port); + + uart_unregister_driver(&ast_reg); +} + +module_init(ast_uart_init); +module_exit(ast_uart_exit); + +MODULE_AUTHOR("ASPEED Technology Inc."); +MODULE_DESCRIPTION("ASPEED serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index b9d0efb6803f..d95c2c879c5d 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -53,6 +53,19 @@ if SPI_MASTER comment "SPI Master Controller Drivers" +config SPI_AST + tristate "Aspeed SPI Controller" + depends on ARCH_ASPEED + select SPI_BITBANG + help + This selects a driver for the AST SPI Controller + +config 
SPI_FMC + tristate "Aspeed FMC SPI Controller" + depends on ARCH_ASPEED + help + This selects a driver for the AST FMC SPI Controller + config SPI_ATMEL tristate "Atmel SPI Controller" depends on (ARCH_AT91 || AVR32) diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index ccf18de34e1e..3d1286e929e5 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o obj-$(CONFIG_SPI_TXX9) += spi_txx9.o obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o +obj-$(CONFIG_SPI_AST) += ast_spi.o +obj-$(CONFIG_SPI_FMC) += fmc_spi.o # ... add above this line ... # SPI protocol drivers (device/link on bus) diff --git a/drivers/spi/ast_spi.c b/drivers/spi/ast_spi.c new file mode 100644 index 000000000000..e8f80e8a5793 --- /dev/null +++ b/drivers/spi/ast_spi.c @@ -0,0 +1,416 @@ +/******************************************************************************** +* File Name : driver/spi/ast-spi.c +* Author : Ryan Chen +* Description : ASPEED SPI host driver +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +* History : +* 1. 2012/10/20 Ryan Chen create this file +* 1. 
2013/01/05 Ryan Chen modify +* +********************************************************************************/ +//#define DEBUG + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <asm/io.h> +#include <mach/ast_spi.h> +#include <plat/regs-spi.h> + +struct ast_spi_host { + void __iomem *reg; + void __iomem *buff; + struct ast_spi_driver_data *spi_data; + struct spi_master *master; + struct spi_device *spi_dev; + struct device *dev; + spinlock_t lock; +}; + +static inline void +ast_spi_write(struct ast_spi_host *spi, u32 val, u32 reg) +{ +// dev_dbg(i2c_dev->dev, "ast_i2c_write : val: %x , reg : %x \n",val,reg); + writel(val, spi->reg+ reg); +} + +static inline u32 +ast_spi_read(struct ast_spi_host *spi, u32 reg) +{ + return readl(spi->reg + reg); +} + +/* the spi->mode bits understood by this driver: */ +#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) + +static int +ast_spi_setup(struct spi_device *spi) +{ + struct ast_spi_host *host = (struct ast_spi_host *)spi_master_get_devdata(spi->master); + unsigned int bits = spi->bits_per_word; + u32 spi_ctrl; + u32 divisor; + + int err = 0; + return err; + + dev_dbg(host->dev, "ast_spi_setup() , cs %d\n", spi->chip_select); + + host->spi_dev = spi; + + spi_ctrl = ast_spi_read(host, AST_SPI_CTRL); + + if (spi->chip_select > spi->master->num_chipselect) { + dev_dbg(&spi->dev, + "setup: invalid chipselect %u (%u defined)\n", + spi->chip_select, spi->master->num_chipselect); + return -EINVAL; + } + + if (bits == 0) + bits = 8; + + if (bits < 8 || bits > 16) { + dev_dbg(&spi->dev, + "setup: invalid bits_per_word %u (8 to 16)\n", + bits); + return -EINVAL; + } + + if (spi->mode & ~MODEBITS) { + dev_dbg(&spi->dev, "setup: unsupported mode 
bits %x\n", + spi->mode & ~MODEBITS); + return -EINVAL; + } + + /* see notes above re chipselect */ + if((spi->chip_select == 0) && (spi->mode & SPI_CS_HIGH)) { + dev_dbg(&spi->dev, "setup: can't be active-high\n"); + return -EINVAL; + } + + /* + * Pre-new_1 chips start out at half the peripheral + * bus speed. + */ + + if (spi->max_speed_hz) { + /* Set the SPI slaves select and characteristic control register */ + divisor = host->spi_data->get_div(spi->max_speed_hz); + } else { + /* speed zero means "as slow as possible" */ + divisor = 15; + } + + //TODO MASK first + spi_ctrl |= (divisor << 8); + + if (spi->chip_select > (spi->master->num_chipselect - 1)) { + dev_err(&spi->dev, "chipselect %d exceed the number of chipselect master supoort\n", spi->chip_select); + return -EINVAL; + } + +#if 0 + if (SPI_CPHA & spi->mode) + cpha = SPI_CPHA_1; + else + cpha = SPI_CPHA_0; +#endif + +// if (SPI_CPOL & spi->mode) +// spi_ctrl |= SPI_CPOL_1; +// else +// spi_ctrl &= ~SPI_CPOL_1; + + //ISSUE : ast spi ctrl couldn't use mode 3, so fix mode 0 + spi_ctrl &= ~SPI_CPOL_1; + + + if (SPI_LSB_FIRST & spi->mode) + spi_ctrl |= SPI_LSB_FIRST_CTRL; + else + spi_ctrl &= ~SPI_LSB_FIRST_CTRL; + + + /* Configure SPI controller */ + ast_spi_write(host, spi_ctrl, AST_SPI_CTRL); + + dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __FUNCTION__, spi->mode, spi->bits_per_word, spi->max_speed_hz); + return err; +} + +static int ast_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct ast_spi_host *host = (struct ast_spi_host *)spi_master_get_devdata(spi->master); + struct spi_transfer *xfer; + const u8 *tx_buf; + u8 *rx_buf; + + + int i=0,j=0; + + dev_dbg(host->dev, "new message %p submitted for %s\n", + msg, spi->dev.bus_id); + + ast_spi_write(host, ast_spi_read(host, AST_SPI_CONFIG) | SPI_CONF_WRITE_EN, AST_SPI_CONFIG); +// writel( (readl(host->spi_data->ctrl_reg) | SPI_CMD_USER_MODE) | SPI_CE_INACTIVE,host->spi_data->ctrl_reg); + ast_spi_write(host, ast_spi_read(host, 
AST_SPI_CTRL) | SPI_CMD_USER_MODE, AST_SPI_CTRL); + +// writel( ~SPI_CE_INACTIVE & readl(host->spi_data->ctrl_reg),host->spi_data->ctrl_reg); + msg->actual_length = 0; + msg->status = 0; + + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + dev_dbg(host->dev, + "xfer[%d] %p: width %d, len %u, tx %p/%08x, rx %p/%08x\n", + j, xfer, + xfer->bits_per_word, xfer->len, + xfer->tx_buf, xfer->tx_dma, + xfer->rx_buf, xfer->rx_dma); + + //TX ---- + if(xfer->tx_buf) { +#if 0 + if(xfer->bits_per_word == 16) + const u16 *tx_buf; + else + const u8 *tx_buf; +#endif + tx_buf = xfer->tx_buf; + for(i=0;i<xfer->len;i++) { + dev_dbg(host->dev, "[%d] : %x \n",i, tx_buf[i]); + writeb(tx_buf[i], host->buff); +// writeb(tx_buf[i], host->spi_data->buf_reg); + } + } + udelay(1); + //RX---- + if(xfer->rx_buf) { + + rx_buf = xfer->rx_buf; + dev_dbg(host->dev, "rx len [%d] \n",xfer->len ); + for(i=0;i<xfer->len;i++) { +// rx_buf[i] = readb(host->spi_data->buf_reg); + rx_buf[i] = readb(host->buff); + dev_dbg(host->dev, "[%d] : %x \n",i, rx_buf[i]); + } + } + msg->actual_length += xfer->len; + j++; + } + +// writel( SPI_CE_INACTIVE | readl(host->spi_data->ctrl_reg),host->spi_data->ctrl_reg); + ast_spi_write(host, (ast_spi_read(host, AST_SPI_CTRL) & ~SPI_CMD_USER_MODE) | SPI_CMD_FAST_R_MODE, AST_SPI_CTRL); + + ast_spi_write(host, ast_spi_read(host, AST_SPI_CONFIG) & ~SPI_CONF_WRITE_EN, AST_SPI_CONFIG); + + msg->status = 0; + + spin_unlock(&host->lock); + msg->complete(msg->context); + spin_lock(&host->lock); + + return 0; + +} +static void ast_spi_cleanup(struct spi_device *spi) +{ + struct ast_spi_host *host = spi_master_get_devdata(spi->master); + unsigned long flags; + dev_dbg(host->dev, "ast_spi_cleanup() \n"); + + spin_lock_irqsave(&host->lock, flags); +// if (host->stay == spi) { +// host->stay = NULL; +// cs_deactivate(host, spi); +// } + spin_unlock_irqrestore(&host->lock, flags); +} + +static int ast_spi_probe(struct platform_device *pdev) +{ + struct resource *res0, *res1; 
+ struct ast_spi_host *host; + struct spi_master *master; + int err; + + dev_dbg(&pdev->dev, "ast_spi_probe() \n\n\n"); + + master = spi_alloc_master(&pdev->dev, sizeof(struct ast_spi_host)); + if (NULL == master) { + dev_err(&pdev->dev, "No memory for spi_master\n"); + err = -ENOMEM; + goto err_nomem; + } + host = spi_master_get_devdata(master); + memset(host, 0, sizeof(struct ast_spi_host)); + + res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res0) { + dev_err(&pdev->dev, "cannot get IORESOURCE_MEM 0\n"); + err = -ENXIO; + goto err_no_io_res; + } + + host->reg = ioremap(res0->start, resource_size(res0)); + if (!host->reg) { + dev_err(&pdev->dev, "cannot remap register\n"); + err = -EIO; + goto release_mem; + } + + dev_dbg(&pdev->dev, "remap phy %x, virt %x \n",(u32)res0->start, (u32)host->reg); + + res1 = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res1) { + dev_err(&pdev->dev, "cannot get IORESOURCE_IO 0\n"); + return -ENXIO; + } + + host->buff = ioremap(res1->start, resource_size(res1)); + if (!host->buff) { + dev_err(&pdev->dev, "cannot remap buffer \n"); + err = -EIO; + goto release_mem; + } + + dev_dbg(&pdev->dev, "remap io phy %x, virt %x \n",(u32)res1->start, (u32)host->buff); + + host->master = spi_master_get(master); + host->master->bus_num = pdev->id; + host->master->num_chipselect = 1; + host->dev = &pdev->dev; + + /* Setup the state for bitbang driver */ + host->master->setup = ast_spi_setup; + host->master->transfer = ast_spi_transfer; + host->master->cleanup = ast_spi_cleanup; + + /* Find and claim our resources */ + host->spi_data = pdev->dev.driver_data; + + platform_set_drvdata(pdev, host); + + /* Register our spi controller */ + err = spi_register_master(host->master); + if (err) { + dev_err(&pdev->dev, "failed to register SPI master\n"); + goto err_register; + } + + dev_dbg(&pdev->dev, "ast_spi_probe() return \n\n\n"); + + return 0; + +err_register: + spi_master_put(host->master); + iounmap(host->reg); + 
iounmap(host->buff); + +release_mem: + release_mem_region(res0->start, res0->end - res0->start + 1); + release_mem_region(res1->start, res1->end - res1->start + 1); + +err_no_io_res: + kfree(master); + kfree(host); + +err_nomem: + return err; + +} + +static int +ast_spi_remove(struct platform_device *pdev) +{ + struct resource *res0, *res1; + struct ast_spi_host *host = platform_get_drvdata(pdev); + + dev_dbg(host->dev, "ast_spi_remove()\n"); + + if (!host) + return -1; + + res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res1 = platform_get_resource(pdev, IORESOURCE_IO, 0); + release_mem_region(res0->start, res0->end - res0->start + 1); + release_mem_region(res1->start, res1->end - res1->start + 1); + iounmap(host->reg); + iounmap(host->buff); + + platform_set_drvdata(pdev, NULL); + spi_unregister_master(host->master); + spi_master_put(host->master); + return 0; +} + +#ifdef CONFIG_PM +static int +ast_spi_suspend(struct platform_device *pdev, pm_message_t msg) +{ + return 0; +} + +static int +ast_spi_resume(struct platform_device *pdev) +{ + return 0; +} +#else +#define ast_spi_suspend NULL +#define ast_spi_resume NULL +#endif + +static struct platform_driver ast_spi_driver = { + .probe = ast_spi_probe, + .remove = ast_spi_remove, + .suspend = ast_spi_suspend, + .resume = ast_spi_resume, + .driver = { + .name = "ast-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init +ast_spi_init(void) +{ + return platform_driver_register(&ast_spi_driver); +} + +static void __exit +ast_spi_exit(void) +{ + platform_driver_unregister(&ast_spi_driver); +} + +subsys_initcall(ast_spi_init); +//module_init(ast_spi_init); +module_exit(ast_spi_exit); + +MODULE_DESCRIPTION("AST SPI Driver"); +MODULE_AUTHOR("Ryan Chen"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/fmc_spi.c b/drivers/spi/fmc_spi.c new file mode 100644 index 000000000000..ccc0d1cfebf1 --- /dev/null +++ b/drivers/spi/fmc_spi.c @@ -0,0 +1,436 @@ 
+/******************************************************************************** +* File Name : driver/spi/ast-spi.c +* Author : Ryan Chen +* Description : ASPEED SPI host driver +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +* History : +* 1. 2012/10/20 Ryan Chen create this file +* 1. 
2013/01/05 Ryan Chen modify +* +********************************************************************************/ +//#define DEBUG + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <asm/io.h> +#include <mach/ast_spi.h> +#include <plat/regs-spi.h> + +//IN FMC SPI always control offset 0x00 + +struct fmc_spi_host { + void __iomem *reg; + void __iomem *buff; + struct ast_spi_driver_data *spi_data; + struct spi_master *master; + struct spi_device *spi_dev; + struct device *dev; + spinlock_t lock; +}; + +static inline void +fmc_spi_write(struct fmc_spi_host *host, u32 val, u32 reg) +{ +// printk("write : val: %x , offset : %x \n",val, reg); + writel(val, host->reg + reg); +} + +static inline u32 +fmc_spi_read(struct fmc_spi_host *host, u32 reg) +{ + return readl(host->reg + reg); +} + +/* the spi->mode bits understood by this driver: */ +#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) + +static int +fmc_spi_setup(struct spi_device *spi) +{ + struct fmc_spi_host *host = (struct fmc_spi_host *)spi_master_get_devdata(spi->master); + unsigned int bits = spi->bits_per_word; + u32 spi_ctrl; + u32 divisor; + + int err = 0; + dev_dbg(host->dev, "fmc_spi_setup() ======================>>\n"); + + + dev_dbg(host->dev, "fmc_spi_setup() ======================>>\n"); + + host->spi_dev = spi; + + spi_ctrl = fmc_spi_read(host, 0x00); +// printk("trl : %x \n",spi_ctrl); + + if (spi->chip_select > spi->master->num_chipselect) { + dev_dbg(&spi->dev, + "setup: invalid chipselect %u (%u defined)\n", + spi->chip_select, spi->master->num_chipselect); + return -EINVAL; + } + + if (bits == 0) + bits = 8; + + if (bits < 8 || bits > 16) { + dev_dbg(&spi->dev, + "setup: invalid bits_per_word %u (8 to 16)\n", 
+ bits); + return -EINVAL; + } + + if (spi->mode & ~MODEBITS) { + dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", + spi->mode & ~MODEBITS); + return -EINVAL; + } + + /* see notes above re chipselect */ + if((spi->chip_select == 0) && (spi->mode & SPI_CS_HIGH)) { + dev_dbg(&spi->dev, "setup: can't be active-high\n"); + return -EINVAL; + } + + /* + * Pre-new_1 chips start out at half the peripheral + * bus speed. + */ + + if (spi->max_speed_hz) { + /* Set the SPI slaves select and characteristic control register */ + divisor = host->spi_data->get_div(spi->max_speed_hz); + } else { + /* speed zero means "as slow as possible" */ + divisor = 15; + } + + spi_ctrl &= ~SPI_CLK_DIV_MASK; +// printk("set div %x \n",divisor); + //TODO MASK first + spi_ctrl |= SPI_CLK_DIV(divisor); + + if (spi->chip_select > (spi->master->num_chipselect - 1)) { + dev_err(&spi->dev, "chipselect %d exceed the number of chipselect master supoort\n", spi->chip_select); + return -EINVAL; + } + +#if 0 + if (SPI_CPHA & spi->mode) + cpha = SPI_CPHA_1; + else + cpha = SPI_CPHA_0; +#endif + +// if (SPI_CPOL & spi->mode) +// spi_ctrl |= SPI_CPOL_1; +// else +// spi_ctrl &= ~SPI_CPOL_1; + + //ISSUE : ast spi ctrl couldn't use mode 3, so fix mode 0 + spi_ctrl &= ~SPI_CPOL_1; + + + if (SPI_LSB_FIRST & spi->mode) + spi_ctrl |= SPI_LSB_FIRST_CTRL; + else + spi_ctrl &= ~SPI_LSB_FIRST_CTRL; + + + /* Configure SPI controller */ + fmc_spi_write(host, spi_ctrl, 0x00); + + dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __FUNCTION__, spi->mode, spi->bits_per_word, spi->max_speed_hz); + return err; +} + +static int fmc_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct fmc_spi_host *host = (struct fmc_spi_host *)spi_master_get_devdata(spi->master); + struct spi_transfer *xfer; + const u8 *tx_buf; + u8 *rx_buf; + unsigned long flags; + + int i=0,j=0; + + dev_dbg(host->dev, "new message %p submitted for %s \n", + msg, dev_name(&spi->dev)); + + spin_lock_irqsave(&host->lock, flags); 
+// writel( (readl(host->spi_data->ctrl_reg) | SPI_CMD_USER_MODE) | SPI_CE_INACTIVE,host->spi_data->ctrl_reg); + fmc_spi_write(host, fmc_spi_read(host, 0x00) | SPI_CMD_USER_MODE, 0x00); + msg->actual_length = 0; + msg->status = 0; + +// writel( ~SPI_CE_INACTIVE & readl(host->spi_data->ctrl_reg),host->spi_data->ctrl_reg); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + dev_dbg(host->dev, + "xfer[%d] %p: width %d, len %u, tx %p/%08x, rx %p/%08x\n", + j, xfer, + xfer->bits_per_word, xfer->len, + xfer->tx_buf, xfer->tx_dma, + xfer->rx_buf, xfer->rx_dma); + + tx_buf = xfer->tx_buf; + rx_buf = xfer->rx_buf; + + + if(tx_buf != 0) { +#if 0 + printk("tx : "); + if(xfer->len > 10) { + for(i=0;i<10;i++) + printk("%x ",tx_buf[i]); + } else { + for(i=0;i<xfer->len;i++) + printk("%x ",tx_buf[i]); + } + printk("\n"); +#endif + for(i=0;i<xfer->len;i++) { + writeb(tx_buf[i], host->buff); + } + } + //Issue need clarify + udelay(1); + if(rx_buf != 0) { + for(i=0;i<xfer->len;i++) { + rx_buf[i] = readb(host->buff); + } +#if 0 + printk("rx : "); + if(xfer->len > 10) { + for(i=0;i<10;i++) + printk(" %x",rx_buf[i]); + } else { + for(i=0;i<xfer->len;i++) + printk(" %x",rx_buf[i]); + } + printk("\n"); +#endif + } + dev_dbg(host->dev,"old msg->actual_length %d , +len %d \n",msg->actual_length, xfer->len); + msg->actual_length += xfer->len; + dev_dbg(host->dev,"new msg->actual_length %d \n",msg->actual_length); +// j++; + + } + +// writel( SPI_CE_INACTIVE | readl(host->spi_data->ctrl_reg),host->spi_data->ctrl_reg); + fmc_spi_write(host, (fmc_spi_read(host, 0x00) & ~SPI_CMD_USER_MODE), 0x00); + msg->status = 0; + + msg->complete(msg->context); + +// spin_unlock(&host->lock); + spin_unlock_irqrestore(&host->lock, flags); + + + + + return 0; + +} +static void fmc_spi_cleanup(struct spi_device *spi) +{ + struct fmc_spi_host *host = spi_master_get_devdata(spi->master); + unsigned long flags; + dev_dbg(host->dev, "fmc_spi_cleanup() \n"); + + spin_lock_irqsave(&host->lock, flags); 
+// if (host->stay == spi) { +// host->stay = NULL; +// cs_deactivate(host, spi); +// } + spin_unlock_irqrestore(&host->lock, flags); +} + +static int fmc_spi_probe(struct platform_device *pdev) +{ + struct resource *res0, *res1=0; + struct fmc_spi_host *host; + struct spi_master *master; + int err; + + dev_dbg(&pdev->dev, "fmc_spi_probe() \n\n\n"); + + master = spi_alloc_master(&pdev->dev, sizeof(struct fmc_spi_host)); + if (NULL == master) { + dev_err(&pdev->dev, "No memory for spi_master\n"); + err = -ENOMEM; + goto err_nomem; + } + host = spi_master_get_devdata(master); + memset(host, 0, sizeof(struct fmc_spi_host)); + + res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res0) { + dev_err(&pdev->dev, "cannot get IORESOURCE_MEM 0\n"); + err = -ENXIO; + goto err_no_io_res; + } + + host->reg = ioremap(res0->start, resource_size(res0)); + if (!host->reg) { + dev_err(&pdev->dev, "cannot remap register\n"); + err = -EIO; + goto release_mem; + } + + dev_dbg(&pdev->dev, "remap phy %x, virt %x \n",(u32)res0->start, (u32)host->reg); + + res1 = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res1) { + dev_err(&pdev->dev, "cannot get IORESOURCE_IO 0\n"); + return -ENXIO; + } + + host->buff = ioremap(res1->start, resource_size(res1)); + if (!host->buff) { + dev_err(&pdev->dev, "cannot remap buffer \n"); + err = -EIO; + goto release_mem; + } + + dev_dbg(&pdev->dev, "remap io phy %x, virt %x \n",(u32)res1->start, (u32)host->buff); + + host->master = spi_master_get(master); + host->master->bus_num = pdev->id; + host->master->num_chipselect = 1; + host->dev = &pdev->dev; + + /* Setup the state for bitbang driver */ + host->master->setup = fmc_spi_setup; + host->master->transfer = fmc_spi_transfer; + host->master->cleanup = fmc_spi_cleanup; + + /* Find and claim our resources */ + host->spi_data = pdev->dev.platform_data; + + platform_set_drvdata(pdev, host); + + /* Register our spi controller */ + err = spi_register_master(host->master); + if (err) { + 
dev_err(&pdev->dev, "failed to register SPI master\n"); + goto err_register; + } + + dev_dbg(&pdev->dev, "fmc_spi_probe() return \n\n\n"); + + return 0; + +err_register: + spi_master_put(host->master); + iounmap(host->reg); + iounmap(host->buff); + +release_mem: + release_mem_region(res0->start, res0->end - res0->start + 1); + release_mem_region(res1->start, res1->end - res1->start + 1); + +err_no_io_res: + kfree(master); + kfree(host); + +err_nomem: + return err; + +} + +static int +fmc_spi_remove(struct platform_device *pdev) +{ + struct resource *res0, *res1; + struct fmc_spi_host *host = platform_get_drvdata(pdev); + + dev_dbg(host->dev, "fmc_spi_remove()\n"); + + if (!host) + return -1; + + res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res1 = platform_get_resource(pdev, IORESOURCE_IO, 0); + release_mem_region(res0->start, res0->end - res0->start + 1); + release_mem_region(res1->start, res1->end - res1->start + 1); + iounmap(host->reg); + iounmap(host->buff); + + platform_set_drvdata(pdev, NULL); + spi_unregister_master(host->master); + spi_master_put(host->master); + return 0; +} + +#ifdef CONFIG_PM +static int +fmc_spi_suspend(struct platform_device *pdev, pm_message_t msg) +{ + return 0; +} + +static int +fmc_spi_resume(struct platform_device *pdev) +{ + return 0; +} +#else +#define fmc_spi_suspend NULL +#define fmc_spi_resume NULL +#endif + +static struct platform_driver fmc_spi_driver = { + .probe = fmc_spi_probe, + .remove = fmc_spi_remove, + .suspend = fmc_spi_suspend, + .resume = fmc_spi_resume, + .driver = { + .name = "fmc-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init +fmc_spi_init(void) +{ + return platform_driver_register(&fmc_spi_driver); +} + +static void __exit +fmc_spi_exit(void) +{ + platform_driver_unregister(&fmc_spi_driver); +} + +subsys_initcall(fmc_spi_init); +//module_init(fmc_spi_init); +module_exit(fmc_spi_exit); + +MODULE_DESCRIPTION("FMC SPI Driver"); +MODULE_AUTHOR("Ryan Chen"); +MODULE_LICENSE("GPL"); diff 
--git a/drivers/spi/spi.c b/drivers/spi/spi.c index 3734dc9708e1..ec88fc7fb963 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -652,54 +652,61 @@ static u8 *buf; * Performance-sensitive or bulk transfer code should instead use * spi_{async,sync}() calls with dma-safe buffers. */ -int spi_write_then_read(struct spi_device *spi, - const u8 *txbuf, unsigned n_tx, - u8 *rxbuf, unsigned n_rx) -{ - static DEFINE_MUTEX(lock); - - int status; - struct spi_message message; - struct spi_transfer x; - u8 *local_buf; - - /* Use preallocated DMA-safe buffer. We can't avoid copying here, - * (as a pure convenience thing), but we can keep heap costs - * out of the hot path ... - */ - if ((n_tx + n_rx) > SPI_BUFSIZ) - return -EINVAL; - - spi_message_init(&message); - memset(&x, 0, sizeof x); - x.len = n_tx + n_rx; - spi_message_add_tail(&x, &message); - - /* ... unless someone else is using the pre-allocated buffer */ - if (!mutex_trylock(&lock)) { - local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); - if (!local_buf) - return -ENOMEM; - } else - local_buf = buf; - - memcpy(local_buf, txbuf, n_tx); - x.tx_buf = local_buf; - x.rx_buf = local_buf; - - /* do the i/o */ - status = spi_sync(spi, &message); - if (status == 0) - memcpy(rxbuf, x.rx_buf + n_tx, n_rx); - - if (x.tx_buf == buf) - mutex_unlock(&lock); - else - kfree(local_buf); + int spi_write_then_read(struct spi_device *spi, + const u8 *txbuf, unsigned n_tx, + u8 *rxbuf, unsigned n_rx) + { + static DEFINE_MUTEX(lock); + + int status; + struct spi_message message; + struct spi_transfer x[2]; + u8 *local_buf; + + /* Use preallocated DMA-safe buffer. We can't avoid copying here, + * (as a pure convenience thing), but we can keep heap costs + * out of the hot path ... 
+ */ + if ((n_tx + n_rx) > SPI_BUFSIZ) + return -EINVAL; + + spi_message_init(&message); + memset(x, 0, sizeof x); + if (n_tx) { + x[0].len = n_tx; + spi_message_add_tail(&x[0], &message); + } + if (n_rx) { + x[1].len = n_rx; + spi_message_add_tail(&x[1], &message); + } + + /* ... unless someone else is using the pre-allocated buffer */ + if (!mutex_trylock(&lock)) { + local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); + if (!local_buf) + return -ENOMEM; + } else + local_buf = buf; + + memcpy(local_buf, txbuf, n_tx); + x[0].tx_buf = local_buf; + x[1].rx_buf = local_buf + n_tx; + + /* do the i/o */ + status = spi_sync(spi, &message); + if (status == 0) + memcpy(rxbuf, x[1].rx_buf, n_rx); + + if (x[0].tx_buf == buf) + mutex_unlock(&lock); + else + kfree(local_buf); + + return status; + } + EXPORT_SYMBOL_GPL(spi_write_then_read); - return status; -} -EXPORT_SYMBOL_GPL(spi_write_then_read); /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 289d81adfb9c..2cba323ffb5e 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -56,6 +56,7 @@ config USB_ARCH_HAS_EHCI default y if PPC_83xx default y if SOC_AU1200 default y if ARCH_IXP4XX + default y if ARCH_ASPEED default PCI # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. 
@@ -102,6 +103,8 @@ source "drivers/usb/wusbcore/Kconfig" source "drivers/usb/host/Kconfig" +source "drivers/usb/astuhci/Kconfig" + source "drivers/usb/musb/Kconfig" source "drivers/usb/class/Kconfig" diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 8b7c419b876e..f6f180ceb1f1 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -8,6 +8,8 @@ obj-$(CONFIG_USB) += core/ obj-$(CONFIG_USB_MON) += mon/ +obj-$(CONFIG_AST_USB_UHCI_HCD) += astuhci/ + obj-$(CONFIG_PCI) += host/ obj-$(CONFIG_USB_EHCI_HCD) += host/ obj-$(CONFIG_USB_ISP116X_HCD) += host/ diff --git a/drivers/usb/astuhci/Kconfig b/drivers/usb/astuhci/Kconfig new file mode 100644 index 000000000000..69c6d79b5e27 --- /dev/null +++ b/drivers/usb/astuhci/Kconfig @@ -0,0 +1,56 @@ +# +# USB Host Controller Drivers +# +comment "AST USB Drivers" + depends on USB + + +config AST_USB_UHCI_HCD + tristate "AST UHCI (USB 1.1) support" + depends on USB + ---help--- + The AST Universal Host Controller Interface (UHCI) is standard for + USB 1.1 host controller hardware. It is an embedded HC based on AMBA bus. + You may want to read <file:Documentation/usb/uhci.txt>. + + To compile this driver as a module, choose M here: the + module will be called uhci-hcd. + +choice + prompt "Config AST USB UHCI Number of Ports" + default AST_USB_UHCI_MULTIPORT_4 + +config AST_USB_UHCI_MULTIPORT_1 + bool "AST UHCI support 1 ports" + depends on AST_USB_UHCI_HCD + +config AST_USB_UHCI_MULTIPORT_2 + bool "AST UHCI support 2 ports" + depends on AST_USB_UHCI_HCD + +config AST_USB_UHCI_MULTIPORT_4 + bool "AST UHCI support 4 ports" + depends on AST_USB_UHCI_HCD + +endchoice + +config USB_EHCI_SPLIT_ISO + bool "Full speed ISO transactions (EXPERIMENTAL)" + depends on USB_EHCI_HCD + default n + ---help--- + This code is new and hasn't been used with many different + EHCI or USB 2.0 transaction translator implementations. + It should work for ISO-OUT transfers, like audio. 
+ +config USB_EHCI_ROOT_HUB_TT + bool "Root Hub Transaction Translators (EXPERIMENTAL)" + depends on USB_EHCI_HCD + ---help--- + Some EHCI chips have vendor-specific extensions to integrate + transaction translators, so that no OHCI or UHCI companion + controller is needed. It's safe to say "y" even if your + controller doesn't support this feature. + + This supports the EHCI implementation from TransDimension Inc. + diff --git a/drivers/usb/astuhci/Makefile b/drivers/usb/astuhci/Makefile new file mode 100644 index 000000000000..6b858f718991 --- /dev/null +++ b/drivers/usb/astuhci/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for USB Host Controller Drivers +# + +ifeq ($(CONFIG_USB_DEBUG),y) + EXTRA_CFLAGS += -DDEBUG +endif + + +obj-$(CONFIG_AST_USB_UHCI_HCD) += uhci-hcd.o diff --git a/drivers/usb/astuhci/uhci-debug.c b/drivers/usb/astuhci/uhci-debug.c new file mode 100644 index 000000000000..c617644755eb --- /dev/null +++ b/drivers/usb/astuhci/uhci-debug.c @@ -0,0 +1,595 @@ +/********************************************************************************
+* File Name : uhci-debug.c
+*
+* port from uhci-debug.c
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by the Free Software Foundation;
+* either version 2 of the License, or (at your option) any later version.
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+* without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+********************************************************************************/
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/smp_lock.h>
+#include <asm/io.h>
+
+#include "uhci-hcd.h"
+
+/*
+ * Stub for the debugfs file_operations: taking its address as
+ * &uhci_debug_operations expands to ((const struct file_operations *)NULL)
+ * (the address-of cancels the dereference), so callers get a typed NULL
+ * and the object itself is never evaluated.  NOTE(review): presumably the
+ * real fops table is only provided in a DEBUG build -- verify against the
+ * debugfs registration site.
+ */
+#define uhci_debug_operations (* (const struct file_operations *) NULL)
+/* Root directory for this driver's debugfs entries (NULL until created). */
+static struct dentry *uhci_debugfs_root;
+
+#ifdef DEBUG
+
+/* Handle REALLY large printks so we don't overflow buffers */
+static void lprintk(char *buf)
+{
+ char *newline;
+
+ /*
+ * Emit the buffer one line per printk() call so very large dumps
+ * never overflow the log machinery.  Destructive: every '\n' in
+ * @buf is overwritten with '\0' as we go.
+ */
+ for (; buf; buf = newline ? newline + 1 : NULL) {
+ newline = strchr(buf, '\n');
+ if (newline)
+ *newline = 0;
+ printk(KERN_DEBUG "%s\n", buf);
+ }
+}
+
+/*
+ * Format one transfer descriptor (TD) as a single text line into @buf.
+ * @td: descriptor to dump; @buf: destination buffer; @len: bytes available
+ * in @buf; @space: leading-space indentation width for the line.
+ * Returns the number of characters written, or 0 when @len < 160 -- a
+ * heuristic floor, not an exact worst-case bound for the sprintf()s below.
+ */
+static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
+{
+ char *out = buf;
+ char *spid;
+ u32 status, token;
+
+ /* Try to make sure there's enough memory */
+ if (len < 160)
+ return 0;
+
+ status = td_status(td);
+ /* "e%d" below is the field at status bits 28:27; the trailing
+ * "Length=%x" is the low 11 bits (actual length).  Flag names map
+ * 1:1 to the TD_CTRL_* status bits tested here. */
+ out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, le32_to_cpu(td->link));
+ out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
+ ((status >> 27) & 3),
+ (status & TD_CTRL_SPD) ? "SPD " : "",
+ (status & TD_CTRL_LS) ? "LS " : "",
+ (status & TD_CTRL_IOC) ? "IOC " : "",
+ (status & TD_CTRL_ACTIVE) ? "Active " : "",
+ (status & TD_CTRL_STALLED) ? "Stalled " : "",
+ (status & TD_CTRL_DBUFERR) ? "DataBufErr " : "",
+ (status & TD_CTRL_BABBLE) ? "Babble " : "",
+ (status & TD_CTRL_NAK) ? "NAK " : "",
+ (status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
+ (status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
+ status & 0x7ff);
+
+ /* Decode the token word: packet ID, then the bit fields unpacked in
+ * the sprintf() below (MaxLen 31:21, DT 19, EndPt 18:15, Dev 14:8). */
+ token = td_token(td);
+ switch (uhci_packetid(token)) {
+ case USB_PID_SETUP:
+ spid = "SETUP";
+ break;
+ case USB_PID_OUT:
+ spid = "OUT";
+ break;
+ case USB_PID_IN:
+ spid = "IN";
+ break;
+ default:
+ spid = "?";
+ break;
+ }
+
+ out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
+ token >> 21,
+ ((token >> 19) & 1),
+ (token >> 15) & 15,
+ (token >> 8) & 127,
+ (token & 0xff),
+ spid);
+ out += sprintf(out, "(buf=%08x)\n", le32_to_cpu(td->buffer));
+
+ /* Characters written. */
+ return out - buf;
+}
+
+/*
+ * Format the driver-private state of one URB (pointers, pipe decode, and
+ * a summary of its TD list) into @buf.
+ * @urbp: driver-private URB state; @buf: destination buffer; @len: bytes
+ * available; @space: indentation width for nested TD lines.
+ * Returns characters written, or 0 when @len < 200 (heuristic space check).
+ * Only the first 10 TDs are dumped in full unless the module-level 'debug'
+ * knob (defined elsewhere in this driver) is > 2; the rest are merely
+ * counted and summarized.  ISO transfers never get per-TD dumps here.
+ */
+static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
+{
+ char *out = buf;
+ struct uhci_td *td;
+ int i, nactive, ninactive;
+ char *ptype;
+
+ if (len < 200)
+ return 0;
+
+ out += sprintf(out, "urb_priv [%p] ", urbp);
+ out += sprintf(out, "urb [%p] ", urbp->urb);
+ out += sprintf(out, "qh [%p] ", urbp->qh);
+ out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
+ out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
+ (usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
+
+ /* Three-letter transfer-type tag; unknown pipe types fall through
+ * to the control label via the shared default: case. */
+ switch (usb_pipetype(urbp->urb->pipe)) {
+ case PIPE_ISOCHRONOUS: ptype = "ISO"; break;
+ case PIPE_INTERRUPT: ptype = "INT"; break;
+ case PIPE_BULK: ptype = "BLK"; break;
+ default:
+ case PIPE_CONTROL: ptype = "CTL"; break;
+ }
+
+ out += sprintf(out, "%s%s", ptype, (urbp->fsbr ? " FSBR" : ""));
+ out += sprintf(out, " Actlen=%d", urbp->urb->actual_length);
+
+ if (urbp->urb->unlinked)
+ out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked);
+ out += sprintf(out, "\n");
+
+ /* Walk the TD list: dump the first few fully, count the rest by
+ * whether their ACTIVE status bit is still set. */
+ i = nactive = ninactive = 0;
+ list_for_each_entry(td, &urbp->td_list, list) {
+ if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC &&
+ (++i <= 10 || debug > 2)) {
+ out += sprintf(out, "%*s%d: ", space + 2, "", i);
+ out += uhci_show_td(td, out, len - (out - buf), 0);
+ } else {
+ if (td_status(td) & TD_CTRL_ACTIVE)
+ ++nactive;
+ else
+ ++ninactive;
+ }
+ }
+ if (nactive + ninactive > 0)
+ out += sprintf(out, "%*s[skipped %d inactive and %d active "
+ "TDs]\n",
+ space, "", ninactive, nactive);
+
+ /* Characters written. */
+ return out - buf;
+}
+
+/*
+ * Dump one QH: its link/element pointers, sanity-checks on the element
+ * pointer bits, and (up to 10 of) the queued URBs.  Also dumps the dummy
+ * TD if present.  Returns bytes written, or 0 on insufficient room.
+ */
+static int uhci_show_qh(struct uhci_hcd *uhci,
+  struct uhci_qh *qh, char *buf, int len, int space)
+{
+ char *out = buf;
+ int i, nurbs;
+ __le32 element = qh_element(qh);
+ char *qtype;
+
+ /* Try to make sure there's enough memory */
+ if (len < 80 * 7)
+  return 0;
+
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_ISOC: qtype = "ISO"; break;
+ case USB_ENDPOINT_XFER_INT: qtype = "INT"; break;
+ case USB_ENDPOINT_XFER_BULK: qtype = "BLK"; break;
+ case USB_ENDPOINT_XFER_CONTROL: qtype = "CTL"; break;
+ default: qtype = "Skel" ; break;
+ }
+
+ out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n",
+   space, "", qh, qtype,
+   le32_to_cpu(qh->link), le32_to_cpu(element));
+ if (qh->type == USB_ENDPOINT_XFER_ISOC)
+  out += sprintf(out, "%*s period %d phase %d load %d us, "
+    "frame %x desc [%p]\n",
+    space, "", qh->period, qh->phase, qh->load,
+    qh->iso_frame, qh->iso_packet_desc);
+ else if (qh->type == USB_ENDPOINT_XFER_INT)
+  out += sprintf(out, "%*s period %d phase %d load %d us\n",
+    space, "", qh->period, qh->phase, qh->load);
+
+ /* Flag element-pointer bit combinations that should never occur. */
+ if (element & UHCI_PTR_QH)
+  out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
+
+ if (element & UHCI_PTR_DEPTH)
+  out += sprintf(out, "%*s Depth traverse\n", space, "");
+
+ if (element & cpu_to_le32(8))
+  out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, "");
+
+ if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH)))
+  out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
+
+ if (list_empty(&qh->queue)) {
+  out += sprintf(out, "%*s queue is empty\n", space, "");
+  if (qh == uhci->skel_async_qh)
+   out += uhci_show_td(uhci->term_td, out,
+     len - (out - buf), 0);
+ } else {
+  struct urb_priv *urbp = list_entry(qh->queue.next,
+    struct urb_priv, node);
+  struct uhci_td *td = list_entry(urbp->td_list.next,
+    struct uhci_td, list);
+
+  /* The element pointer should track the first queued TD. */
+  if (element != LINK_TO_TD(td))
+   out += sprintf(out, "%*s Element != First TD\n",
+     space, "");
+  i = nurbs = 0;
+  list_for_each_entry(urbp, &qh->queue, node) {
+   if (++i <= 10)
+    out += uhci_show_urbp(urbp, out,
+      len - (out - buf), space + 2);
+   else
+    ++nurbs;
+  }
+  if (nurbs > 0)
+   out += sprintf(out, "%*s Skipped %d URBs\n",
+     space, "", nurbs);
+ }
+
+ if (qh->dummy_td) {
+  out += sprintf(out, "%*s Dummy TD\n", space, "");
+  out += uhci_show_td(qh->dummy_td, out, len - (out - buf), 0);
+ }
+
+ return out - buf;
+}
+
+/*
+ * Decode a USBPORTSC port status/control value into a one-line summary.
+ * Returns the number of bytes written, or 0 if len is too small.
+ */
+static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
+{
+ int nchars;
+
+ /* Try to make sure there's enough memory */
+ if (len < 160)
+  return 0;
+
+ nchars = sprintf(buf, " stat%d = %04x %s%s%s%s%s%s%s%s%s%s\n",
+  port,
+  status,
+  (status & USBPORTSC_SUSP) ? " Suspend" : "",
+  (status & USBPORTSC_OCC) ? " OverCurrentChange" : "",
+  (status & USBPORTSC_OC) ? " OverCurrent" : "",
+  (status & USBPORTSC_PR) ? " Reset" : "",
+  (status & USBPORTSC_LSDA) ? " LowSpeed" : "",
+  (status & USBPORTSC_RD) ? " ResumeDetect" : "",
+  (status & USBPORTSC_PEC) ? " EnableChange" : "",
+  (status & USBPORTSC_PE) ? " Enabled" : "",
+  (status & USBPORTSC_CSC) ? " ConnectChange" : "",
+  (status & USBPORTSC_CCS) ? " Connected" : "");
+
+ return nchars;
+}
+
+/*
+ * Print one line describing the current root-hub state and whether
+ * full-speed bandwidth reclamation (FSBR) is enabled.  Returns bytes
+ * written, or 0 if fewer than 60 bytes of room remain.
+ */
+static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len)
+{
+ const char *name;
+
+ /* Try to make sure there's enough memory */
+ if (len < 60)
+  return 0;
+
+ switch (uhci->rh_state) {
+ case UHCI_RH_RESET:
+  name = "reset";
+  break;
+ case UHCI_RH_SUSPENDED:
+  name = "suspended";
+  break;
+ case UHCI_RH_AUTO_STOPPED:
+  name = "auto-stopped";
+  break;
+ case UHCI_RH_RESUMING:
+  name = "resuming";
+  break;
+ case UHCI_RH_SUSPENDING:
+  name = "suspending";
+  break;
+ case UHCI_RH_RUNNING:
+  name = "running";
+  break;
+ case UHCI_RH_RUNNING_NODEVS:
+  name = "running, no devs";
+  break;
+ default:
+  name = "?";
+  break;
+ }
+
+ return sprintf(buf, "Root-hub state: %s FSBR: %d\n",
+   name, uhci->fsbr_is_on);
+}
+
+/*
+ * Dump the controller's operational registers (USBCMD, USBSTS, USBINTR,
+ * USBFRNUM, USBFLBASEADD, USBSOF, both port status registers) plus the
+ * driver's frame counters.  Returns bytes written, or 0 on low space.
+ *
+ * NOTE(review): this function still reads via inw/inl on uhci->io_addr,
+ * while the rest of this port was converted to readl on uhci->regbase —
+ * confirm whether these PIO accessors actually work on this SoC.
+ */
+static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
+{
+ char *out = buf;
+ unsigned long io_addr = uhci->io_addr;
+ unsigned short usbcmd, usbstat, usbint, usbfrnum;
+ unsigned int flbaseadd;
+ unsigned char sof;
+ unsigned short portsc1, portsc2;
+
+ /* Try to make sure there's enough memory */
+ if (len < 80 * 9)
+  return 0;
+
+ usbcmd = inw(io_addr + 0);
+ usbstat = inw(io_addr + 2);
+ usbint = inw(io_addr + 4);
+ usbfrnum = inw(io_addr + 6);
+ flbaseadd = inl(io_addr + 8);
+ sof = inb(io_addr + 12);
+ portsc1 = inw(io_addr + 16);
+ portsc2 = inw(io_addr + 18);
+
+ out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n",
+  usbcmd,
+  (usbcmd & USBCMD_MAXP) ? "Maxp64 " : "Maxp32 ",
+  (usbcmd & USBCMD_CF) ? "CF " : "",
+  (usbcmd & USBCMD_SWDBG) ? "SWDBG " : "",
+  (usbcmd & USBCMD_FGR) ? "FGR " : "",
+  (usbcmd & USBCMD_EGSM) ? "EGSM " : "",
+  (usbcmd & USBCMD_GRESET) ? "GRESET " : "",
+  (usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
+  (usbcmd & USBCMD_RS) ? "RS " : "");
+
+ out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n",
+  usbstat,
+  (usbstat & USBSTS_HCH) ? "HCHalted " : "",
+  (usbstat & USBSTS_HCPE) ? "HostControllerProcessError " : "",
+  (usbstat & USBSTS_HSE) ? "HostSystemError " : "",
+  (usbstat & USBSTS_RD) ? "ResumeDetect " : "",
+  (usbstat & USBSTS_ERROR) ? "USBError " : "",
+  (usbstat & USBSTS_USBINT) ? "USBINT " : "");
+
+ out += sprintf(out, " usbint = %04x\n", usbint);
+ /* Frame number: bit 10 shown separately, low 10 bits scaled by 4. */
+ out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
+  0xfff & (4*(unsigned int)usbfrnum));
+ out += sprintf(out, " flbaseadd = %08x\n", flbaseadd);
+ out += sprintf(out, " sof = %02x\n", sof);
+ out += uhci_show_sc(1, portsc1, out, len - (out - buf));
+ out += uhci_show_sc(2, portsc2, out, len - (out - buf));
+ out += sprintf(out, "Most recent frame: %x (%d) "
+   "Last ISO frame: %x (%d)\n",
+   uhci->frame_number, uhci->frame_number & 1023,
+   uhci->last_iso_frame, uhci->last_iso_frame & 1023);
+
+ return out - buf;
+}
+
+/*
+ * Dump the entire driver state into buf: root-hub state, HC registers,
+ * periodic load table, the frame list (with link-consistency checks),
+ * and all skeleton QHs with their queued QHs.  Returns bytes written.
+ *
+ * Fix: removed a stray trailing line-continuation backslash after the
+ * "- skel_%s_qh" sprintf statement, which spliced the following line
+ * into the same source line.
+ */
+static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
+{
+ char *out = buf;
+ int i, j;
+ struct uhci_qh *qh;
+ struct uhci_td *td;
+ struct list_head *tmp, *head;
+ int nframes, nerrs;
+ __le32 link;
+ __le32 fsbr_link;
+
+ static const char * const qh_names[] = {
+  "unlink", "iso", "int128", "int64", "int32", "int16",
+  "int8", "int4", "int2", "async", "term"
+ };
+
+ out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
+ out += sprintf(out, "HC status\n");
+ out += uhci_show_status(uhci, out, len - (out - buf));
+
+ out += sprintf(out, "Periodic load table\n");
+ for (i = 0; i < MAX_PHASE; ++i) {
+  out += sprintf(out, "\t%d", uhci->load[i]);
+  if (i % 8 == 7)
+   *out++ = '\n';
+ }
+ out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n",
+   uhci->total_load,
+   uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
+   uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
+ if (debug <= 1)
+  return out - buf;
+
+ /* Walk the frame list; dump the first 10 frames in full and only
+  * count link errors in the rest. */
+ out += sprintf(out, "Frame List\n");
+ nframes = 10;
+ nerrs = 0;
+ for (i = 0; i < UHCI_NUMFRAMES; ++i) {
+  __le32 qh_dma;
+
+  j = 0;
+  td = uhci->frame_cpu[i];
+  link = uhci->frame[i];
+  if (!td)
+   goto check_link;
+
+  if (nframes > 0) {
+   out += sprintf(out, "- Frame %d -> (%08x)\n",
+     i, le32_to_cpu(link));
+   j = 1;
+  }
+
+  head = &td->fl_list;
+  tmp = head;
+  do {
+   td = list_entry(tmp, struct uhci_td, fl_list);
+   tmp = tmp->next;
+   if (link != LINK_TO_TD(td)) {
+    if (nframes > 0)
+     out += sprintf(out, " link does "
+      "not match list entry!\n");
+    else
+     ++nerrs;
+   }
+   if (nframes > 0)
+    out += uhci_show_td(td, out,
+      len - (out - buf), 4);
+   link = td->link;
+  } while (tmp != head);
+
+check_link:
+  /* The last link in each frame must point at the right skeleton QH. */
+  qh_dma = uhci_frame_skel_link(uhci, i);
+  if (link != qh_dma) {
+   if (nframes > 0) {
+    if (!j) {
+     out += sprintf(out,
+      "- Frame %d -> (%08x)\n",
+      i, le32_to_cpu(link));
+     j = 1;
+    }
+    out += sprintf(out, " link does not match "
+     "QH (%08x)!\n", le32_to_cpu(qh_dma));
+   } else
+    ++nerrs;
+  }
+  nframes -= j;
+ }
+ if (nerrs > 0)
+  out += sprintf(out, "Skipped %d bad links\n", nerrs);
+
+ out += sprintf(out, "Skeleton QHs\n");
+
+ fsbr_link = 0;
+ for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
+  int cnt = 0;
+
+  qh = uhci->skelqh[i];
+  out += sprintf(out, "- skel_%s_qh\n", qh_names[i]);
+  out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4);
+
+  /* Last QH is the Terminating QH, it's different */
+  if (i == SKEL_TERM) {
+   if (qh_element(qh) != LINK_TO_TD(uhci->term_td))
+    out += sprintf(out, " skel_term_qh element is not set to term_td!\n");
+   link = fsbr_link;
+   if (!link)
+    link = LINK_TO_QH(uhci->skel_term_qh);
+   goto check_qh_link;
+  }
+
+  head = &qh->node;
+  tmp = head->next;
+
+  while (tmp != head) {
+   qh = list_entry(tmp, struct uhci_qh, node);
+   tmp = tmp->next;
+   if (++cnt <= 10)
+    out += uhci_show_qh(uhci, qh, out,
+      len - (out - buf), 4);
+   if (!fsbr_link && qh->skel >= SKEL_FSBR)
+    fsbr_link = LINK_TO_QH(qh);
+  }
+  if ((cnt -= 10) > 0)
+   out += sprintf(out, " Skipped %d QHs\n", cnt);
+
+  /* Work out where this skeleton's last QH should link to. */
+  link = UHCI_PTR_TERM;
+  if (i <= SKEL_ISO)
+   ;
+  else if (i < SKEL_ASYNC)
+   link = LINK_TO_QH(uhci->skel_async_qh);
+  else if (!uhci->fsbr_is_on)
+   ;
+  else
+   link = LINK_TO_QH(uhci->skel_term_qh);
+check_qh_link:
+  if (qh->link != link)
+   out += sprintf(out, " last QH not linked to next skeleton!\n");
+ }
+
+ return out - buf;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MAX_OUTPUT (64 * 1024)
+
+/* Per-open snapshot of the schedule dump served through debugfs. */
+struct uhci_debug {
+ int size; /* number of valid bytes in data */
+ char *data; /* MAX_OUTPUT-byte dump buffer */
+};
+
+/*
+ * debugfs open: allocate a snapshot buffer, render the current schedule
+ * into it under uhci->lock, and stash it in file->private_data so reads
+ * see a consistent dump.  Returns 0 or -ENOMEM.
+ */
+static int uhci_debug_open(struct inode *inode, struct file *file)
+{
+ struct uhci_hcd *uhci = inode->i_private;
+ struct uhci_debug *up;
+ int ret = -ENOMEM;
+ unsigned long flags;
+
+ /* BKL serializes opens against lseek (legacy locking scheme). */
+ lock_kernel();
+ up = kmalloc(sizeof(*up), GFP_KERNEL);
+ if (!up)
+  goto out;
+
+ up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
+ if (!up->data) {
+  kfree(up);
+  goto out;
+ }
+
+ up->size = 0;
+ spin_lock_irqsave(&uhci->lock, flags);
+ /* Only dump once the controller data structures are fully set up. */
+ if (uhci->is_initialized)
+  up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
+ spin_unlock_irqrestore(&uhci->lock, flags);
+
+ file->private_data = up;
+
+ ret = 0;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+/*
+ * debugfs llseek: supports SEEK_SET (0) and SEEK_CUR (1) only; clamps
+ * the new offset to [0, up->size] and rejects anything else with -EINVAL.
+ */
+static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
+{
+ struct uhci_debug *up;
+ loff_t new = -1;
+
+ lock_kernel();
+ up = file->private_data;
+
+ switch (whence) {
+ case 0: /* SEEK_SET */
+  new = off;
+  break;
+ case 1: /* SEEK_CUR */
+  new = file->f_pos + off;
+  break;
+ }
+ /* SEEK_END (or any other whence) leaves new == -1 and fails here. */
+ if (new < 0 || new > up->size) {
+  unlock_kernel();
+  return -EINVAL;
+ }
+ unlock_kernel();
+ return (file->f_pos = new);
+}
+
+/* debugfs read: copy from the snapshot taken at open time. */
+static ssize_t uhci_debug_read(struct file *file, char __user *buf,
+   size_t nbytes, loff_t *ppos)
+{
+ struct uhci_debug *up = file->private_data;
+ return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
+}
+
+/* debugfs release: free the snapshot allocated by uhci_debug_open(). */
+static int uhci_debug_release(struct inode *inode, struct file *file)
+{
+ struct uhci_debug *up = file->private_data;
+ char *data = up->data;
+
+ kfree(up);
+ kfree(data);
+
+ return 0;
+}
+
+/*
+ * File operations for the per-controller debugfs dump file.
+ * (Removed a stray "#undef uhci_debug_operations" — no such macro is
+ * ever defined, so the directive was dead noise.)
+ */
+static const struct file_operations uhci_debug_operations = {
+ .owner = THIS_MODULE,
+ .open = uhci_debug_open,
+ .llseek = uhci_debug_lseek,
+ .read = uhci_debug_read,
+ .release = uhci_debug_release,
+};
+
+#endif /* CONFIG_DEBUG_FS */
+
+#else /* DEBUG */
+
+/* No-op stub used when DEBUG is not configured. */
+static inline void lprintk(char *buf)
+{}
+
+/* No-op stub used when DEBUG is not configured; writes nothing. */
+static inline int uhci_show_qh(struct uhci_hcd *uhci,
+  struct uhci_qh *qh, char *buf, int len, int space)
+{
+ return 0;
+}
+
+/* No-op stub used when DEBUG is not configured; writes nothing. */
+static inline int uhci_sprint_schedule(struct uhci_hcd *uhci,
+   char *buf, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/usb/astuhci/uhci-hcd.c b/drivers/usb/astuhci/uhci-hcd.c new file mode 100644 index 000000000000..fcfb6dbb7344 --- /dev/null +++ b/drivers/usb/astuhci/uhci-hcd.c @@ -0,0 +1,1229 @@ +/********************************************************************************
+* File Name : uhci-hcd.c
+*
+* port from uhci-hcd.c
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by the Free Software Foundation;
+* either version 2 of the License, or (at your option) any later version.
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+* without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+********************************************************************************/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/pm.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb.h>
+#include <linux/bitops.h>
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+
+#include <mach/platform.h>
+#include "../core/hcd.h"
+#include "uhci-hcd.h"
+//#include "pci-quirks.h"
+
+/*
+ * Version Information
+ */
+#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
+Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
+Alan Stern"
+#define DRIVER_DESC "USB Universal Host Controller Interface driver"
+
+/* for flakey hardware, ignore overcurrent indicators */
+/* Fix: module_param(..., bool, ...) requires a bool variable; it was int. */
+static bool ignore_oc;
+module_param(ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
+
+/*
+ * debug = 0, no debugging messages
+ * debug = 1, dump failed URBs except for stalls
+ * debug = 2, dump all failed URBs (including stalls)
+ * show all queues in /debug/uhci/[pci_addr]
+ * debug = 3, show all TDs in URBs when dumping
+ */
+#ifdef DEBUG
+#define DEBUG_CONFIGURED 1
+static int debug = 1;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level");
+
+#else
+#define DEBUG_CONFIGURED 0
+#define debug 0
+#endif
+
+
+
+static char *errbuf;
+#define ERRBUF_LEN (32 * 1024)
+
+static struct kmem_cache *uhci_up_cachep; /* urb_priv */
+
+void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
+void wakeup_rh(struct uhci_hcd *uhci);
+void uhci_get_current_frame_number(struct uhci_hcd *uhci);
+
+void uhci_reset_hc(struct uhci_hcd *uhci);
+int uhci_check_and_reset_hc(struct uhci_hcd *uhci);
+
+/*
+ * Compute the link-pointer DMA value for the first skeleton QH of a frame.
+ *
+ * Interrupt queues are interleaved as evenly as possible: period-1
+ * transfers must run every frame, but period-2 work goes in odd frames,
+ * period-4 in frames congruent to 2 (mod 4), and so on, leaving at most
+ * two interrupt QHs per frame.  __ffs on (frame | UHCI_NUMFRAMES) finds
+ * the lowest set bit (OR-ing in UHCI_NUMFRAMES guarantees one exists):
+ * frames 1,3,5,... give ffs = 0 -> period-2 QH = skelqh[8];
+ * frames 2,6,10,... give ffs = 1 -> period-4 QH = skelqh[7]; etc.
+ * Frames on no high-period queue fall through to period-1 = skelqh[9].
+ */
+static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
+{
+ int idx = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
+
+ if (idx <= 1)
+  idx = 9; /* period-1 queue */
+ return LINK_TO_QH(uhci->skelqh[idx]);
+}
+
+#include "uhci-debug.c"
+#include "uhci-q.c"
+#include "uhci-hub.c"
+
+/*
+ * Finish up a host controller reset and update the recorded state.
+ */
+void finish_reset(struct uhci_hcd *uhci)
+{
+ int port;
+
+ /* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
+  * bits in the port status and control registers.
+  * We have to clear them by hand.
+  */
+ /* NOTE(review): port registers are accessed with a stride of 1 byte
+  * (port * 1) throughout this port — confirm against the SoC's
+  * register map (standard UHCI uses 2-byte-spaced 16-bit registers). */
+ for (port = 0; port < uhci->rh_numports; ++port)
+  writel(0, uhci->regbase + USBPORTSC1 + (port * 1));
+
+ uhci->port_c_suspend = uhci->resuming_ports = 0;
+ uhci->rh_state = UHCI_RH_RESET;
+ uhci->is_stopped = UHCI_IS_STOPPED;
+ uhci_to_hcd(uhci)->state = HC_STATE_HALT;
+ uhci_to_hcd(uhci)->poll_rh = 0;
+
+ uhci->dead = 0; /* Full reset resurrects the controller */
+}
+
+/*
+ * Last rites for a defunct/nonfunctional controller
+ * or one we don't want to use any more.
+ */
+void uhci_hc_died(struct uhci_hcd *uhci)
+{
+ /* Record the final frame number before the HC stops counting. */
+ uhci_get_current_frame_number(uhci);
+//yriver
+// uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
+ uhci_reset_hc(uhci);
+ finish_reset(uhci);
+ uhci->dead = 1; /* set after finish_reset(), which clears it */
+
+ /* The current frame may already be partway finished */
+ ++uhci->frame_number;
+}
+
+/*
+ * Initialize a controller that was newly discovered or has lost power
+ * or otherwise been reset while it was suspended. In none of these cases
+ * can we be sure of its previous state.
+ */
+void check_and_reset_hc(struct uhci_hcd *uhci)
+{
+//yriver
+// if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr))
+ /* A nonzero return means a full reset was performed; record it. */
+ if (uhci_check_and_reset_hc(uhci))
+  finish_reset(uhci);
+}
+
+/*
+ * Store the basic register settings needed by the controller.
+ */
+void configure_hc(struct uhci_hcd *uhci)
+{
+ /* Set the frame length to the default: 1 ms exactly */
+// outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);
+ writel(USBSOF_DEFAULT, uhci->regbase + USBSOF);
+
+ /* Store the frame list base address */
+// outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD);
+ writel(uhci->frame_dma_handle, uhci->regbase + USBFLBASEADD);
+
+ /* Set the current frame number */
+// outw(uhci->frame_number & UHCI_MAX_SOF_NUMBER,
+//   uhci->io_addr + USBFRNUM);
+ writel(uhci->frame_number & UHCI_MAX_SOF_NUMBER,
+   uhci->regbase + USBFRNUM);
+
+ /* Mark controller as not halted before we enable interrupts */
+ uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
+ mb(); /* ensure register writes land before state is observed */
+ /* Enable PIRQ */
+//yriver
+// pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
+//   USBLEGSUP_DEFAULT);
+}
+
+
+/*
+ * PCI-era quirk check for controllers with broken Resume-Detect
+ * interrupts.  The original vendor-ID logic is commented out for this
+ * SoC port, so this always reports "not broken" (returns 0).
+ */
+int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
+{
+/*
+
+ int port;
+
+
+ if (ignore_oc)
+  return 1;
+
+ switch (to_pci_dev(uhci_dev(uhci))->vendor) {
+ default:
+  break;
+
+ case PCI_VENDOR_ID_GENESYS:
+  return 1;
+
+ case PCI_VENDOR_ID_INTEL:
+  for (port = 0; port < uhci->rh_numports; ++port) {
+   if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
+     USBPORTSC_OC)
+    return 1;
+  }
+  break;
+ }
+*/
+ return 0;
+}
+
+/*
+ * Quirk check: some boards (Asus A7V8X) wake immediately from
+ * suspend-to-RAM if EGSM is set while any port has a device connected.
+ * Returns 1 if global suspend (EGSM) must be avoided, 0 otherwise.
+ */
+int global_suspend_mode_is_broken(struct uhci_hcd *uhci)
+{
+ int port;
+ const char *sys_info;
+ /* Fix: the board name is never modified, so make it const. */
+ static const char bad_Asus_board[] = "A7V8X";
+
+ /* One of Asus's motherboards has a bug which causes it to
+  * wake up immediately from suspend-to-RAM if any of the ports
+  * are connected. In such cases we will not set EGSM.
+  */
+ sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+ if (sys_info && !strcmp(sys_info, bad_Asus_board)) {
+  for (port = 0; port < uhci->rh_numports; ++port) {
+//yriver
+//  if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
+   if (readl(uhci->regbase + USBPORTSC1 + port * 1) &
+     USBPORTSC_CCS)
+    return 1;
+  }
+ }
+
+ return 0;
+}
+
+/*
+ * Suspend the root hub, moving it to new_state (UHCI_RH_AUTO_STOPPED or
+ * UHCI_RH_SUSPENDED).  Enables Resume-Detect interrupts and EGSM+CF,
+ * waits for the controller to halt, then records the stopped state and
+ * flushes the schedule.  Called with uhci->lock held; may drop and
+ * re-acquire it (see __releases/__acquires) while sleeping 1 ms.
+ */
+void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
+__releases(uhci->lock)
+__acquires(uhci->lock)
+{
+ int auto_stop;
+ int int_enable, egsm_enable, wakeup_enable;
+ struct usb_device *rhdev = uhci_to_hcd(uhci)->self.root_hub;
+
+ auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
+ dev_dbg(&rhdev->dev, "%s%s\n", __func__,
+   (auto_stop ? " (auto-stop)" : ""));
+
+ /* Start off by assuming Resume-Detect interrupts and EGSM work
+  * and that remote wakeups should be enabled.
+  */
+ egsm_enable = USBCMD_EGSM;
+ uhci->RD_enable = 1;
+ int_enable = USBINTR_RESUME;
+ wakeup_enable = 1;
+/*
+ if (auto_stop) {
+  if (!device_may_wakeup(&rhdev->dev))
+   int_enable = 0;
+
+ } else {
+#ifdef CONFIG_PM
+  if (!rhdev->do_remote_wakeup)
+   wakeup_enable = 0;
+#endif
+ }
+
+ if (!wakeup_enable || global_suspend_mode_is_broken(uhci))
+  egsm_enable = 0;
+*/
+ /* If we're ignoring wakeup events then there's no reason to
+  * enable Resume-Detect interrupts. We also shouldn't enable
+  * them if they are broken or disallowed.
+  *
+  * This logic may lead us to enabling RD but not EGSM. The UHCI
+  * spec foolishly says that RD works only when EGSM is on, but
+  * there's no harm in enabling it anyway -- perhaps some chips
+  * will implement it!
+  */
+//yriver
+// if (!wakeup_enable || resume_detect_interrupts_are_broken(uhci) ||
+//   !int_enable)
+//  uhci->RD_enable = int_enable = 0;
+
+// outw(int_enable, uhci->io_addr + USBINTR);
+// outw(egsm_enable | USBCMD_CF, uhci->io_addr + USBCMD);
+ writel(int_enable, uhci->regbase + USBINTR);
+// writel(egsm_enable | USBCMD_CF, uhci->regbase + USBCMD);
+ /* NOTE(review): EGSM is written unconditionally here, bypassing the
+  * egsm_enable computed above — confirm this is intended for the SoC. */
+ writel(USBCMD_EGSM | USBCMD_CF, uhci->regbase + USBCMD);
+ mb();
+ udelay(5);
+
+ /* If we're auto-stopping then no devices have been attached
+  * for a while, so there shouldn't be any active URBs and the
+  * controller should stop after a few microseconds. Otherwise
+  * we will give the controller one frame to stop.
+  */
+//yriver
+// if (!auto_stop && !(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) {
+ if (!auto_stop && !(readl(uhci->regbase + USBSTS) & USBSTS_HCH)) {
+  uhci->rh_state = UHCI_RH_SUSPENDING;
+  spin_unlock_irq(&uhci->lock);
+  msleep(1);
+  spin_lock_irq(&uhci->lock);
+  /* Controller may have died while the lock was dropped. */
+  if (uhci->dead)
+   return;
+ }
+//yriver
+// if (!(inw(uhci->io_addr + USBSTS) & USBSTS_HCH))
+// if (!(readl(uhci->regbase + USBSTS) & USBSTS_HCH))
+//  dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");
+
+ uhci_get_current_frame_number(uhci);
+
+ uhci->rh_state = new_state;
+ uhci->is_stopped = UHCI_IS_STOPPED;
+
+ /* If interrupts don't work and remote wakeup is enabled then
+  * the suspended root hub needs to be polled.
+  */
+//yriver
+// uhci_to_hcd(uhci)->poll_rh = (!int_enable && wakeup_enable);
+ uhci_to_hcd(uhci)->poll_rh = !int_enable;
+
+ uhci_scan_schedule(uhci);
+ uhci_fsbr_off(uhci);
+}
+
+/*
+ * (Re)start the root hub: set Run/Stop, Configure Flag, and 64-byte max
+ * packet, enable all interrupt sources, and begin polling the root hub.
+ */
+void start_rh(struct uhci_hcd *uhci)
+{
+ uhci_to_hcd(uhci)->state = HC_STATE_RUNNING;
+ uhci->is_stopped = 0;
+ /* Mark it configured and running with a 64-byte max packet.
+  * All interrupts are enabled, even though RESUME won't do anything.
+  */
+//yriver
+// outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->io_addr + USBCMD);
+// outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
+//   uhci->io_addr + USBINTR);
+
+ writel(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->regbase + USBCMD);
+ writel(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
+   uhci->regbase + USBINTR);
+ mb(); /* make sure the HC is running before we change driver state */
+ uhci->rh_state = UHCI_RH_RUNNING;
+ uhci_to_hcd(uhci)->poll_rh = 1;
+}
+
+/*
+ * Wake the root hub out of suspend/auto-stop and restart it.
+ * Called with uhci->lock held (annotations inherited from the original
+ * PCI driver; this port no longer drops the lock here).
+ */
+void wakeup_rh(struct uhci_hcd *uhci)
+__releases(uhci->lock)
+__acquires(uhci->lock)
+{
+ dev_dbg(&uhci_to_hcd(uhci)->self.root_hub->dev,
+   "%s%s\n", __func__,
+   uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
+    " (auto-start)" : "");
+
+ /* If we are auto-stopped then no devices are attached so there's
+  * no need for wakeup signals. Otherwise we send Global Resume
+  * for 20 ms.
+  */
+ /* NOTE(review): the original driver asserted Global Resume here for
+  * 20 ms; this port only switches the state machine — confirm the SoC
+  * handles resume signalling itself. */
+ if (uhci->rh_state == UHCI_RH_SUSPENDED) {
+  uhci->rh_state = UHCI_RH_RESUMING;
+ }
+
+ start_rh(uhci);
+
+ /* Restart root hub polling */
+ mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
+}
+
+/* Thin wrapper: wake the host controller by resuming its root hub. */
+void wakeup_hc(struct uhci_hcd *uhci)
+{
+ wakeup_rh(uhci);
+}
+
+/*
+ * Interrupt handler.  Reads and acknowledges USBSTS, reports fatal
+ * conditions (system error, process error, unexpected halt — the latter
+ * kills the controller and dumps the schedule at debug > 1), then either
+ * polls the root hub (on Resume Detect) or scans the transfer schedule.
+ * Returns IRQ_NONE if none of the status bits belong to us.
+ */
+irqreturn_t uhci_irq(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ unsigned short status;
+
+ /*
+  * Read the interrupt status, and write it back to clear the
+  * interrupt cause. Contrary to the UHCI specification, the
+  * "HC Halted" status bit is persistent: it is RO, not R/WC.
+  */
+//yriver
+// status = inw(uhci->io_addr + USBSTS);
+ status = readl(uhci->regbase + USBSTS);
+ if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */
+  return IRQ_NONE;
+// outw(status, uhci->io_addr + USBSTS);  /* Clear it */
+ writel(status, uhci->regbase + USBSTS);  /* Clear it */
+
+ if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
+  if (status & USBSTS_HSE)
+   dev_err(uhci_dev(uhci), "host system error, "
+     "PCI problems?\n");
+  if (status & USBSTS_HCPE)
+   dev_err(uhci_dev(uhci), "host controller process "
+     "error, something bad happened!\n");
+  if (status & USBSTS_HCH) {
+   spin_lock(&uhci->lock);
+   /* A halt is only fatal if we believed we were running. */
+   if (uhci->rh_state >= UHCI_RH_RUNNING) {
+    dev_err(uhci_dev(uhci),
+     "host controller halted, "
+     "very bad!\n");
+    if (debug > 1 && errbuf) {
+     /* Print the schedule for debugging */
+     uhci_sprint_schedule(uhci,
+       errbuf, ERRBUF_LEN);
+     lprintk(errbuf);
+    }
+    uhci_hc_died(uhci);
+
+    /* Force a callback in case there are
+     * pending unlinks */
+//yriver
+//    mod_timer(&hcd->rh_timer, jiffies);
+    mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
+   }
+   spin_unlock(&uhci->lock);
+  }
+ }
+
+ if (status & USBSTS_RD)
+  usb_hcd_poll_rh_status(hcd);
+ else {
+  spin_lock(&uhci->lock);
+  uhci_scan_schedule(uhci);
+  spin_unlock(&uhci->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Store the current frame number in uhci->frame_number if the controller
+ * is runnning. Expand from 11 bits (of which we use only 10) to a
+ * full-sized integer.
+ *
+ * Like many other parts of the driver, this code relies on being polled
+ * more than once per second as long as the controller is running.
+ */
+void uhci_get_current_frame_number(struct uhci_hcd *uhci)
+{
+ if (!uhci->is_stopped) {
+  unsigned delta;
+//yriver
+//  delta = (inw(uhci->io_addr + USBFRNUM) - uhci->frame_number) &
+//    (UHCI_NUMFRAMES - 1);
+  /* Delta modulo 1024 since the last sample; safe as long as we
+   * sample at least once per 1024 frames (~1 second). */
+  delta = (readl(uhci->regbase + USBFRNUM) - uhci->frame_number) &
+    (UHCI_NUMFRAMES - 1);
+  uhci->frame_number += delta;
+ }
+}
+
+/*
+ * De-allocate all resources
+ */
+void release_uhci(struct uhci_hcd *uhci)
+{
+ int i;
+
+ if (DEBUG_CONFIGURED) {
+  /* Stop new debugfs dumps racing with the teardown below. */
+  spin_lock_irq(&uhci->lock);
+  uhci->is_initialized = 0;
+  spin_unlock_irq(&uhci->lock);
+
+  debugfs_remove(uhci->dentry);
+ }
+
+ /* Free in reverse order of allocation in uhci_start(). */
+ for (i = 0; i < UHCI_NUM_SKELQH; i++)
+  uhci_free_qh(uhci, uhci->skelqh[i]);
+
+ uhci_free_td(uhci, uhci->term_td);
+
+ dma_pool_destroy(uhci->qh_pool);
+
+ dma_pool_destroy(uhci->td_pool);
+
+ kfree(uhci->frame_cpu);
+
+ dma_free_coherent(uhci_dev(uhci),
+   UHCI_NUMFRAMES * sizeof(*uhci->frame),
+   uhci->frame, uhci->frame_dma_handle);
+}
+
+/*
+ * Hard-reset the controller: mask all interrupts, assert Global Reset
+ * for 50 ms, then clear USBCMD and let the chip settle for 10 ms.
+ */
+void uhci_reset_hc(struct uhci_hcd *uhci)
+{
+ /* Reset the HC - this will force us to get a
+  * new notification of any already connected
+  * ports due to the virtual disconnect that it
+  * implies.
+  */
+ writel(0,(uhci->regbase + USBINTR));
+ writel(USBCMD_GRESET,(uhci->regbase + USBCMD));
+ mdelay(50);
+
+ writel(0,(uhci->regbase + USBCMD));
+ mdelay(10);
+}
+
+/*
+ * Check whether the controller is in the quiescent state we expect
+ * (not running, Configure Flag set, EGSM set, no unexpected interrupt
+ * sources enabled).  If not, perform a full reset.
+ * Returns 1 if a reset was performed, 0 if the HC was already safe.
+ *
+ * Fix: the diagnostic printks lacked a log level and used the
+ * deprecated __FUNCTION__; now KERN_DEBUG and standard __func__.
+ */
+int uhci_check_and_reset_hc(struct uhci_hcd *uhci)
+{
+ unsigned int cmd, intr;
+
+ cmd = readl((uhci->regbase + USBCMD));
+ if ((cmd & USBCMD_RS) || !(cmd & USBCMD_CF) ||
+   !(cmd & USBCMD_EGSM)) {
+  printk(KERN_DEBUG "%s: cmd = 0x%04x\n", __func__, cmd);
+  goto reset_needed;
+ }
+
+ //intr = inw(base + UHCI_USBINTR);
+ intr = readl((uhci->regbase + USBINTR));
+ if (intr & (~USBINTR_RESUME)) {
+  printk(KERN_DEBUG "%s: intr = 0x%04x\n", __func__, intr);
+  goto reset_needed;
+ }
+ return 0;
+
+reset_needed:
+ uhci_reset_hc(uhci);
+ return 1;
+}
+
+/*
+ * usb_hcd "reset" hook: set the root-hub port count from the build
+ * configuration and make sure the controller is quiescent.
+ * Always returns 0.
+ */
+int uhci_reset(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+
+ /* Port count is fixed at build time for this SoC. */
+#ifdef CONFIG_GUC_USB_UHCI_MULTIPORT_2
+ uhci->rh_numports = 2;
+#elif defined (CONFIG_GUC_USB_UHCI_MULTIPORT_4)
+ uhci->rh_numports = 4;
+#else
+ uhci->rh_numports = 1;
+#endif
+
+ /* Kick BIOS off this hardware and reset if the controller
+  * isn't already safely quiescent.
+  */
+ check_and_reset_hc(uhci);
+ return 0;
+}
+
+/*
+ * usb_hcd "init" hook (alternative to uhci_reset): probe how many root-hub
+ * ports exist by testing the always-set bit 7 of each USBPORTSC register,
+ * then quiesce the controller.  Always returns 0.
+ *
+ * NOTE(review): io_addr is recorded from hcd->rsrc_start but the port
+ * probe reads through uhci->regbase — confirm regbase is already mapped
+ * before this runs.
+ */
+int uhci_init(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ //unsigned io_size = (unsigned) hcd->rsrc_len;
+ int port;
+
+ uhci->io_addr = (unsigned long) hcd->rsrc_start;
+
+ /* The UHCI spec says devices must have 2 ports, and goes on to say
+  * they may have more but gives no way to determine how many there
+  * are. However according to the UHCI spec, Bit 7 of the port
+  * status and control register is always set to 1. So we try to
+  * use this to our advantage. Another common failure mode when
+  * a nonexistent register is addressed is to return all ones, so
+  * we test for that also.
+  */
+ for (port = 0; port < UHCI_RH_MAXCHILD; port++) {
+  unsigned int portstatus;
+//yriver
+//  portstatus = inw(uhci->io_addr + USBPORTSC1 + (port * 2));
+  portstatus = readl(uhci->regbase + USBPORTSC1 + (port * 1));
+  if (!(portstatus & 0x0080) || portstatus == 0xffff)
+   break;
+ }
+ if (debug) {
+  dev_info(uhci_dev(uhci), "detected %d ports\n", port);
+ }
+
+ /* Anything greater than 7 is weird so we'll ignore it. */
+ if (port > UHCI_RH_MAXCHILD) {
+  dev_info(uhci_dev(uhci), "port count misdetected? "
+   "forcing to 2 ports\n");
+  port = 2;
+ }
+ uhci->rh_numports = port;
+
+ /* Kick BIOS off this hardware and reset if the controller
+  * isn't already safely quiescent.
+  */
+ check_and_reset_hc(uhci);
+ return 0;
+}
+
+/* Make sure the controller is quiescent and that we're not using it
+ * any more. This is mainly for the benefit of programs which, like kexec,
+ * expect the hardware to be idle: not doing DMA or generating IRQs.
+ *
+ * This routine may be called in a damaged or failing kernel. Hence we
+ * do not acquire the spinlock before shutting down the controller.
+ */
+void uhci_shutdown(struct pci_dev *pdev)
+{
+ struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);
+
+ /* uhci_hc_died() resets the HC and marks it dead — no locking, see above. */
+ uhci_hc_died(hcd_to_uhci(hcd));
+}
+
+/*
+ * Allocate a frame list, and then setup the skeleton
+ *
+ * The hardware doesn't really know any difference
+ * in the queues, but the order does matter for the
+ * protocols higher up. The order in which the queues
+ * are encountered by the hardware is:
+ *
+ * - All isochronous events are handled before any
+ * of the queues. We don't do that here, because
+ * we'll create the actual TD entries on demand.
+ * - The first queue is the high-period interrupt queue.
+ * - The second queue is the period-1 interrupt and async
+ * (low-speed control, full-speed control, then bulk) queue.
+ * - The third queue is the terminating bandwidth reclamation queue,
+ * which contains no members, loops back to itself, and is present
+ * only when FSBR is on and there are no full-speed control or bulk QHs.
+ */
+int uhci_start(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ int retval = -EBUSY;
+ int i;
+ struct dentry *dentry;
+
+ hcd->uses_new_polling = 1;
+
+ /* Basic software state: the main lock, the FSBR watchdog timer,
+ * the idle-QH list, and the wait queue used by
+ * uhci_hcd_endpoint_disable(). */
+ spin_lock_init(&uhci->lock);
+ setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
+ (unsigned long) uhci);
+ INIT_LIST_HEAD(&uhci->idle_qh_list);
+ init_waitqueue_head(&uhci->waitqh);
+
+ if (DEBUG_CONFIGURED) {
+ dentry = debugfs_create_file(hcd->self.bus_name,
+ S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
+ uhci, &uhci_debug_operations);
+ if (!dentry) {
+ dev_err(uhci_dev(uhci), "couldn't create uhci "
+ "debugfs entry\n");
+ retval = -ENOMEM;
+ goto err_create_debug_entry;
+ }
+ uhci->dentry = dentry;
+ }
+
+ /* DMA-coherent 1024-entry frame list, read directly by the HC.
+ * NOTE(review): on this and the following allocation failures
+ * retval is still -EBUSY from its initializer, not -ENOMEM --
+ * same quirk as the upstream uhci driver this was ported from. */
+ uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
+ UHCI_NUMFRAMES * sizeof(*uhci->frame),
+ &uhci->frame_dma_handle, 0);
+ if (!uhci->frame) {
+ dev_err(uhci_dev(uhci), "unable to allocate "
+ "consistent memory for frame list\n");
+ goto err_alloc_frame;
+ }
+
+ memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));
+
+ /* Shadow array of CPU pointers mirroring the hardware frame list */
+ uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
+ GFP_KERNEL);
+ if (!uhci->frame_cpu) {
+ dev_err(uhci_dev(uhci), "unable to allocate "
+ "memory for frame pointers\n");
+ goto err_alloc_frame_cpu;
+ }
+
+ /* TDs and QHs must be 16-byte aligned for the hardware */
+ uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
+ sizeof(struct uhci_td), 16, 0);
+ if (!uhci->td_pool) {
+ dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
+ goto err_create_td_pool;
+ }
+
+ uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
+ sizeof(struct uhci_qh), 16, 0);
+ if (!uhci->qh_pool) {
+ dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
+ goto err_create_qh_pool;
+ }
+
+ uhci->term_td = uhci_alloc_td(uhci);
+ if (!uhci->term_td) {
+ dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
+ goto err_alloc_term_td;
+ }
+
+ /* One skeleton QH per schedule slot (see uhci-hcd.h) */
+ for (i = 0; i < UHCI_NUM_SKELQH; i++) {
+ uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
+ if (!uhci->skelqh[i]) {
+ dev_err(uhci_dev(uhci), "unable to allocate QH\n");
+ goto err_alloc_skelqh;
+ }
+ }
+
+ /*
+ * 8 Interrupt queues; link all higher int queues to int1 = async
+ */
+ for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i)
+ uhci->skelqh[i]->link = LINK_TO_QH(uhci->skel_async_qh);
+ uhci->skel_async_qh->link = UHCI_PTR_TERM;
+ uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_term_qh);
+
+ /* This dummy TD is to work around a bug in Intel PIIX controllers */
+ uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
+ (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
+ uhci->term_td->link = UHCI_PTR_TERM;
+ uhci->skel_async_qh->element = uhci->skel_term_qh->element =
+ LINK_TO_TD(uhci->term_td);
+
+ /*
+ * Fill the frame list: make all entries point to the proper
+ * interrupt queue.
+ */
+ for (i = 0; i < UHCI_NUMFRAMES; i++) {
+
+ /* Only place we don't use the frame list routines */
+ uhci->frame[i] = uhci_frame_skel_link(uhci, i);
+ }
+
+ /*
+ * Some architectures require a full mb() to enforce completion of
+ * the memory writes above before the I/O transfers in configure_hc().
+ */
+ mb();
+
+ configure_hc(uhci);
+
+ /* From here on the interrupt handler may touch the schedule */
+ uhci->is_initialized = 1;
+ start_rh(uhci);
+
+ return 0;
+
+/*
+ * error exits: unwind strictly in reverse order of acquisition
+ */
+err_alloc_skelqh:
+ for (i = 0; i < UHCI_NUM_SKELQH; i++) {
+ if (uhci->skelqh[i])
+ uhci_free_qh(uhci, uhci->skelqh[i]);
+ }
+
+ uhci_free_td(uhci, uhci->term_td);
+
+err_alloc_term_td:
+ dma_pool_destroy(uhci->qh_pool);
+
+err_create_qh_pool:
+ dma_pool_destroy(uhci->td_pool);
+
+err_create_td_pool:
+ kfree(uhci->frame_cpu);
+
+err_alloc_frame_cpu:
+ dma_free_coherent(uhci_dev(uhci),
+ UHCI_NUMFRAMES * sizeof(*uhci->frame),
+ uhci->frame, uhci->frame_dma_handle);
+
+err_alloc_frame:
+ debugfs_remove(uhci->dentry);
+
+err_create_debug_entry:
+ return retval;
+}
+
+/* hc_driver .stop callback: kill the controller if it is still
+ * reachable, give back every outstanding URB, then free the schedule
+ * data structures. */
+void uhci_stop(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+ spin_lock_irq(&uhci->lock);
+ if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) && !uhci->dead)
+ uhci_hc_died(uhci);
+ /* Reap everything still on the schedule before tearing it down */
+ uhci_scan_schedule(uhci);
+ spin_unlock_irq(&uhci->lock);
+
+ /* The FSBR timer must be gone before release_uhci() frees the QHs */
+ del_timer_sync(&uhci->fsbr_timer);
+ release_uhci(uhci);
+}
+
+/*
+ * Quiesce the host controller: mask every interrupt source, then
+ * write USBCMD with only the MAXP bit set so Run/Stop is cleared and
+ * the HC halts.
+ */
+void stop_hc(struct uhci_hcd *uhci)
+{
+ /* Interrupts first, then halt the controller itself */
+ writel(0, uhci->regbase + USBINTR);
+ writel(USBCMD_MAXP, uhci->regbase + USBCMD);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Bus-suspend callback for the root hub.  Returns -ESHUTDOWN when the
+ * hardware is no longer accessible; otherwise moves the root hub to
+ * UHCI_RH_SUSPENDED (unless the controller is dead) and returns 0.
+ */
+int uhci_rh_suspend(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ int ret = 0;
+
+ spin_lock_irq(&uhci->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ ret = -ESHUTDOWN;
+ else if (!uhci->dead)
+ suspend_rh(uhci, UHCI_RH_SUSPENDED);
+ spin_unlock_irq(&uhci->lock);
+
+ return ret;
+}
+
+/*
+ * Bus-resume callback for the root hub.  Mirrors uhci_rh_suspend():
+ * -ESHUTDOWN if the hardware is gone, otherwise wake the root hub
+ * (unless the controller is dead) and return 0.
+ */
+int uhci_rh_resume(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ int status = 0;
+
+ spin_lock_irq(&uhci->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ status = -ESHUTDOWN;
+ else if (!uhci->dead)
+ wakeup_rh(uhci);
+ spin_unlock_irq(&uhci->lock);
+
+ return status;
+}
+
+/*
+ * PCI suspend callback.  The root hub must already be suspended;
+ * PIRQ routing is turned off so the suspended HC cannot raise legacy
+ * interrupts.  Returns -EBUSY if the root hub is still active,
+ * 0 otherwise (including when already suspended or dead).
+ *
+ * Fix: the original had a stray semicolon after the rh_state check's
+ * closing brace ("};").
+ */
+int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ int rc = 0;
+
+ dev_dbg(uhci_dev(uhci), "%s\n", __func__);
+
+ spin_lock_irq(&uhci->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ goto done_okay; /* Already suspended or dead */
+
+ if (uhci->rh_state > UHCI_RH_SUSPENDED) {
+ dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n");
+ rc = -EBUSY;
+ goto done;
+ }
+
+ /* All PCI host controllers are required to disable IRQ generation
+ * at the source, so we must turn off PIRQ.
+ */
+ pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
+ mb();
+ hcd->poll_rh = 0;
+
+ /* FIXME: Enable non-PME# remote wakeup? */
+
+ /* make sure snapshot being resumed re-enumerates everything */
+ if (message.event == PM_EVENT_PRETHAW)
+ uhci_hc_died(uhci);
+
+done_okay:
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+done:
+ spin_unlock_irq(&uhci->lock);
+ return rc;
+}
+
+/* PCI resume callback: re-enable hardware access, sanity-check and
+ * reconfigure the controller, and restart polling if IRQs are not
+ * usable for remote wakeup. */
+int uhci_pci_resume(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+ dev_dbg(uhci_dev(uhci), "%s\n", __func__);
+
+ /* Since we aren't in D3 any more, it's safe to set this flag
+ * even if the controller was dead.
+ */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ mb();
+
+ spin_lock_irq(&uhci->lock);
+
+ /* FIXME: Disable non-PME# remote wakeup? */
+
+ /* The firmware or a boot kernel may have changed the controller
+ * settings during a system wakeup. Check it and reconfigure
+ * to avoid problems.
+ */
+ check_and_reset_hc(uhci);
+
+ /* If the controller was dead before, it's back alive now */
+ configure_hc(uhci);
+
+ if (uhci->rh_state == UHCI_RH_RESET) {
+
+ /* The controller had to be reset */
+ usb_root_hub_lost_power(hcd->self.root_hub);
+ suspend_rh(uhci, UHCI_RH_SUSPENDED);
+ }
+
+ spin_unlock_irq(&uhci->lock);
+
+ /* If interrupts don't work and remote wakeup is enabled then
+ * the suspended root hub needs to be polled.
+ */
+ if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup) {
+ hcd->poll_rh = 1;
+ usb_hcd_poll_rh_status(hcd);
+ }
+ return 0;
+}
+#endif
+
+/* Wait until a particular device/endpoint's QH is idle, and free it */
+void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *hep)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ struct uhci_qh *qh;
+
+ spin_lock_irq(&uhci->lock);
+ qh = (struct uhci_qh *) hep->hcpriv;
+ if (qh == NULL)
+ goto done;
+
+ /* Sleep (lock dropped) until the scan code marks this QH idle;
+ * num_waiting tells the waker someone is on uhci->waitqh. */
+ while (qh->state != UHCI_QH_STATE_IDLE) {
+ ++uhci->num_waiting;
+ spin_unlock_irq(&uhci->lock);
+ wait_event_interruptible(uhci->waitqh,
+ qh->state == UHCI_QH_STATE_IDLE);
+ spin_lock_irq(&uhci->lock);
+ --uhci->num_waiting;
+ }
+
+ uhci_free_qh(uhci, qh);
+done:
+ spin_unlock_irq(&uhci->lock);
+}
+
+/*
+ * Return the current USB frame number.  Reads uhci->frame_number
+ * without taking the spinlock (to minimize latency) and adds however
+ * far the hardware frame counter has advanced since software last
+ * updated it.  (Removed the commented-out PCI inw() version this
+ * MMIO port replaced.)
+ */
+int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ unsigned frame_number;
+ unsigned delta;
+
+ /* Minimize latency by avoiding the spinlock */
+ frame_number = uhci->frame_number;
+ barrier();
+ delta = (readl(uhci->regbase + USBFRNUM) - frame_number) &
+ (UHCI_NUMFRAMES - 1);
+ return frame_number + delta;
+}
+
+//static const char hcd_name[] = "uhci_hcd";
+
+/* hc_driver operations table handed to usb_create_hcd() by the
+ * platform probe glue below. */
+static const struct hc_driver _uhci_driver = {
+//yriver
+// .description = hcd_name,
+ .description = "ast uhci",
+ .product_desc = "UHCI Host Controller",
+ .hcd_priv_size = sizeof(struct uhci_hcd),
+
+ /* Generic hardware linkage */
+ .irq = uhci_irq,
+ .flags = HCD_USB11,
+
+ /* Basic lifecycle operations */
+// .reset = uhci_init,
+ .reset = uhci_reset,
+ .start = uhci_start,
+#ifdef CONFIG_PM
+ .pci_suspend = uhci_pci_suspend,
+ .pci_resume = uhci_pci_resume,
+ .bus_suspend = uhci_rh_suspend,
+ .bus_resume = uhci_rh_resume,
+#endif
+ .stop = uhci_stop,
+
+ .urb_enqueue = uhci_urb_enqueue,
+ .urb_dequeue = uhci_urb_dequeue,
+
+ .endpoint_disable = uhci_hcd_endpoint_disable,
+ .get_frame_number = uhci_hcd_get_frame_number,
+
+ .hub_status_data = uhci_hub_status_data,
+ .hub_control = uhci_hub_control,
+};
+
+/* One-time core init shared by all controllers: debugfs root, error
+ * buffer, and the urb_priv slab cache.  The PCI driver registration
+ * from the upstream driver is deliberately commented out -- this port
+ * registers a platform driver instead (see uhci_hcd_guc_init()). */
+int __init uhci_hcd_init(void)
+{
+ int retval = -ENOMEM;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ printk(KERN_INFO "uhci_hcd: " DRIVER_DESC "%s\n",
+ ignore_oc ? ", overcurrent ignored" : "");
+ set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
+
+ if (DEBUG_CONFIGURED) {
+ errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
+ if (!errbuf)
+ goto errbuf_failed;
+ uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
+ if (!uhci_debugfs_root)
+ goto debug_failed;
+ }
+
+ uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
+ sizeof(struct urb_priv), 0, 0, NULL);
+ if (!uhci_up_cachep)
+ goto up_failed;
+/*
+ retval = pci_register_driver(&uhci_pci_driver);
+ if (retval)
+ goto init_failed;
+*/
+ return 0;
+
+/* error exits, in reverse order of acquisition */
+//init_failed:
+// kmem_cache_destroy(uhci_up_cachep);
+
+up_failed:
+ debugfs_remove(uhci_debugfs_root);
+
+debug_failed:
+ kfree(errbuf);
+
+errbuf_failed:
+
+ clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
+ return retval;
+}
+
+/* Undo uhci_hcd_init(): free the slab cache, debugfs tree and error
+ * buffer.  (PCI driver unregistration is commented out to match the
+ * commented-out registration in uhci_hcd_init().) */
+void uhci_hcd_cleanup(void)
+{
+// pci_unregister_driver(&uhci_pci_driver);
+ kmem_cache_destroy(uhci_up_cachep);
+ debugfs_remove(uhci_debugfs_root);
+ kfree(errbuf);
+ clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
+}
+
+/* Thin exit wrapper around uhci_hcd_cleanup().
+ * NOTE(review): module_exit() below uses uhci_hcd_guc_cleanup(), not
+ * this function -- it appears to be dead code left from the port. */
+void __exit cleanup_UHCI(void)
+{
+ uhci_hcd_cleanup();
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+ * Allocate an HCD for the platform device, claim and map its register
+ * bank, and register it with usbcore.
+ *
+ * Fixes over the original:
+ *  - an IRQ lookup failure used to "goto err1" and call
+ *    usb_put_hcd() on the still-NULL hcd pointer;
+ *  - the memory region from request_mem_region() was leaked when
+ *    ioremap_nocache() or usb_add_hcd() failed;
+ *  - unused local 'temp' removed; error printk given a log level.
+ */
+static int usb_hcd_guc_probe (const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ unsigned int *base;
+ int retval;
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct uhci_hcd *uhci;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ /* Nothing allocated yet -- do not fall into the unwind path */
+ return -ENODEV;
+ }
+
+ hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ uhci = hcd_to_uhci(hcd);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ if (!request_mem_region(res->start, res->end - res->start + 1,
+ res->name)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto err_put_hcd;
+ }
+
+ base = ioremap_nocache(res->start, res->end - res->start + 1);
+ if (base == NULL) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ retval = -ENOMEM;
+ goto err_release_region;
+ }
+
+ /* Both views of the mapping: regbase for readl/writel register
+ * arithmetic, io_addr for the later iounmap() */
+ uhci->regbase = (unsigned int *)base;
+ uhci->io_addr = (unsigned int)base;
+
+ retval = usb_add_hcd (hcd, irq, IRQF_SHARED);
+ if (retval == 0)
+ return 0;
+
+ //BruceToDo. Stop USB 1.1 Host's clock.
+ iounmap((void *)uhci->io_addr);
+err_release_region:
+ release_mem_region(res->start, res->end - res->start + 1);
+err_put_hcd:
+ usb_put_hcd(hcd);
+
+ printk(KERN_ERR "add UHCI to USB host controller list failed!\n");
+ return retval;
+}
+
+
+/*
+ * Tear down one controller: unregister from usbcore, unmap the
+ * registers, release the memory region claimed in the probe routine
+ * (the original leaked it on every unbind), and drop the HCD
+ * reference.
+ */
+static inline void
+usb_hcd_guc_remove (struct usb_hcd *hcd, struct platform_device *pdev)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ struct resource *res;
+
+ usb_remove_hcd(hcd);
+ //BruceToDo. Stop USB 1.1 Host's clock.
+ iounmap((void*)uhci->io_addr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, res->end - res->start + 1);
+
+ usb_put_hcd(hcd);
+}
+
+/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_PM
+/* Platform suspend hook.  The #if 0 block is leftover OMAP OHCI
+ * template code (it references an 'ohci' variable that does not exist
+ * here); all we actually do is record the SUSPENDED state. */
+static int uhci_guc_suspend(struct platform_device *dev, pm_message_t message)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(platform_get_drvdata(dev));
+#if 0
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+ omap_ohci_clock_power(0);
+#endif
+ uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
+ return 0;
+}
+/* Platform resume hook.  As with suspend, the clock-power code is
+ * stubbed out; we only kick the root hub so khubd rescans the ports. */
+static int uhci_guc_resume(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+#if 0
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+ omap_ohci_clock_power(1);
+ ohci_finish_controller_resume(hcd);
+#endif
+
+ /*Bruce111220. This line is copied from AST1510 uhci-hcd.c and OMAP kernel 2.6.15*/
+ usb_hcd_resume_root_hub(hcd);
+ return 0;
+}
+#endif
+/*-------------------------------------------------------------------------*/
+
+/* platform_driver .probe: bind the device using the hc_driver table */
+static int uhci_hcd_guc_drv_probe(struct platform_device *dev)
+{
+ return usb_hcd_guc_probe(&_uhci_driver, dev);
+}
+/* platform_driver .remove: tear down the HCD and clear drvdata */
+static int uhci_hcd_guc_drv_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_hcd_guc_remove(hcd, dev);
+ platform_set_drvdata(dev, NULL);
+ return 0;
+}
+
+/*
+ * Driver definition to register
+ */
+static struct platform_driver uhci_hcd_guc_driver = {
+ .probe = uhci_hcd_guc_drv_probe,
+ .remove = uhci_hcd_guc_drv_remove,
+#ifdef CONFIG_PM
+ .suspend = uhci_guc_suspend,
+ .resume = uhci_guc_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ /* must match the platform device name registered by the board code */
+ .name = "ast_uhci",
+ },
+};
+
+/*
+ * Module init: bring up the shared uhci-hcd core state, then register
+ * the platform driver.  The original ignored uhci_hcd_init()'s return
+ * value (registering the driver even after the core failed to set up
+ * its slab cache) and did not unwind when registration failed.
+ */
+static int __init uhci_hcd_guc_init (void)
+{
+ int retval;
+
+ retval = uhci_hcd_init();
+ if (retval)
+ return retval;
+
+ retval = platform_driver_register(&uhci_hcd_guc_driver);
+ if (retval)
+ uhci_hcd_cleanup();
+ return retval;
+}
+
+/*
+ * Module exit: unregister the platform driver first (detaching any
+ * bound controllers), then tear down the core caches -- the reverse
+ * of init order.  The original called uhci_hcd_cleanup() first,
+ * destroying the urb_priv slab cache while controllers could still
+ * be active.
+ */
+static void __exit uhci_hcd_guc_cleanup (void)
+{
+ platform_driver_unregister(&uhci_hcd_guc_driver);
+ uhci_hcd_cleanup();
+}
+
+module_init(uhci_hcd_guc_init);
+module_exit(uhci_hcd_guc_cleanup);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/astuhci/uhci-hcd.h b/drivers/usb/astuhci/uhci-hcd.h new file mode 100644 index 000000000000..c4a903e68a4b --- /dev/null +++ b/drivers/usb/astuhci/uhci-hcd.h @@ -0,0 +1,496 @@ +/********************************************************************************
+* File Name : uhci-hcd.h
+*
+* port from uhci-hcd.h
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by the Free Software Foundation;
+* either version 2 of the License, or (at your option) any later version.
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+* without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+********************************************************************************/
+
+#ifndef __LINUX_UHCI_HCD_H
+#define __LINUX_UHCI_HCD_H
+
+#include <linux/list.h>
+#include <linux/usb.h>
+
+#define usb_packetid(pipe) (usb_pipein(pipe) ? USB_PID_IN : USB_PID_OUT)
+#define PIPE_DEVEP_MASK 0x0007ff00
+
+
+/*
+ * Universal Host Controller Interface data structures and defines
+ */
+
+/* Command register */
+#define USBCMD 0
+#define USBCMD_RS 0x0001 /* Run/Stop */
+#define USBCMD_HCRESET 0x0002 /* Host reset */
+#define USBCMD_GRESET 0x0004 /* Global reset */
+#define USBCMD_EGSM 0x0008 /* Global Suspend Mode */
+#define USBCMD_FGR 0x0010 /* Force Global Resume */
+#define USBCMD_SWDBG 0x0020 /* SW Debug mode */
+#define USBCMD_CF 0x0040 /* Config Flag (sw only) */
+#define USBCMD_MAXP 0x0080 /* Max Packet (0 = 32, 1 = 64) */
+
+/* Status register */
+//#define USBSTS 2
+#define USBSTS 1
+#define USBSTS_USBINT 0x0001 /* Interrupt due to IOC */
+#define USBSTS_ERROR 0x0002 /* Interrupt due to error */
+#define USBSTS_RD 0x0004 /* Resume Detect */
+#define USBSTS_HSE 0x0008 /* Host System Error: PCI problems */
+#define USBSTS_HCPE 0x0010 /* Host Controller Process Error:
+ * the schedule is buggy */
+#define USBSTS_HCH 0x0020 /* HC Halted */
+
+/* Interrupt enable register */
+//#define USBINTR 4
+#define USBINTR 2
+#define USBINTR_TIMEOUT 0x0001 /* Timeout/CRC error enable */
+#define USBINTR_RESUME 0x0002 /* Resume interrupt enable */
+#define USBINTR_IOC 0x0004 /* Interrupt On Complete enable */
+#define USBINTR_SP 0x0008 /* Short packet interrupt enable */
+
+//#define USBFRNUM 6
+//#define USBFLBASEADD 8
+//#define USBSOF 12
+#define USBFRNUM 32
+#define USBFLBASEADD 3
+#define USBSOF 33
+#define USBSOF_DEFAULT 64 /* Frame length is exactly 1 ms */
+
+/* USB port status and control registers */
+//#define USBPORTSC1 16
+//#define USBPORTSC2 18
+#define USBPORTSC1 (34)
+#define USBPORTSC2 (35)
+#define USBPORTSC_CCS 0x0001 /* Current Connect Status
+ * ("device present") */
+#define USBPORTSC_CSC 0x0002 /* Connect Status Change */
+#define USBPORTSC_PE 0x0004 /* Port Enable */
+#define USBPORTSC_PEC 0x0008 /* Port Enable Change */
+#define USBPORTSC_DPLUS 0x0010 /* D+ high (line status) */
+#define USBPORTSC_DMINUS 0x0020 /* D- high (line status) */
+#define USBPORTSC_RD 0x0040 /* Resume Detect */
+#define USBPORTSC_RES1 0x0080 /* reserved, always 1 */
+#define USBPORTSC_LSDA 0x0100 /* Low Speed Device Attached */
+#define USBPORTSC_PR 0x0200 /* Port Reset */
+/* OC and OCC from Intel 430TX and later (not UHCI 1.1d spec) */
+#define USBPORTSC_OC 0x0400 /* Over Current condition */
+#define USBPORTSC_OCC 0x0800 /* Over Current Change R/WC */
+#define USBPORTSC_SUSP 0x1000 /* Suspend */
+#define USBPORTSC_RES2 0x2000 /* reserved, write zeroes */
+#define USBPORTSC_RES3 0x4000 /* reserved, write zeroes */
+#define USBPORTSC_RES4 0x8000 /* reserved, write zeroes */
+
+/* Legacy support register */
+#define USBLEGSUP 0xc0
+#define USBLEGSUP_DEFAULT 0x2000 /* only PIRQ enable set */
+#define USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
+#define USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
+
+#define UHCI_PTR_BITS __constant_cpu_to_le32(0x000F)
+#define UHCI_PTR_TERM __constant_cpu_to_le32(0x0001)
+#define UHCI_PTR_QH __constant_cpu_to_le32(0x0002)
+#define UHCI_PTR_DEPTH __constant_cpu_to_le32(0x0004)
+#define UHCI_PTR_BREADTH __constant_cpu_to_le32(0x0000)
+
+#define UHCI_NUMFRAMES 1024 /* in the frame list [array] */
+#define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */
+#define CAN_SCHEDULE_FRAMES 1000 /* how far in the future frames
+ * can be scheduled */
+#define MAX_PHASE 32 /* Periodic scheduling length */
+
+/* Otg Features*/
+#define RH_PORT_BHNP_ENABLE 0x03
+#define RH_PORT_AHNP_SUPPORT 0x04
+#define SF_BHNP_ENABLE 0x34
+
+/* When no queues need Full-Speed Bandwidth Reclamation,
+ * delay this long before turning FSBR off */
+#define FSBR_OFF_DELAY msecs_to_jiffies(10)
+
+/* If a queue hasn't advanced after this much time, assume it is stuck */
+#define QH_WAIT_TIMEOUT msecs_to_jiffies(200)
+
+
+/*
+ * Queue Headers
+ */
+
+/*
+ * One role of a QH is to hold a queue of TDs for some endpoint. One QH goes
+ * with each endpoint, and qh->element (updated by the HC) is either:
+ * - the next unprocessed TD in the endpoint's queue, or
+ * - UHCI_PTR_TERM (when there's no more traffic for this endpoint).
+ *
+ * The other role of a QH is to serve as a "skeleton" framelist entry, so we
+ * can easily splice a QH for some endpoint into the schedule at the right
+ * place. Then qh->element is UHCI_PTR_TERM.
+ *
+ * In the schedule, qh->link maintains a list of QHs seen by the HC:
+ * skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ...
+ *
+ * qh->node is the software equivalent of qh->link. The differences
+ * are that the software list is doubly-linked and QHs in the UNLINKING
+ * state are on the software list but not the hardware schedule.
+ *
+ * For bookkeeping purposes we maintain QHs even for Isochronous endpoints,
+ * but they never get added to the hardware schedule.
+ */
+#define QH_STATE_IDLE 1 /* QH is not being used */
+#define UHCI_QH_STATE_IDLE 1 /* QH is not being used */
+#define QH_STATE_UNLINKING 2 /* QH has been removed from the
+ * schedule but the hardware may
+ * still be using it */
+#define QH_STATE_ACTIVE 3 /* QH is on the schedule */
+
+struct uhci_qh {
+ /* Hardware fields -- read/written by the HC via DMA; they must be
+ * the first two words and the struct must stay 16-byte aligned
+ * (see the aligned attribute below). */
+ __le32 link; /* Next QH in the schedule */
+ __le32 element; /* Queue element (TD) pointer */
+
+ /* Software fields */
+ dma_addr_t dma_handle;
+
+ struct list_head node; /* Node in the list of QHs */
+ struct usb_host_endpoint *hep; /* Endpoint information */
+ struct usb_device *udev;
+ struct list_head queue; /* Queue of urbps for this QH */
+ struct uhci_td *dummy_td; /* Dummy TD to end the queue */
+ struct uhci_td *post_td; /* Last TD completed */
+
+ struct usb_iso_packet_descriptor *iso_packet_desc;
+ /* Next urb->iso_frame_desc entry */
+ unsigned long advance_jiffies; /* Time of last queue advance */
+ unsigned int unlink_frame; /* When the QH was unlinked */
+ unsigned int period; /* For Interrupt and Isochronous QHs */
+ short phase; /* Between 0 and period-1 */
+ short load; /* Periodic time requirement, in us */
+ unsigned int iso_frame; /* Frame # for iso_packet_desc */
+
+ int state; /* QH_STATE_xxx; see above */
+ int type; /* Queue type (control, bulk, etc) */
+ int skel; /* Skeleton queue number */
+
+ unsigned int initial_toggle:1; /* Endpoint's current toggle value */
+ unsigned int needs_fixup:1; /* Must fix the TD toggle values */
+ unsigned int is_stopped:1; /* Queue was stopped by error/unlink */
+ unsigned int wait_expired:1; /* QH_WAIT_TIMEOUT has expired */
+ unsigned int bandwidth_reserved:1; /* Periodic bandwidth has
+ * been allocated */
+} __attribute__((aligned(16)));
+
+/*
+ * We need a special accessor for the element pointer because it is
+ * subject to asynchronous updates by the controller.
+ */
+/*
+ * Snapshot the QH element pointer.  The controller updates this field
+ * asynchronously, so we take one read and fence it off with a
+ * compiler barrier before anything uses the value.
+ */
+static inline __le32 qh_element(struct uhci_qh *qh)
+{
+ __le32 snapshot = qh->element;
+
+ barrier();
+ return snapshot;
+}
+
+#define LINK_TO_QH(qh) (UHCI_PTR_QH | cpu_to_le32((qh)->dma_handle))
+
+
+/*
+ * Transfer Descriptors
+ */
+
+/*
+ * for TD <status>:
+ */
+#define TD_CTRL_SPD (1 << 29) /* Short Packet Detect */
+#define TD_CTRL_C_ERR_MASK (3 << 27) /* Error Counter bits */
+#define TD_CTRL_C_ERR_SHIFT 27
+#define TD_CTRL_LS (1 << 26) /* Low Speed Device */
+#define TD_CTRL_IOS (1 << 25) /* Isochronous Select */
+#define TD_CTRL_IOC (1 << 24) /* Interrupt on Complete */
+#define TD_CTRL_ACTIVE (1 << 23) /* TD Active */
+#define TD_CTRL_STALLED (1 << 22) /* TD Stalled */
+#define TD_CTRL_DBUFERR (1 << 21) /* Data Buffer Error */
+#define TD_CTRL_BABBLE (1 << 20) /* Babble Detected */
+#define TD_CTRL_NAK (1 << 19) /* NAK Received */
+#define TD_CTRL_CRCTIMEO (1 << 18) /* CRC/Time Out Error */
+#define TD_CTRL_BITSTUFF (1 << 17) /* Bit Stuff Error */
+#define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */
+
+#define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
+ TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \
+ TD_CTRL_BITSTUFF)
+
+#define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT)
+#define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xF60000)
+#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & \
+ TD_CTRL_ACTLEN_MASK) /* 1-based */
+
+/*
+ * for TD <info>: (a.k.a. Token)
+ */
+#define td_token(td) le32_to_cpu((td)->token)
+#define TD_TOKEN_DEVADDR_SHIFT 8
+#define TD_TOKEN_TOGGLE_SHIFT 19
+#define TD_TOKEN_TOGGLE (1 << 19)
+#define TD_TOKEN_EXPLEN_SHIFT 21
+#define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n-1 */
+#define TD_TOKEN_PID_MASK 0xFF
+
+#define uhci_explen(len) ((((len) - 1) & TD_TOKEN_EXPLEN_MASK) << \
+ TD_TOKEN_EXPLEN_SHIFT)
+
+#define uhci_expected_length(token) ((((token) >> TD_TOKEN_EXPLEN_SHIFT) + \
+ 1) & TD_TOKEN_EXPLEN_MASK)
+#define uhci_toggle(token) (((token) >> TD_TOKEN_TOGGLE_SHIFT) & 1)
+#define uhci_endpoint(token) (((token) >> 15) & 0xf)
+#define uhci_devaddr(token) (((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7f)
+#define uhci_devep(token) (((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7ff)
+#define uhci_packetid(token) ((token) & TD_TOKEN_PID_MASK)
+#define uhci_packetout(token) (uhci_packetid(token) != USB_PID_IN)
+#define uhci_packetin(token) (uhci_packetid(token) == USB_PID_IN)
+
+/*
+ * The documentation says "4 words for hardware, 4 words for software".
+ *
+ * That's silly, the hardware doesn't care. The hardware only cares that
+ * the hardware words are 16-byte aligned, and we can have any amount of
+ * sw space after the TD entry.
+ *
+ * td->link points to either another TD (not necessarily for the same urb or
+ * even the same endpoint), or nothing (PTR_TERM), or a QH.
+ */
+struct uhci_td {
+ /* Hardware fields -- the four words the HC reads via DMA; the
+ * struct must stay 16-byte aligned (see attribute below). */
+ __le32 link;
+ __le32 status;
+ __le32 token;
+ __le32 buffer;
+
+ /* Software fields */
+ dma_addr_t dma_handle; /* DMA address of the hardware words */
+
+ struct list_head list; /* Node in the urb_priv's td_list */
+
+ int frame; /* for iso: what frame? */
+ struct list_head fl_list; /* Node in a frame-list entry's TD chain */
+} __attribute__((aligned(16)));
+
+/*
+ * We need a special accessor for the control/status word because it is
+ * subject to asynchronous updates by the controller.
+ */
+/*
+ * Snapshot a TD's control/status word (updated asynchronously by the
+ * controller) and return it in CPU byte order.  The compiler barrier
+ * keeps later reads from being reordered before the snapshot.
+ */
+static inline u32 td_status(struct uhci_td *td)
+{
+ __le32 raw = td->status;
+
+ barrier();
+ return le32_to_cpu(raw);
+}
+
+#define LINK_TO_TD(td) (cpu_to_le32((td)->dma_handle))
+
+
+/*
+ * Skeleton Queue Headers
+ */
+
+/*
+ * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for
+ * automatic queuing. To make it easy to insert entries into the schedule,
+ * we have a skeleton of QHs for each predefined Interrupt latency.
+ * Asynchronous QHs (low-speed control, full-speed control, and bulk)
+ * go onto the period-1 interrupt list, since they all get accessed on
+ * every frame.
+ *
+ * When we want to add a new QH, we add it to the list starting from the
+ * appropriate skeleton QH. For instance, the schedule can look like this:
+ *
+ * skel int128 QH
+ * dev 1 interrupt QH
+ * dev 5 interrupt QH
+ * skel int64 QH
+ * skel int32 QH
+ * ...
+ * skel int1 + async QH
+ * dev 5 low-speed control QH
+ * dev 1 bulk QH
+ * dev 2 bulk QH
+ *
+ * There is a special terminating QH used to keep full-speed bandwidth
+ * reclamation active when no full-speed control or bulk QHs are linked
+ * into the schedule. It has an inactive TD (to work around a PIIX bug,
+ * see the Intel errata) and it points back to itself.
+ *
+ * There's a special skeleton QH for Isochronous QHs which never appears
+ * on the schedule. Isochronous TDs go on the schedule before the
+ * the skeleton QHs. The hardware accesses them directly rather than
+ * through their QH, which is used only for bookkeeping purposes.
+ * While the UHCI spec doesn't forbid the use of QHs for Isochronous,
+ * it doesn't use them either. And the spec says that queues never
+ * advance on an error completion status, which makes them totally
+ * unsuitable for Isochronous transfers.
+ *
+ * There's also a special skeleton QH used for QHs which are in the process
+ * of unlinking and so may still be in use by the hardware. It too never
+ * appears on the schedule.
+ */
+
+#define UHCI_NUM_SKELQH 11
+#define SKEL_UNLINK 0
+#define skel_unlink_qh skelqh[SKEL_UNLINK]
+#define SKEL_ISO 1
+#define skel_iso_qh skelqh[SKEL_ISO]
+ /* int128, int64, ..., int1 = 2, 3, ..., 9 */
+#define SKEL_INDEX(exponent) (9 - exponent)
+#define SKEL_ASYNC 9
+#define skel_async_qh skelqh[SKEL_ASYNC]
+#define SKEL_TERM 10
+#define skel_term_qh skelqh[SKEL_TERM]
+
+/* The following entries refer to sublists of skel_async_qh */
+#define SKEL_LS_CONTROL 20
+#define SKEL_FS_CONTROL 21
+#define SKEL_FSBR SKEL_FS_CONTROL
+#define SKEL_BULK 22
+
+/*
+ * The UHCI controller and root hub
+ */
+
+/*
+ * States for the root hub:
+ *
+ * To prevent "bouncing" in the presence of electrical noise,
+ * when there are no devices attached we delay for 1 second in the
+ * RUNNING_NODEVS state before switching to the AUTO_STOPPED state.
+ *
+ * (Note that the AUTO_STOPPED state won't be necessary once the hub
+ * driver learns to autosuspend.)
+ */
+/* Ordering matters: code compares states numerically (e.g.
+ * uhci_pci_suspend()'s "rh_state > UHCI_RH_SUSPENDED" check), so the
+ * halted-required states must stay first and the running states last. */
+enum uhci_rh_state {
+ /* In the following states the HC must be halted.
+ * These two must come first. */
+ UHCI_RH_RESET,
+ UHCI_RH_SUSPENDED,
+
+ UHCI_RH_AUTO_STOPPED,
+ UHCI_RH_RESUMING,
+
+ /* In this state the HC changes from running to halted,
+ * so it can legally appear either way. */
+ UHCI_RH_SUSPENDING,
+
+ /* In the following states it's an error if the HC is halted.
+ * These two must come last. */
+ UHCI_RH_RUNNING, /* The normal state */
+ UHCI_RH_RUNNING_NODEVS, /* Running with no devices attached */
+};
+
+/*
+ * The full UHCI controller information:
+ */
+struct uhci_hcd {
+
+ /* debugfs */
+ struct dentry *dentry;
+
+ /* Grabbed from PCI */
+ unsigned long io_addr; /* in this port: the ioremap()ed base,
+ * kept as an integer for iounmap() */
+
+ struct dma_pool *qh_pool;
+ struct dma_pool *td_pool;
+
+ struct uhci_td *term_td; /* Terminating TD, see UHCI bug */
+ struct uhci_qh *skelqh[UHCI_NUM_SKELQH]; /* Skeleton QHs */
+ struct uhci_qh *next_qh; /* Next QH to scan */
+
+ spinlock_t lock;
+
+ dma_addr_t frame_dma_handle; /* Hardware frame list */
+ __le32 *frame;
+ void **frame_cpu; /* CPU's frame list */
+
+ enum uhci_rh_state rh_state;
+ unsigned long auto_stop_time; /* When to AUTO_STOP */
+
+ unsigned int frame_number; /* As of last check */
+ unsigned int is_stopped;
+#define UHCI_IS_STOPPED 9999 /* Larger than a frame # */
+ unsigned int last_iso_frame; /* Frame of last scan */
+ unsigned int cur_iso_frame; /* Frame for current scan */
+
+ unsigned int scan_in_progress:1; /* Schedule scan is running */
+ unsigned int need_rescan:1; /* Redo the schedule scan */
+ unsigned int dead:1; /* Controller has died */
+ unsigned int RD_enable:1; /* Suspended root hub with
+ Resume-Detect interrupts
+ enabled */
+ unsigned int is_initialized:1; /* Data structure is usable */
+ unsigned int fsbr_is_on:1; /* FSBR is turned on */
+ unsigned int fsbr_is_wanted:1; /* Does any URB want FSBR? */
+ unsigned int fsbr_expiring:1; /* FSBR is timing out */
+
+ struct timer_list fsbr_timer; /* For turning off FBSR */
+
+ /* Support for port suspend/resume/reset */
+ unsigned long port_c_suspend; /* Bit-arrays of ports */
+ unsigned long resuming_ports;
+ unsigned long ports_timeout; /* Time to stop signalling */
+
+ struct list_head idle_qh_list; /* Where the idle QHs live */
+
+ int rh_numports; /* Number of root-hub ports */
+
+ wait_queue_head_t waitqh; /* endpoint_disable waiters */
+ int num_waiting; /* Number of waiters */
+
+ int total_load; /* Sum of array values */
+ short load[MAX_PHASE]; /* Periodic allocations */
+ int is_suspended;
+ /* MMIO register base as a u32 pointer: register offsets in this
+ * port (USBCMD, USBSTS, ...) are 32-bit word indices added to
+ * this pointer, not byte offsets */
+ unsigned int *regbase; // eric
+ unsigned int *ptr_usb_hcd; // eric
+};
+
+/* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
+/* Convert between a usb_hcd pointer and the corresponding uhci_hcd:
+ * the uhci_hcd lives in the hcd_priv[] area of the generic HCD (its
+ * size was declared via hcd_priv_size in the hc_driver table). */
+static inline struct uhci_hcd *hcd_to_uhci(struct usb_hcd *hcd)
+{
+ return (struct uhci_hcd *) (hcd->hcd_priv);
+}
+static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci)
+{
+ return container_of((void *) uhci, struct usb_hcd, hcd_priv);
+}
+
+#define uhci_dev(u) (uhci_to_hcd(u)->self.controller)
+
+/* Utility macro for comparing frame numbers */
+#define uhci_frame_before_eq(f1, f2) (0 <= (int) ((f2) - (f1)))
+
+
+/*
+ * Private per-URB data
+ */
+/* Driver-private state attached to each URB (allocated from the
+ * uhci_up_cachep slab cache created in uhci_hcd_init()). */
+struct urb_priv {
+ struct list_head node; /* Node in the QH's urbp list */
+
+ struct urb *urb; /* Back-pointer to the owning URB */
+
+ struct uhci_qh *qh; /* QH for this URB */
+ struct list_head td_list; /* TDs generated for this URB */
+
+ unsigned fsbr:1; /* URB wants FSBR */
+};
+
+
+/* Some special IDs */
+
+#define PCI_VENDOR_ID_GENESYS 0x17a0
+#define PCI_DEVICE_ID_GL880S_UHCI 0x8083
+
+#endif
diff --git a/drivers/usb/astuhci/uhci-hub.c b/drivers/usb/astuhci/uhci-hub.c new file mode 100644 index 000000000000..64a44969f107 --- /dev/null +++ b/drivers/usb/astuhci/uhci-hub.c @@ -0,0 +1,437 @@ +/********************************************************************************
+* File Name : uhci-hub.c
+*
+* port from uhci-hub.c
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by the Free Software Foundation;
+* either version 2 of the License, or (at your option) any later version.
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+* without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+********************************************************************************/
+
+/* Canned hub descriptor returned for GetHubDescriptor on the root hub.
+ * bNbrPorts (offset 2) is overwritten at runtime with rh_numports. */
+static const __u8 root_hub_hub_des[] =
+{
+ 0x09, /* __u8 bLength; */
+ 0x29, /* __u8 bDescriptorType; Hub-descriptor */
+ 0x02, /* __u8 bNbrPorts; */
+ 0x0a, /* __u16 wHubCharacteristics; */
+ 0x00, /* (per-port OC, no power switching) */
+ 0x01, /* __u8 bPwrOn2pwrGood; 2ms */
+ 0x00, /* __u8 bHubContrCurrent; 0 mA */
+ 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
+ 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
+};
+
+#define UHCI_RH_MAXCHILD 7
+
+/* must write as zeroes */
+#define WZ_BITS (USBPORTSC_RES2 | USBPORTSC_RES3 | USBPORTSC_RES4)
+
+/* status change bits: nonzero writes will clear */
+#define RWC_BITS (USBPORTSC_OCC | USBPORTSC_PEC | USBPORTSC_CSC)
+
+/* suspend/resume bits: port suspended or port resuming */
+#define SUSPEND_BITS (USBPORTSC_SUSP | USBPORTSC_RD)
+
+/* Report whether any root-hub port is connected, has a pending
+ * change bit, or has a recorded suspend change; any such port keeps
+ * us from AUTO_STOPPING the controller.
+ */
+static int any_ports_active(struct uhci_hcd *uhci)
+{
+ int i;
+
+ for (i = 0; i < uhci->rh_numports; ++i) {
+ unsigned int portsc;
+
+ /* MMIO register read; ports are one 32-bit word apart
+ * here (the PCI original used inw() with a 2-byte stride). */
+ portsc = readl(uhci->regbase + USBPORTSC1 + i);
+ if ((portsc & (USBPORTSC_CCS | RWC_BITS)) ||
+ test_bit(i, &uhci->port_c_suspend))
+ return 1;
+ }
+ return 0;
+}
+
+/* Build the root hub's status-change bitmap in *buf (bit N+1 set when
+ * port N has a change to report); returns nonzero if any bit is set. */
+static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf)
+{
+ int port;
+ int mask = RWC_BITS;
+
+ /* Some boards (both VIA and Intel apparently) report bogus
+ * overcurrent indications, causing massive log spam unless
+ * we completely ignore them. This doesn't seem to be a problem
+ * with the chipset so much as with the way it is connected on
+ * the motherboard; if the overcurrent input is left to float
+ * then it may constantly register false positives. */
+ if (ignore_oc)
+ mask &= ~USBPORTSC_OCC;
+
+ *buf = 0;
+ for (port = 0; port < uhci->rh_numports; ++port) {
+//yriver
+// if ((inw(uhci->io_addr + USBPORTSC1 + port * 2) & mask) ||
+ if ((readl(uhci->regbase + USBPORTSC1 + port * 1) & mask) ||
+ test_bit(port, &uhci->port_c_suspend))
+ *buf |= (1 << (port + 1));
+ }
+ return !!*buf;
+}
+
+/* Finish a uhci_hub_control() case: record the reply length and break. */
+#define OK(x) len = (x); break
+//yriver
+/* Read-modify-write helpers for a port status/control register.
+ * Both expand in-place and rely on locals named "status" and
+ * "port_addr" existing in the calling scope. RWC (write-one-to-clear)
+ * bits are masked first so an unrelated write doesn't clear a pending
+ * change; WZ bits must always be written as zero per the UHCI spec. */
+#define CLR_RH_PORTSTAT(x) \
+ status = readl(port_addr); \
+ status &= ~(RWC_BITS|WZ_BITS); \
+ status &= ~(x); \
+ status |= RWC_BITS & (x); \
+ writel(status, port_addr)
+
+#define SET_RH_PORTSTAT(x) \
+ status = readl(port_addr); \
+ status |= (x); \
+ status &= ~(RWC_BITS|WZ_BITS); \
+ writel(status, port_addr)
+
+/* UHCI controllers don't automatically stop resume signalling after 20 msec,
+ * so we have to poll and check timeouts in order to take care of it.
+ */
+/* NOTE(review): port_addr is an MMIO address carried in an unsigned
+ * long and handed to readl()/writel(); confirm the cast chain against
+ * the callers in uhci_check_ports()/uhci_hub_control(). The local
+ * "status" below is consumed by the CLR_RH_PORTSTAT() macro. */
+static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
+ unsigned long port_addr)
+{
+ int status;
+ int i;
+//yriver
+// if (inw(port_addr) & SUSPEND_BITS) {
+ if (readl(port_addr) & SUSPEND_BITS) {
+ CLR_RH_PORTSTAT(SUSPEND_BITS);
+ if (test_bit(port, &uhci->resuming_ports))
+ set_bit(port, &uhci->port_c_suspend);
+
+ /* The controller won't actually turn off the RD bit until
+ * it has had a chance to send a low-speed EOP sequence,
+ * which is supposed to take 3 bit times (= 2 microseconds).
+ * Experiments show that some controllers take longer, so
+ * we'll poll for completion. */
+ for (i = 0; i < 10; ++i) {
+//yriver
+// if (!(inw(port_addr) & SUSPEND_BITS))
+ if (!(readl(port_addr) & SUSPEND_BITS))
+ break;
+ udelay(1);
+ }
+ }
+ clear_bit(port, &uhci->resuming_ports);
+}
+
+/* Wait for the UHCI controller in HP's iLO2 server management chip.
+ * It can take up to 250 us to finish a reset and set the CSC bit.
+ */
+static void wait_for_HP(unsigned long port_addr)
+{
+ int waited = 10;
+
+ while (waited < 250) {
+ if (readl(port_addr) & USBPORTSC_CSC)
+ return;
+ udelay(10);
+ waited += 10;
+ }
+ /* Log a warning? */
+}
+
+/* Poll every root-hub port: complete any port reset whose timeout has
+ * expired, and track resume-detect (wakeup) signalling per USB 2.0
+ * 7.1.7.7. The locals "status" and "port_addr" are consumed by the
+ * CLR_RH_PORTSTAT()/SET_RH_PORTSTAT() macros. */
+static void uhci_check_ports(struct uhci_hcd *uhci)
+{
+ unsigned int port;
+ unsigned long port_addr;
+ int status;
+
+ for (port = 0; port < uhci->rh_numports; ++port) {
+//yriver
+// port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
+// status = inw(port_addr);
+ port_addr = uhci->regbase + USBPORTSC1 + 1 * port;
+ status = readl(port_addr);
+ if (unlikely(status & USBPORTSC_PR)) {
+ if (time_after_eq(jiffies, uhci->ports_timeout)) {
+ CLR_RH_PORTSTAT(USBPORTSC_PR);
+ udelay(10);
+
+ /* HP's server management chip requires
+ * a longer delay. */
+ if (to_pci_dev(uhci_dev(uhci))->vendor ==
+ PCI_VENDOR_ID_HP)
+ wait_for_HP(port_addr);
+
+ /* If the port was enabled before, turning
+ * reset on caused a port enable change.
+ * Turning reset off causes a port connect
+ * status change. Clear these changes. */
+ CLR_RH_PORTSTAT(USBPORTSC_CSC | USBPORTSC_PEC);
+ SET_RH_PORTSTAT(USBPORTSC_PE);
+ }
+ }
+ if (unlikely(status & USBPORTSC_RD)) {
+ if (!test_bit(port, &uhci->resuming_ports)) {
+
+ /* Port received a wakeup request */
+ set_bit(port, &uhci->resuming_ports);
+ uhci->ports_timeout = jiffies +
+ msecs_to_jiffies(20);
+
+ /* Make sure we see the port again
+ * after the resuming period is over. */
+ mod_timer(&uhci_to_hcd(uhci)->rh_timer,
+ uhci->ports_timeout);
+ } else if (time_after_eq(jiffies,
+ uhci->ports_timeout)) {
+ uhci_finish_suspend(uhci, port, port_addr);
+ }
+ }
+ }
+}
+
+/* hub_status_data hook: scan the schedule, service the ports and
+ * return the status-change bitmap in *buf; also drives the root-hub
+ * suspend/auto-stop state machine. Note the AUTO_STOP transition was
+ * deliberately commented out by the porter ("yriver") below. */
+static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ unsigned long flags;
+ int status = 0;
+
+ spin_lock_irqsave(&uhci->lock, flags);
+
+ uhci_scan_schedule(uhci);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ goto done;
+ uhci_check_ports(uhci);
+
+ status = get_hub_status_data(uhci, buf);
+
+ switch (uhci->rh_state) {
+ case UHCI_RH_SUSPENDING:
+ case UHCI_RH_SUSPENDED:
+ /* if port change, ask to be resumed */
+ if (status)
+ usb_hcd_resume_root_hub(hcd);
+ break;
+
+ case UHCI_RH_AUTO_STOPPED:
+ /* if port change, auto start */
+ if (status)
+ wakeup_rh(uhci);
+ break;
+
+ case UHCI_RH_RUNNING:
+ /* are any devices attached? */
+ if (!any_ports_active(uhci)) {
+ uhci->rh_state = UHCI_RH_RUNNING_NODEVS;
+ uhci->auto_stop_time = jiffies + HZ;
+ }
+ break;
+
+ case UHCI_RH_RUNNING_NODEVS:
+ /* auto-stop if nothing connected for 1 second */
+ if (any_ports_active(uhci))
+ uhci->rh_state = UHCI_RH_RUNNING;
+//yriver
+// else if (time_after_eq(jiffies, uhci->auto_stop_time))
+// suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
+ break;
+
+ default:
+ break;
+ }
+
+done:
+ spin_unlock_irqrestore(&uhci->lock, flags);
+ return status;
+}
+
+/* size of returned buffer is part of USB spec */
+/* hub_control hook: implement the root-hub class requests
+ * (Get/Set/ClearPortFeature, GetPortStatus, GetHubDescriptor, ...).
+ * Control flow relies on the OK(x) macro, which sets "len" and breaks
+ * out of the inner switch, and on CLR_/SET_RH_PORTSTAT, which use the
+ * locals "status" and "port_addr". Returns 0 or -EPIPE (request
+ * error), or -ETIMEDOUT when the controller is dead/inaccessible. */
+static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ int status, lstatus, retval = 0, len = 0;
+//yriver
+// unsigned int port = wIndex - 1;
+// unsigned long port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
+ unsigned int port = wIndex - 1;
+ unsigned long port_addr = uhci->regbase + USBPORTSC1 + 1 * port;
+ u16 wPortChange, wPortStatus;
+ unsigned long flags;
+
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ return -ETIMEDOUT;
+
+ spin_lock_irqsave(&uhci->lock, flags);
+ switch (typeReq) {
+
+ case GetHubStatus:
+ *(__le32 *)buf = cpu_to_le32(0);
+ OK(4); /* hub power */
+ case GetPortStatus:
+ if (port >= uhci->rh_numports)
+ goto err;
+
+ uhci_check_ports(uhci);
+//yriver
+// status = inw(port_addr);
+ status = readl(port_addr);
+
+ /* Intel controllers report the OverCurrent bit active on.
+ * VIA controllers report it active off, so we'll adjust the
+ * bit value. (It's not standardized in the UHCI spec.)
+ */
+ if (to_pci_dev(hcd->self.controller)->vendor ==
+ PCI_VENDOR_ID_VIA)
+ status ^= USBPORTSC_OC;
+
+ /* UHCI doesn't support C_RESET (always false) */
+ wPortChange = lstatus = 0;
+ if (status & USBPORTSC_CSC)
+ wPortChange |= USB_PORT_STAT_C_CONNECTION;
+ if (status & USBPORTSC_PEC)
+ wPortChange |= USB_PORT_STAT_C_ENABLE;
+ if ((status & USBPORTSC_OCC) && !ignore_oc)
+ wPortChange |= USB_PORT_STAT_C_OVERCURRENT;
+
+ if (test_bit(port, &uhci->port_c_suspend)) {
+ wPortChange |= USB_PORT_STAT_C_SUSPEND;
+ lstatus |= 1;
+ }
+ if (test_bit(port, &uhci->resuming_ports))
+ lstatus |= 4;
+
+ /* UHCI has no power switching (always on) */
+ wPortStatus = USB_PORT_STAT_POWER;
+ if (status & USBPORTSC_CCS)
+ wPortStatus |= USB_PORT_STAT_CONNECTION;
+ if (status & USBPORTSC_PE) {
+ wPortStatus |= USB_PORT_STAT_ENABLE;
+ if (status & SUSPEND_BITS)
+ wPortStatus |= USB_PORT_STAT_SUSPEND;
+ }
+ if (status & USBPORTSC_OC)
+ wPortStatus |= USB_PORT_STAT_OVERCURRENT;
+ if (status & USBPORTSC_PR)
+ wPortStatus |= USB_PORT_STAT_RESET;
+ if (status & USBPORTSC_LSDA)
+ wPortStatus |= USB_PORT_STAT_LOW_SPEED;
+
+ if (wPortChange)
+ dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
+ wIndex, status, lstatus);
+
+ /* Reply is wPortStatus then wPortChange, little-endian */
+ *(__le16 *)buf = cpu_to_le16(wPortStatus);
+ *(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
+ OK(4);
+ case SetHubFeature: /* We don't implement these */
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ OK(0);
+ default:
+ goto err;
+ }
+ break;
+ case SetPortFeature:
+ if (port >= uhci->rh_numports)
+ goto err;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ SET_RH_PORTSTAT(USBPORTSC_SUSP);
+ OK(0);
+ case USB_PORT_FEAT_RESET:
+ SET_RH_PORTSTAT(USBPORTSC_PR);
+
+ /* Reset terminates Resume signalling */
+ uhci_finish_suspend(uhci, port, port_addr);
+
+ /* USB v2.0 7.1.7.5 */
+ uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
+ OK(0);
+ case USB_PORT_FEAT_POWER:
+ /* UHCI has no power switching */
+ OK(0);
+ default:
+ goto err;
+ }
+ break;
+ case ClearPortFeature:
+ if (port >= uhci->rh_numports)
+ goto err;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ CLR_RH_PORTSTAT(USBPORTSC_PE);
+
+ /* Disable terminates Resume signalling */
+ uhci_finish_suspend(uhci, port, port_addr);
+ OK(0);
+ case USB_PORT_FEAT_C_ENABLE:
+ CLR_RH_PORTSTAT(USBPORTSC_PEC);
+ OK(0);
+ case USB_PORT_FEAT_SUSPEND:
+//yriver
+// if (!(inw(port_addr) & USBPORTSC_SUSP)) {
+ if (!(readl(port_addr) & USBPORTSC_SUSP)) {
+
+ /* Make certain the port isn't suspended */
+ uhci_finish_suspend(uhci, port, port_addr);
+ } else if (!test_and_set_bit(port,
+ &uhci->resuming_ports)) {
+ SET_RH_PORTSTAT(USBPORTSC_RD);
+
+ /* The controller won't allow RD to be set
+ * if the port is disabled. When this happens
+ * just skip the Resume signalling.
+ */
+//yriver
+// if (!(inw(port_addr) & USBPORTSC_RD))
+ if (!(readl(port_addr) & USBPORTSC_RD))
+ uhci_finish_suspend(uhci, port,
+ port_addr);
+ else
+ /* USB v2.0 7.1.7.7 */
+ uhci->ports_timeout = jiffies +
+ msecs_to_jiffies(20);
+ }
+ OK(0);
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(port, &uhci->port_c_suspend);
+ OK(0);
+ case USB_PORT_FEAT_POWER:
+ /* UHCI has no power switching */
+ goto err;
+ case USB_PORT_FEAT_C_CONNECTION:
+ CLR_RH_PORTSTAT(USBPORTSC_CSC);
+ OK(0);
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ CLR_RH_PORTSTAT(USBPORTSC_OCC);
+ OK(0);
+ case USB_PORT_FEAT_C_RESET:
+ /* this driver won't report these */
+ OK(0);
+ default:
+ goto err;
+ }
+ break;
+ case GetHubDescriptor:
+ len = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
+ memcpy(buf, root_hub_hub_des, len);
+ if (len > 2)
+ buf[2] = uhci->rh_numports;
+ OK(len);
+ default:
+err:
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&uhci->lock, flags);
+
+ return retval;
+}
diff --git a/drivers/usb/astuhci/uhci-q.c b/drivers/usb/astuhci/uhci-q.c new file mode 100644 index 000000000000..eb24599be21e --- /dev/null +++ b/drivers/usb/astuhci/uhci-q.c @@ -0,0 +1,1760 @@ +/********************************************************************************
+* File Name : uhci-q.c
+*
+* Ported from the mainline Linux uhci-q.c schedule/queue code.
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by the Free Software Foundation;
+* either version 2 of the License, or (at your option) any later version.
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+* without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+********************************************************************************/
+/* Ask for an interrupt at the end of the next frame by setting IOC on
+ * the terminating TD; if the HC is stopped, kick the root-hub timer
+ * so the request is noticed immediately. */
+static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
+{
+ struct uhci_td *term = uhci->term_td;
+
+ if (uhci->is_stopped)
+ mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
+ term->status |= cpu_to_le32(TD_CTRL_IOC);
+}
+
+/* Withdraw the end-of-frame interrupt request set by
+ * uhci_set_next_interrupt(). */
+static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
+{
+ uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
+}
+
+
+/*
+ * Full-Speed Bandwidth Reclamation (FSBR).
+ * We turn on FSBR whenever a queue that wants it is advancing,
+ * and leave it on for a short time thereafter.
+ */
+/* Enable FSBR by closing the schedule loop: point the last async QH
+ * back at the terminating skeleton QH (hardware-visible link write). */
+static void uhci_fsbr_on(struct uhci_hcd *uhci)
+{
+ struct uhci_qh *lqh;
+
+ /* The terminating skeleton QH always points back to the first
+ * FSBR QH. Make the last async QH point to the terminating
+ * skeleton QH. */
+ uhci->fsbr_is_on = 1;
+ lqh = list_entry(uhci->skel_async_qh->node.prev,
+ struct uhci_qh, node);
+ lqh->link = LINK_TO_QH(uhci->skel_term_qh);
+}
+
+/* Disable FSBR by terminating the last async QH's link, breaking the
+ * loop created by uhci_fsbr_on(). */
+static void uhci_fsbr_off(struct uhci_hcd *uhci)
+{
+ struct uhci_qh *lqh;
+
+ /* Remove the link from the last async QH to the terminating
+ * skeleton QH. */
+ uhci->fsbr_is_on = 0;
+ lqh = list_entry(uhci->skel_async_qh->node.prev,
+ struct uhci_qh, node);
+ lqh->link = UHCI_PTR_TERM;
+}
+
+/* Flag an URB as wanting Full-Speed Bandwidth Reclamation, unless the
+ * submitter opted out via URB_NO_FSBR. */
+static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
+{
+ struct urb_priv *priv = urb->hcpriv;
+
+ if ((urb->transfer_flags & URB_NO_FSBR) == 0)
+ priv->fsbr = 1;
+}
+
+/* An FSBR-wanting URB is making progress: make sure FSBR is enabled
+ * and cancel any pending FSBR-off expiration timer. */
+static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
+{
+ if (!urbp->fsbr)
+ return;
+
+ uhci->fsbr_is_wanted = 1;
+ if (!uhci->fsbr_is_on)
+ uhci_fsbr_on(uhci);
+ else if (uhci->fsbr_expiring) {
+ uhci->fsbr_expiring = 0;
+ del_timer(&uhci->fsbr_timer);
+ }
+}
+
+/* FSBR expiration timer callback (old timer API: opaque unsigned long
+ * argument is the uhci_hcd pointer): turn FSBR off if the expiration
+ * is still pending when the timer fires. */
+static void uhci_fsbr_timeout(unsigned long _uhci)
+{
+ struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uhci->lock, flags);
+ if (uhci->fsbr_expiring) {
+ uhci->fsbr_expiring = 0;
+ uhci_fsbr_off(uhci);
+ }
+ spin_unlock_irqrestore(&uhci->lock, flags);
+}
+
+
+/* Allocate a transfer descriptor from the TD DMA pool and initialize
+ * its software bookkeeping; returns NULL on allocation failure. */
+static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
+{
+ struct uhci_td *td;
+ dma_addr_t handle;
+
+ td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &handle);
+ if (td) {
+ td->dma_handle = handle;
+ td->frame = -1; /* not yet in the frame list */
+ INIT_LIST_HEAD(&td->list);
+ INIT_LIST_HEAD(&td->fl_list);
+ }
+ return td;
+}
+
+/* Return a TD to the DMA pool; warn if it is still linked into an
+ * URB's td_list or the frame list (indicates a lifecycle bug). */
+static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
+{
+ if (!list_empty(&td->list))
+ dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
+ if (!list_empty(&td->fl_list))
+ dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
+
+ dma_pool_free(uhci->td_pool, td, td->dma_handle);
+}
+
+/* Fill in a TD's three hardware words (stored little-endian). */
+static inline void uhci_fill_td(struct uhci_td *td, u32 status,
+ u32 token, u32 buffer)
+{
+ td->status = cpu_to_le32(status);
+ td->token = cpu_to_le32(token);
+ td->buffer = cpu_to_le32(buffer);
+}
+
+/* Append a TD to the tail of its URB's td_list. */
+static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
+{
+ list_add_tail(&td->list, &urbp->td_list);
+}
+
+/* Detach a TD from its URB's td_list (leaves it self-linked so a
+ * later list_empty() check in uhci_free_td() stays valid). */
+static void uhci_remove_td_from_urbp(struct uhci_td *td)
+{
+ list_del_init(&td->list);
+}
+
+/*
+ * We insert Isochronous URBs directly into the frame list at the beginning
+ */
+/* The wmb() calls order the software-side link writes before the
+ * hardware-visible frame/ltd link update, so the HC never follows a
+ * pointer into a half-initialized TD. */
+static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
+ struct uhci_td *td, unsigned framenum)
+{
+ framenum &= (UHCI_NUMFRAMES - 1);
+
+ td->frame = framenum;
+
+ /* Is there a TD already mapped there? */
+ if (uhci->frame_cpu[framenum]) {
+ struct uhci_td *ftd, *ltd;
+
+ ftd = uhci->frame_cpu[framenum];
+ ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
+
+ list_add_tail(&td->fl_list, &ftd->fl_list);
+
+ td->link = ltd->link;
+ wmb();
+ ltd->link = LINK_TO_TD(td);
+ } else {
+ td->link = uhci->frame[framenum];
+ wmb();
+ uhci->frame[framenum] = LINK_TO_TD(td);
+ uhci->frame_cpu[framenum] = td;
+ }
+}
+
+/* Unlink one TD from its frame's TD chain, patching either the frame
+ * pointer (if it was first) or the previous TD's link, then reset its
+ * software bookkeeping (fl_list node and frame index). */
+static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
+ struct uhci_td *td)
+{
+ /* If it's not inserted, don't remove it */
+ if (td->frame == -1) {
+ WARN_ON(!list_empty(&td->fl_list));
+ return;
+ }
+
+ if (uhci->frame_cpu[td->frame] == td) {
+ if (list_empty(&td->fl_list)) {
+ uhci->frame[td->frame] = td->link;
+ uhci->frame_cpu[td->frame] = NULL;
+ } else {
+ struct uhci_td *ntd;
+
+ /* Promote the next TD to be the frame's first */
+ ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
+ uhci->frame[td->frame] = LINK_TO_TD(ntd);
+ uhci->frame_cpu[td->frame] = ntd;
+ }
+ } else {
+ struct uhci_td *ptd;
+
+ /* Bypass this TD in the previous TD's hardware link */
+ ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
+ ptd->link = td->link;
+ }
+
+ list_del_init(&td->fl_list);
+ td->frame = -1;
+}
+
+/* Drop an entire frame's TD chain at once: point the frame at
+ * whatever followed the last TD, then unthread the fl_list nodes. */
+static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
+ unsigned int framenum)
+{
+ struct uhci_td *ftd, *ltd;
+
+ framenum &= (UHCI_NUMFRAMES - 1);
+
+ ftd = uhci->frame_cpu[framenum];
+ if (ftd) {
+ ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
+ uhci->frame[framenum] = ltd->link;
+ uhci->frame_cpu[framenum] = NULL;
+
+ while (!list_empty(&ftd->fl_list))
+ list_del_init(ftd->fl_list.prev);
+ }
+}
+
+/*
+ * Remove all the TDs for an Isochronous URB from the frame list
+ */
+static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
+{
+ struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+ struct uhci_td *td;
+
+ /* Each iso TD lives in its own frame slot; unlink them all */
+ list_for_each_entry(td, &urbp->td_list, list)
+ uhci_remove_td_from_frame_list(uhci, td);
+}
+
+/* Allocate and initialize a queue head from the QH DMA pool.
+ * With udev != NULL this builds a normal endpoint QH (including a
+ * dummy TD for non-isochronous endpoints and a periodic-load estimate
+ * for INT/ISO); with udev == NULL it builds a skeleton QH.
+ * Returns NULL on any allocation failure (the QH is freed again if
+ * the dummy TD cannot be allocated).
+ * NOTE(review): the port renames QH_STATE_IDLE to UHCI_QH_STATE_IDLE
+ * here but still uses QH_STATE_ACTIVE below — confirm both constant
+ * families are defined consistently in the header. */
+static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
+ struct usb_device *udev, struct usb_host_endpoint *hep)
+{
+ dma_addr_t dma_handle;
+ struct uhci_qh *qh;
+
+ qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
+ if (!qh)
+ return NULL;
+
+ memset(qh, 0, sizeof(*qh));
+ qh->dma_handle = dma_handle;
+
+ qh->element = UHCI_PTR_TERM;
+ qh->link = UHCI_PTR_TERM;
+
+ INIT_LIST_HEAD(&qh->queue);
+ INIT_LIST_HEAD(&qh->node);
+
+ if (udev) { /* Normal QH */
+ qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ if (qh->type != USB_ENDPOINT_XFER_ISOC) {
+ qh->dummy_td = uhci_alloc_td(uhci);
+ if (!qh->dummy_td) {
+ dma_pool_free(uhci->qh_pool, qh, dma_handle);
+ return NULL;
+ }
+ }
+//yriver
+// qh->state = QH_STATE_IDLE;
+ qh->state = UHCI_QH_STATE_IDLE;
+ qh->hep = hep;
+ qh->udev = udev;
+ hep->hcpriv = qh;
+
+ if (qh->type == USB_ENDPOINT_XFER_INT ||
+ qh->type == USB_ENDPOINT_XFER_ISOC)
+ qh->load = usb_calc_bus_time(udev->speed,
+ usb_endpoint_dir_in(&hep->desc),
+ qh->type == USB_ENDPOINT_XFER_ISOC,
+ le16_to_cpu(hep->desc.wMaxPacketSize))
+ / 1000 + 1;
+
+ } else { /* Skeleton QH */
+ qh->state = QH_STATE_ACTIVE;
+ qh->type = -1;
+ }
+ return qh;
+}
+
+/* Release a QH back to the DMA pool, along with its dummy TD for a
+ * normal (non-skeleton) QH; warns if it is freed while still active
+ * or with URBs queued. */
+static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+//yriver
+// WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
+ WARN_ON(qh->state != UHCI_QH_STATE_IDLE && qh->udev);
+ if (!list_empty(&qh->queue))
+ dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
+
+ list_del(&qh->node);
+ if (qh->udev) {
+ qh->hep->hcpriv = NULL;
+ if (qh->dummy_td)
+ uhci_free_td(uhci, qh->dummy_td);
+ }
+ dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
+}
+
+/*
+ * When a queue is stopped and a dequeued URB is given back, adjust
+ * the previous TD link (if the URB isn't first on the queue) or
+ * save its toggle value (if it is first and is currently executing).
+ *
+ * Returns 0 if the URB should not yet be given back, 1 otherwise.
+ */
+static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
+ struct urb *urb)
+{
+ struct urb_priv *urbp = urb->hcpriv;
+ struct uhci_td *td;
+ int ret = 1;
+
+ /* Isochronous pipes don't use toggles and their TD link pointers
+ * get adjusted during uhci_urb_dequeue(). But since their queues
+ * cannot truly be stopped, we have to watch out for dequeues
+ * occurring after the nominal unlink frame. */
+ if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+ ret = (uhci->frame_number + uhci->is_stopped !=
+ qh->unlink_frame);
+ goto done;
+ }
+
+ /* If the URB isn't first on its queue, adjust the link pointer
+ * of the last TD in the previous URB. The toggle doesn't need
+ * to be saved since this URB can't be executing yet. */
+ if (qh->queue.next != &urbp->node) {
+ struct urb_priv *purbp;
+ struct uhci_td *ptd;
+
+ purbp = list_entry(urbp->node.prev, struct urb_priv, node);
+ WARN_ON(list_empty(&purbp->td_list));
+ ptd = list_entry(purbp->td_list.prev, struct uhci_td,
+ list);
+ td = list_entry(urbp->td_list.prev, struct uhci_td,
+ list);
+ ptd->link = td->link;
+ goto done;
+ }
+
+ /* If the QH element pointer is UHCI_PTR_TERM then the currently
+ * executing URB has already been unlinked, so this one isn't it. */
+ if (qh_element(qh) == UHCI_PTR_TERM)
+ goto done;
+ qh->element = UHCI_PTR_TERM;
+
+ /* Control pipes don't have to worry about toggles */
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL)
+ goto done;
+
+ /* Save the next toggle value */
+ WARN_ON(list_empty(&urbp->td_list));
+ td = list_entry(urbp->td_list.next, struct uhci_td, list);
+ qh->needs_fixup = 1;
+ qh->initial_toggle = uhci_toggle(td_token(td));
+
+done:
+ return ret;
+}
+
+/*
+ * Fix up the data toggles for URBs in a queue, when one of them
+ * terminates early (short transfer, error, or dequeued).
+ *
+ * When skip_first is set the first URB (the one that ended short) is
+ * left alone and fixups begin with the second URB on the queue. On
+ * exit the endpoint's sw toggle state is updated and needs_fixup is
+ * cleared.
+ */
+static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
+{
+ struct urb_priv *urbp = NULL;
+ struct uhci_td *td;
+ unsigned int toggle = qh->initial_toggle;
+ unsigned int pipe;
+
+ /* Fixups for a short transfer start with the second URB in the
+ * queue (the short URB is the first). */
+ if (skip_first)
+ urbp = list_entry(qh->queue.next, struct urb_priv, node);
+
+ /* When starting with the first URB, if the QH element pointer is
+ * still valid then we know the URB's toggles are okay. */
+ else if (qh_element(qh) != UHCI_PTR_TERM)
+ toggle = 2;
+
+ /* Fix up the toggle for the URBs in the queue. Normally this
+ * loop won't run more than once: When an error or short transfer
+ * occurs, the queue usually gets emptied. */
+ urbp = list_prepare_entry(urbp, &qh->queue, node);
+ list_for_each_entry_continue(urbp, &qh->queue, node) {
+
+ /* If the first TD has the right toggle value, we don't
+ * need to change any toggles in this URB */
+ td = list_entry(urbp->td_list.next, struct uhci_td, list);
+ if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
+ td = list_entry(urbp->td_list.prev, struct uhci_td,
+ list);
+ toggle = uhci_toggle(td_token(td)) ^ 1;
+
+ /* Otherwise all the toggles in the URB have to be switched */
+ } else {
+ list_for_each_entry(td, &urbp->td_list, list) {
+ /* cpu_to_le32() folds to a constant swap here;
+ * the old __constant_cpu_to_le32() spelling is
+ * deprecated (and later removed) upstream. */
+ td->token ^= cpu_to_le32(TD_TOKEN_TOGGLE);
+ toggle ^= 1;
+ }
+ }
+ }
+
+ wmb();
+ pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
+ usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
+ usb_pipeout(pipe), toggle);
+ qh->needs_fixup = 0;
+}
+
+/*
+ * Link an Isochronous QH into its skeleton's list
+ */
+static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ /* Software bookkeeping only: iso TDs sit directly in the frame
+ * list, so no hardware QH link is written here. */
+ list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
+
+ /* Isochronous QHs aren't linked by the hardware */
+}
+
+/*
+ * Link a high-period interrupt QH into the schedule at the end of its
+ * skeleton's list
+ */
+static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct uhci_qh *pqh;
+
+ list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
+
+ /* Splice into the hardware chain after the current tail; wmb()
+ * orders the new QH's link write before it becomes reachable. */
+ pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+ qh->link = pqh->link;
+ wmb();
+ pqh->link = LINK_TO_QH(qh);
+}
+
+/*
+ * Link a period-1 interrupt or async QH into the schedule at the
+ * correct spot in the async skeleton's list, and update the FSBR link
+ */
+static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct uhci_qh *pqh;
+ __le32 link_to_new_qh;
+
+ /* Find the predecessor QH for our new one and insert it in the list.
+ * The list of QHs is expected to be short, so linear search won't
+ * take too long. */
+ list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
+ if (pqh->skel <= qh->skel)
+ break;
+ }
+ list_add(&qh->node, &pqh->node);
+
+ /* Link it into the schedule */
+ qh->link = pqh->link;
+ wmb();
+ link_to_new_qh = LINK_TO_QH(qh);
+ pqh->link = link_to_new_qh;
+
+ /* If this is now the first FSBR QH, link the terminating skeleton
+ * QH to it. */
+ if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+ uhci->skel_term_qh->link = link_to_new_qh;
+}
+
+/*
+ * Put a QH on the schedule in both hardware and software
+ */
+static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ WARN_ON(list_empty(&qh->queue));
+
+ /* Set the element pointer if it isn't set already.
+ * This isn't needed for Isochronous queues, but it doesn't hurt. */
+ if (qh_element(qh) == UHCI_PTR_TERM) {
+ struct urb_priv *urbp = list_entry(qh->queue.next,
+ struct urb_priv, node);
+ struct uhci_td *td = list_entry(urbp->td_list.next,
+ struct uhci_td, list);
+
+ qh->element = LINK_TO_TD(td);
+ }
+
+ /* Treat the queue as if it has just advanced */
+ qh->wait_expired = 0;
+ qh->advance_jiffies = jiffies;
+
+ if (qh->state == QH_STATE_ACTIVE)
+ return;
+ qh->state = QH_STATE_ACTIVE;
+
+ /* Move the QH from its old list to the correct spot in the appropriate
+ * skeleton's list */
+ if (qh == uhci->next_qh)
+ uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+ node);
+ list_del(&qh->node);
+
+ /* Dispatch on schedule type: iso, high-period int, or async */
+ if (qh->skel == SKEL_ISO)
+ link_iso(uhci, qh);
+ else if (qh->skel < SKEL_ASYNC)
+ link_interrupt(uhci, qh);
+ else
+ link_async(uhci, qh);
+}
+
+/*
+ * Unlink a high-period interrupt QH from the schedule
+ */
+static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct uhci_qh *pqh;
+
+ /* Bypass this QH in its predecessor's hardware link; mb() makes
+ * the write visible before the caller proceeds. */
+ pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+ pqh->link = qh->link;
+ mb();
+}
+
+/*
+ * Unlink a period-1 interrupt or async QH from the schedule
+ */
+static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct uhci_qh *pqh;
+ __le32 link_to_next_qh = qh->link;
+
+ pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+ pqh->link = link_to_next_qh;
+
+ /* If this was the old first FSBR QH, link the terminating skeleton
+ * QH to the next (new first FSBR) QH. */
+ if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+ uhci->skel_term_qh->link = link_to_next_qh;
+ mb();
+}
+
+/*
+ * Take a QH off the hardware schedule
+ */
+static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ if (qh->state == QH_STATE_UNLINKING)
+ return;
+ WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
+ qh->state = QH_STATE_UNLINKING;
+
+ /* Unlink the QH from the schedule and record when we did it */
+ if (qh->skel == SKEL_ISO)
+ ; /* iso QHs aren't hardware-linked; nothing to unlink */
+ else if (qh->skel < SKEL_ASYNC)
+ unlink_interrupt(uhci, qh);
+ else
+ unlink_async(uhci, qh);
+
+ uhci_get_current_frame_number(uhci);
+ qh->unlink_frame = uhci->frame_number;
+
+ /* Force an interrupt so we know when the QH is fully unlinked */
+ if (list_empty(&uhci->skel_unlink_qh->node))
+ uhci_set_next_interrupt(uhci);
+
+ /* Move the QH from its old list to the end of the unlinking list */
+ if (qh == uhci->next_qh)
+ uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+ node);
+ list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
+}
+
+/*
+ * When we and the controller are through with a QH, it becomes IDLE.
+ * This happens when a QH has been off the schedule (on the unlinking
+ * list) for more than one frame, or when an error occurs while adding
+ * the first URB onto a new QH.
+ */
+static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ WARN_ON(qh->state == QH_STATE_ACTIVE);
+
+ if (qh == uhci->next_qh)
+ uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+ node);
+ list_move(&qh->node, &uhci->idle_qh_list);
+//yriver
+// qh->state = QH_STATE_IDLE;
+ qh->state = UHCI_QH_STATE_IDLE;
+
+ /* Now that the QH is idle, its post_td isn't being used */
+ if (qh->post_td) {
+ uhci_free_td(uhci, qh->post_td);
+ qh->post_td = NULL;
+ }
+
+ /* If anyone is waiting for a QH to become idle, wake them up */
+ if (uhci->num_waiting)
+ wake_up_all(&uhci->waitqh);
+}
+
+/*
+ * Find the highest existing bandwidth load for a given phase and period.
+ */
+static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
+{
+ int worst = uhci->load[phase];
+ int slot;
+
+ /* Walk every schedule slot this (phase, period) pair touches */
+ for (slot = phase + period; slot < MAX_PHASE; slot += period) {
+ if (uhci->load[slot] > worst)
+ worst = uhci->load[slot];
+ }
+ return worst;
+}
+
+/*
+ * Set qh->phase to the optimal phase for a periodic transfer and
+ * check whether the bandwidth requirement is acceptable.
+ */
+/* Returns 0 on success or -ENOSPC if adding qh->load to the chosen
+ * phase would exceed the 900 us/frame periodic budget. */
+static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ int minimax_load;
+
+ /* Find the optimal phase (unless it is already set) and get
+ * its load value. */
+ if (qh->phase >= 0)
+ minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+ else {
+ int phase, load;
+ int max_phase = min_t(int, MAX_PHASE, qh->period);
+
+ /* Pick the phase whose worst-case slot is least loaded */
+ qh->phase = 0;
+ minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+ for (phase = 1; phase < max_phase; ++phase) {
+ load = uhci_highest_load(uhci, phase, qh->period);
+ if (load < minimax_load) {
+ minimax_load = load;
+ qh->phase = phase;
+ }
+ }
+ }
+
+ /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
+ if (minimax_load + qh->load > 900) {
+ dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
+ "period %d, phase %d, %d + %d us\n",
+ qh->period, qh->phase, minimax_load, qh->load);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+/*
+ * Reserve a periodic QH's bandwidth in the schedule
+ */
+/* Adds qh->load to every schedule slot the QH occupies and updates
+ * the usbcore bandwidth accounting; mirror of uhci_release_bandwidth(). */
+static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ int i;
+ int load = qh->load;
+ char *p = "??";
+
+ for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+ uhci->load[i] += load;
+ uhci->total_load += load;
+ }
+ uhci_to_hcd(uhci)->self.bandwidth_allocated =
+ uhci->total_load / MAX_PHASE;
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+ p = "INT";
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+ p = "ISO";
+ break;
+ }
+ qh->bandwidth_reserved = 1;
+ dev_dbg(uhci_dev(uhci),
+ "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+ "reserve", qh->udev->devnum,
+ qh->hep->desc.bEndpointAddress, p,
+ qh->period, qh->phase, load);
+}
+
+/*
+ * Release a periodic QH's bandwidth reservation
+ */
+/* Exact inverse of uhci_reserve_bandwidth(): subtracts qh->load from
+ * each occupied slot and decrements the usbcore request counters. */
+static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ int i;
+ int load = qh->load;
+ char *p = "??";
+
+ for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+ uhci->load[i] -= load;
+ uhci->total_load -= load;
+ }
+ uhci_to_hcd(uhci)->self.bandwidth_allocated =
+ uhci->total_load / MAX_PHASE;
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+ p = "INT";
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+ p = "ISO";
+ break;
+ }
+ qh->bandwidth_reserved = 0;
+ dev_dbg(uhci_dev(uhci),
+ "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+ "release", qh->udev->devnum,
+ qh->hep->desc.bEndpointAddress, p,
+ qh->period, qh->phase, load);
+}
+
+/* Allocate the per-URB private data from the slab cache and link it
+ * to the URB via urb->hcpriv; returns NULL on allocation failure. */
+static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
+ struct urb *urb)
+{
+ struct urb_priv *priv = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
+
+ if (priv) {
+ INIT_LIST_HEAD(&priv->node);
+ INIT_LIST_HEAD(&priv->td_list);
+ priv->urb = urb;
+ urb->hcpriv = priv;
+ }
+ return priv;
+}
+
+/*
+ * Free an urb_priv and every TD still attached to it.  The urbp is
+ * expected to have been removed from its QH's queue already; if not,
+ * warn but carry on.
+ */
+static void uhci_free_urb_priv(struct uhci_hcd *uhci,
+ struct urb_priv *urbp)
+{
+ struct uhci_td *td, *tmp;
+
+ if (!list_empty(&urbp->node))
+ dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
+ urbp->urb);
+
+ /* Release every TD belonging to this URB */
+ list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
+ uhci_remove_td_from_urbp(td);
+ uhci_free_td(uhci, td);
+ }
+
+ kmem_cache_free(uhci_up_cachep, urbp);
+}
+
+/*
+ * Map status to standard result codes
+ *
+ * <status> is (td_status(td) & 0xF60000), a.k.a.
+ * uhci_status_bits(td_status(td)).
+ * Note: <status> does not include the TD_CTRL_NAK bit.
+ * <dir_out> is True for output TDs and False for input TDs.
+ */
+static int uhci_map_status(int status, int dir_out)
+{
+ int rc = 0;
+
+ /* Check the error bits in priority order; first match wins */
+ if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
+ rc = -EPROTO;
+ else if (status & TD_CTRL_CRCTIMEO) /* CRC/Timeout */
+ rc = dir_out ? -EPROTO : -EILSEQ;
+ else if (status & TD_CTRL_BABBLE) /* Babble */
+ rc = -EOVERFLOW;
+ else if (status & TD_CTRL_DBUFERR) /* Buffer error */
+ rc = -ENOSR;
+ else if (status & TD_CTRL_STALLED) /* Stalled */
+ rc = -EPIPE;
+ return rc;
+}
+
+/*
+ * Control transfers
+ */
+/*
+ * Build the TD chain for a control URB on @qh: one SETUP TD, zero or
+ * more DATA TDs, and a final status TD, then swap in a fresh dummy TD
+ * and activate the old one.  Returns 0 or -ENOMEM.
+ */
+static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
+ struct uhci_qh *qh)
+{
+ struct uhci_td *td;
+ unsigned long destination, status;
+ int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int len = urb->transfer_buffer_length;
+ dma_addr_t data = urb->transfer_dma;
+ __le32 *plink;
+ struct urb_priv *urbp = urb->hcpriv;
+ int skel;
+
+ /* The "pipe" thing contains the destination in bits 8--18 */
+ destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
+
+ /* 3 errors, dummy TD remains inactive */
+ status = uhci_maxerr(3);
+ if (urb->dev->speed == USB_SPEED_LOW)
+ status |= TD_CTRL_LS;
+
+ /*
+ * Build the TD for the control request setup packet
+ */
+ td = qh->dummy_td;
+ uhci_add_td_to_urbp(td, urbp);
+ uhci_fill_td(td, status, destination | uhci_explen(8),
+ urb->setup_dma);
+ plink = &td->link;
+ status |= TD_CTRL_ACTIVE;
+
+ /*
+ * If direction is "send", change the packet ID from SETUP (0x2D)
+ * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
+ * set Short Packet Detect (SPD) for all data packets.
+ *
+ * 0-length transfers always get treated as "send".
+ */
+ if (usb_pipeout(urb->pipe) || len == 0)
+ destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
+ else {
+ destination ^= (USB_PID_SETUP ^ USB_PID_IN);
+ status |= TD_CTRL_SPD;
+ }
+
+ /*
+ * Build the DATA TDs
+ */
+ while (len > 0) {
+ int pktsze = maxsze;
+
+ if (len <= pktsze) { /* The last data packet */
+ pktsze = len;
+ status &= ~TD_CTRL_SPD;
+ }
+
+ td = uhci_alloc_td(uhci);
+ if (!td)
+ goto nomem;
+ *plink = LINK_TO_TD(td);
+
+ /* Alternate Data0/1 (start with Data1) */
+ destination ^= TD_TOKEN_TOGGLE;
+
+ uhci_add_td_to_urbp(td, urbp);
+ uhci_fill_td(td, status, destination | uhci_explen(pktsze),
+ data);
+ plink = &td->link;
+
+ data += pktsze;
+ len -= pktsze;
+ }
+
+ /*
+ * Build the final TD for control status
+ */
+ td = uhci_alloc_td(uhci);
+ if (!td)
+ goto nomem;
+ *plink = LINK_TO_TD(td);
+
+ /* Change direction for the status transaction */
+ destination ^= (USB_PID_IN ^ USB_PID_OUT);
+ destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
+
+ uhci_add_td_to_urbp(td, urbp);
+ uhci_fill_td(td, status | TD_CTRL_IOC,
+ destination | uhci_explen(0), 0);
+ plink = &td->link;
+
+ /*
+ * Build the new dummy TD and activate the old one
+ */
+ td = uhci_alloc_td(uhci);
+ if (!td)
+ goto nomem;
+ *plink = LINK_TO_TD(td);
+
+ uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
+ /* Make the whole chain visible in memory before activating it */
+ wmb();
+ qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
+ qh->dummy_td = td;
+
+ /* Low-speed transfers get a different queue, and won't hog the bus.
+ * Also, some devices enumerate better without FSBR; the easiest way
+ * to do that is to put URBs on the low-speed queue while the device
+ * isn't in the CONFIGURED state. */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->state != USB_STATE_CONFIGURED)
+ skel = SKEL_LS_CONTROL;
+ else {
+ skel = SKEL_FS_CONTROL;
+ uhci_add_fsbr(uhci, urb);
+ }
+ if (qh->state != QH_STATE_ACTIVE)
+ qh->skel = skel;
+
+ urb->actual_length = -8; /* Account for the SETUP packet */
+ return 0;
+
+nomem:
+ /* Remove the dummy TD from the td_list so it doesn't get freed */
+ uhci_remove_td_from_urbp(qh->dummy_td);
+ return -ENOMEM;
+}
+
+/*
+ * Common submit for bulk and interrupt
+ */
+/*
+ * Build the TD chain for a bulk or interrupt URB on @qh, reusing the
+ * current dummy TD as the first data TD and installing a new dummy at
+ * the end.  Returns 0 or -ENOMEM.
+ */
+static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
+ struct uhci_qh *qh)
+{
+ struct uhci_td *td;
+ unsigned long destination, status;
+ int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int len = urb->transfer_buffer_length;
+ dma_addr_t data = urb->transfer_dma;
+ __le32 *plink;
+ struct urb_priv *urbp = urb->hcpriv;
+ unsigned int toggle;
+
+ if (len < 0)
+ return -EINVAL;
+
+ /* The "pipe" thing contains the destination in bits 8--18 */
+ destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
+ toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+
+ /* 3 errors, dummy TD remains inactive */
+ status = uhci_maxerr(3);
+ if (urb->dev->speed == USB_SPEED_LOW)
+ status |= TD_CTRL_LS;
+ if (usb_pipein(urb->pipe))
+ status |= TD_CTRL_SPD;
+
+ /*
+ * Build the DATA TDs
+ */
+ plink = NULL;
+ td = qh->dummy_td;
+ do { /* Allow zero length packets */
+ int pktsze = maxsze;
+
+ if (len <= pktsze) { /* The last packet */
+ pktsze = len;
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+ status &= ~TD_CTRL_SPD;
+ }
+
+ /* First pass uses the dummy TD; later passes allocate */
+ if (plink) {
+ td = uhci_alloc_td(uhci);
+ if (!td)
+ goto nomem;
+ *plink = LINK_TO_TD(td);
+ }
+ uhci_add_td_to_urbp(td, urbp);
+ uhci_fill_td(td, status,
+ destination | uhci_explen(pktsze) |
+ (toggle << TD_TOKEN_TOGGLE_SHIFT),
+ data);
+ plink = &td->link;
+ status |= TD_CTRL_ACTIVE;
+
+ data += pktsze;
+ /* Subtracting maxsze (not pktsze) drives len to <= 0 on a
+ * short final packet, so the loop test below terminates */
+ len -= maxsze;
+ toggle ^= 1;
+ } while (len > 0);
+
+ /*
+ * URB_ZERO_PACKET means adding a 0-length packet, if direction
+ * is OUT and the transfer_length was an exact multiple of maxsze,
+ * hence (len = transfer_length - N * maxsze) == 0
+ * however, if transfer_length == 0, the zero packet was already
+ * prepared above.
+ */
+ if ((urb->transfer_flags & URB_ZERO_PACKET) &&
+ usb_pipeout(urb->pipe) && len == 0 &&
+ urb->transfer_buffer_length > 0) {
+ td = uhci_alloc_td(uhci);
+ if (!td)
+ goto nomem;
+ *plink = LINK_TO_TD(td);
+
+ uhci_add_td_to_urbp(td, urbp);
+ uhci_fill_td(td, status,
+ destination | uhci_explen(0) |
+ (toggle << TD_TOKEN_TOGGLE_SHIFT),
+ data);
+ plink = &td->link;
+
+ toggle ^= 1;
+ }
+
+ /* Set the interrupt-on-completion flag on the last packet.
+ * A more-or-less typical 4 KB URB (= size of one memory page)
+ * will require about 3 ms to transfer; that's a little on the
+ * fast side but not enough to justify delaying an interrupt
+ * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
+ * flag setting. */
+ td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
+
+ /*
+ * Build the new dummy TD and activate the old one
+ */
+ td = uhci_alloc_td(uhci);
+ if (!td)
+ goto nomem;
+ *plink = LINK_TO_TD(td);
+
+ uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
+ /* Make the whole chain visible in memory before activating it */
+ wmb();
+ qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
+ qh->dummy_td = td;
+
+ /* Remember the final toggle value for the next URB */
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe), toggle);
+ return 0;
+
+nomem:
+ /* Remove the dummy TD from the td_list so it doesn't get freed */
+ uhci_remove_td_from_urbp(qh->dummy_td);
+ return -ENOMEM;
+}
+
+static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
+ struct uhci_qh *qh)
+{
+ int rc;
+
+ /* Can't have low-speed bulk transfers */
+ if (urb->dev->speed == USB_SPEED_LOW)
+ return -EINVAL;
+
+ /* Don't reassign the skeleton while the QH is running */
+ if (qh->state != QH_STATE_ACTIVE)
+ qh->skel = SKEL_BULK;
+
+ rc = uhci_submit_common(uhci, urb, qh);
+ if (rc == 0)
+ uhci_add_fsbr(uhci, urb);
+ return rc;
+}
+
+/*
+ * Submit an interrupt URB: choose the largest power-of-two period not
+ * exceeding urb->interval, reserve bandwidth on first use (retrying at
+ * shorter periods if a slot is full), then queue the TDs.
+ */
+static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
+ struct uhci_qh *qh)
+{
+ int ret;
+
+ /* USB 1.1 interrupt transfers only involve one packet per interval.
+ * Drivers can submit URBs of any length, but longer ones will need
+ * multiple intervals to complete.
+ */
+
+ if (!qh->bandwidth_reserved) {
+ int exponent;
+
+ /* Figure out which power-of-two queue to use */
+ for (exponent = 7; exponent >= 0; --exponent) {
+ if ((1 << exponent) <= urb->interval)
+ break;
+ }
+ if (exponent < 0)
+ return -EINVAL;
+
+ /* If the slot is full, try a lower period */
+ do {
+ qh->period = 1 << exponent;
+ qh->skel = SKEL_INDEX(exponent);
+
+ /* For now, interrupt phase is fixed by the layout
+ * of the QH lists.
+ */
+ qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
+ ret = uhci_check_bandwidth(uhci, qh);
+ } while (ret != 0 && --exponent >= 0);
+ if (ret)
+ return ret;
+ } else if (qh->period > urb->interval)
+ return -EINVAL; /* Can't decrease the period */
+
+ ret = uhci_submit_common(uhci, urb, qh);
+ if (ret == 0) {
+ /* Report back the period actually granted */
+ urb->interval = qh->period;
+ if (!qh->bandwidth_reserved)
+ uhci_reserve_bandwidth(uhci, qh);
+ }
+ return ret;
+}
+
+/*
+ * Fix up the data structures following a short transfer
+ */
+/*
+ * Fix up a queue after a short transfer: for control URBs restart at
+ * the status TD (returns -EINPROGRESS); for bulk/interrupt fix the
+ * toggles and skip to the next URB (returns 0).  Skipped TDs are freed.
+ */
+static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
+ struct uhci_qh *qh, struct urb_priv *urbp)
+{
+ struct uhci_td *td;
+ struct list_head *tmp;
+ int ret;
+
+ /* Start from the URB's last TD */
+ td = list_entry(urbp->td_list.prev, struct uhci_td, list);
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+
+ /* When a control transfer is short, we have to restart
+ * the queue at the status stage transaction, which is
+ * the last TD. */
+ WARN_ON(list_empty(&urbp->td_list));
+ qh->element = LINK_TO_TD(td);
+ tmp = td->list.prev;
+ ret = -EINPROGRESS;
+
+ } else {
+
+ /* When a bulk/interrupt transfer is short, we have to
+ * fix up the toggles of the following URBs on the queue
+ * before restarting the queue at the next URB. */
+ qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
+ uhci_fixup_toggles(qh, 1);
+
+ if (list_empty(&urbp->td_list))
+ td = qh->post_td;
+ qh->element = td->link;
+ tmp = urbp->td_list.prev;
+ ret = 0;
+ }
+
+ /* Remove all the TDs we skipped over, from tmp back to the start */
+ while (tmp != &urbp->td_list) {
+ td = list_entry(tmp, struct uhci_td, list);
+ tmp = tmp->prev;
+
+ uhci_remove_td_from_urbp(td);
+ uhci_free_td(uhci, td);
+ }
+ return ret;
+}
+
+/*
+ * Common result for control, bulk, and interrupt
+ */
+/*
+ * Walk the completed TDs of a control/bulk/interrupt URB, accumulating
+ * the actual length and mapping hardware status to an error code.
+ */
+static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
+{
+ struct urb_priv *urbp = urb->hcpriv;
+ struct uhci_qh *qh = urbp->qh;
+ struct uhci_td *td, *tmp;
+ unsigned status;
+ int ret = 0;
+
+ /* ret: 0 = done, 1 = short packet needing fixup, < 0 = error */
+ list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
+ unsigned int ctrlstat;
+ int len;
+
+ ctrlstat = td_status(td);
+ status = uhci_status_bits(ctrlstat);
+ if (status & TD_CTRL_ACTIVE)
+ return -EINPROGRESS;
+
+ len = uhci_actual_length(ctrlstat);
+ urb->actual_length += len;
+
+ if (status) {
+ ret = uhci_map_status(status,
+ uhci_packetout(td_token(td)));
+ if ((debug == 1 && ret != -EPIPE) || debug > 1) {
+ /* Some debugging code */
+ dev_dbg(&urb->dev->dev,
+ "%s: failed with status %x\n",
+ __func__, status);
+
+ if (debug > 1 && errbuf) {
+ /* Print the chain for debugging */
+ uhci_show_qh(uhci, urbp->qh, errbuf,
+ ERRBUF_LEN, 0);
+ lprintk(errbuf);
+ }
+ }
+
+ /* Did we receive a short packet? */
+ } else if (len < uhci_expected_length(td_token(td))) {
+
+ /* For control transfers, go to the status TD if
+ * this isn't already the last data TD */
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+ if (td->list.next != urbp->td_list.prev)
+ ret = 1;
+ }
+
+ /* For bulk and interrupt, this may be an error */
+ else if (urb->transfer_flags & URB_SHORT_NOT_OK)
+ ret = -EREMOTEIO;
+
+ /* Fixup needed only if this isn't the URB's last TD */
+ else if (&td->list != urbp->td_list.prev)
+ ret = 1;
+ }
+
+ /* Keep the most recently completed TD in qh->post_td */
+ uhci_remove_td_from_urbp(td);
+ if (qh->post_td)
+ uhci_free_td(uhci, qh->post_td);
+ qh->post_td = td;
+
+ if (ret != 0)
+ goto err;
+ }
+ return ret;
+
+err:
+ if (ret < 0) {
+ /* Note that the queue has stopped and save
+ * the next toggle value */
+ qh->element = UHCI_PTR_TERM;
+ qh->is_stopped = 1;
+ qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
+ qh->initial_toggle = uhci_toggle(td_token(td)) ^
+ (ret == -EREMOTEIO);
+
+ } else /* Short packet received */
+ ret = uhci_fixup_short_transfer(uhci, qh, urbp);
+ return ret;
+}
+
+/*
+ * Isochronous transfers
+ */
+/*
+ * Submit an isochronous URB: validate the period, compute the starting
+ * frame (reserving bandwidth on first use), build one TD per packet,
+ * and insert the TDs directly into the frame list.
+ */
+static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
+ struct uhci_qh *qh)
+{
+ struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
+ int i, frame;
+ unsigned long destination, status;
+ struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+
+ /* Values must not be too big (could overflow below) */
+ if (urb->interval >= UHCI_NUMFRAMES ||
+ urb->number_of_packets >= UHCI_NUMFRAMES)
+ return -EFBIG;
+
+ /* Check the period and figure out the starting frame number */
+ if (!qh->bandwidth_reserved) {
+ qh->period = urb->interval;
+ if (urb->transfer_flags & URB_ISO_ASAP) {
+ qh->phase = -1; /* Find the best phase */
+ i = uhci_check_bandwidth(uhci, qh);
+ if (i)
+ return i;
+
+ /* Allow a little time to allocate the TDs */
+ uhci_get_current_frame_number(uhci);
+ frame = uhci->frame_number + 10;
+
+ /* Move forward to the first frame having the
+ * correct phase */
+ urb->start_frame = frame + ((qh->phase - frame) &
+ (qh->period - 1));
+ } else {
+ /* Caller-specified start frame must lie ahead */
+ i = urb->start_frame - uhci->last_iso_frame;
+ if (i <= 0 || i >= UHCI_NUMFRAMES)
+ return -EINVAL;
+ qh->phase = urb->start_frame & (qh->period - 1);
+ i = uhci_check_bandwidth(uhci, qh);
+ if (i)
+ return i;
+ }
+
+ } else if (qh->period != urb->interval) {
+ return -EINVAL; /* Can't change the period */
+
+ } else {
+ /* Find the next unused frame */
+ if (list_empty(&qh->queue)) {
+ frame = qh->iso_frame;
+ } else {
+ struct urb *lurb;
+
+ /* Continue right after the last queued URB */
+ lurb = list_entry(qh->queue.prev,
+ struct urb_priv, node)->urb;
+ frame = lurb->start_frame +
+ lurb->number_of_packets *
+ lurb->interval;
+ }
+ if (urb->transfer_flags & URB_ISO_ASAP) {
+ /* Skip some frames if necessary to insure
+ * the start frame is in the future.
+ */
+ uhci_get_current_frame_number(uhci);
+ if (uhci_frame_before_eq(frame, uhci->frame_number)) {
+ frame = uhci->frame_number + 1;
+ frame += ((qh->phase - frame) &
+ (qh->period - 1));
+ }
+ } /* Otherwise pick up where the last URB leaves off */
+ urb->start_frame = frame;
+ }
+
+ /* Make sure we won't have to go too far into the future */
+ if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
+ urb->start_frame + urb->number_of_packets *
+ urb->interval))
+ return -EFBIG;
+
+ /* All iso TDs start active with the isochronous-select bit set */
+ status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
+ destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ td = uhci_alloc_td(uhci);
+ if (!td)
+ return -ENOMEM;
+
+ uhci_add_td_to_urbp(td, urbp);
+ uhci_fill_td(td, status, destination |
+ uhci_explen(urb->iso_frame_desc[i].length),
+ urb->transfer_dma +
+ urb->iso_frame_desc[i].offset);
+ }
+
+ /* Set the interrupt-on-completion flag on the last packet. */
+ td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
+
+ /* Add the TDs to the frame list */
+ frame = urb->start_frame;
+ list_for_each_entry(td, &urbp->td_list, list) {
+ uhci_insert_td_in_frame_list(uhci, td, frame);
+ frame += qh->period;
+ }
+
+ if (list_empty(&qh->queue)) {
+ qh->iso_packet_desc = &urb->iso_frame_desc[0];
+ qh->iso_frame = urb->start_frame;
+ }
+
+ qh->skel = SKEL_ISO;
+ if (!qh->bandwidth_reserved)
+ uhci_reserve_bandwidth(uhci, qh);
+ return 0;
+}
+
+/*
+ * Reap the completed isochronous TDs of an URB, one per frame slot,
+ * filling in the per-packet status/length and advancing qh->iso_frame.
+ */
+static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
+{
+ struct uhci_td *td, *tmp;
+ struct urb_priv *urbp = urb->hcpriv;
+ struct uhci_qh *qh = urbp->qh;
+
+ list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
+ unsigned int ctrlstat;
+ int status;
+ int actlength;
+
+ /* Stop as soon as we reach a frame the HC hasn't run yet */
+ if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
+ return -EINPROGRESS;
+
+ uhci_remove_tds_from_frame(uhci, qh->iso_frame);
+
+ ctrlstat = td_status(td);
+ if (ctrlstat & TD_CTRL_ACTIVE) {
+ status = -EXDEV; /* TD was added too late? */
+ } else {
+ status = uhci_map_status(uhci_status_bits(ctrlstat),
+ usb_pipeout(urb->pipe));
+ actlength = uhci_actual_length(ctrlstat);
+
+ urb->actual_length += actlength;
+ qh->iso_packet_desc->actual_length = actlength;
+ qh->iso_packet_desc->status = status;
+ }
+ if (status)
+ urb->error_count++;
+
+ uhci_remove_td_from_urbp(td);
+ uhci_free_td(uhci, td);
+ qh->iso_frame += qh->period;
+ ++qh->iso_packet_desc;
+ }
+ return 0;
+}
+
+/*
+ * HCD entry point: queue an URB on its endpoint's QH, creating the QH
+ * on first use, and activate the QH when this is the only URB on it.
+ * Returns 0 or a negative submit/allocation error.
+ */
+static int uhci_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb, gfp_t mem_flags)
+{
+ int ret;
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ unsigned long flags;
+ struct urb_priv *urbp;
+ struct uhci_qh *qh;
+
+ spin_lock_irqsave(&uhci->lock, flags);
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto done_not_linked;
+
+ ret = -ENOMEM;
+ urbp = uhci_alloc_urb_priv(uhci, urb);
+ if (!urbp)
+ goto done;
+
+ /* Reuse the endpoint's QH if it already has one */
+ if (urb->ep->hcpriv)
+ qh = urb->ep->hcpriv;
+ else {
+ qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
+ if (!qh)
+ goto err_no_qh;
+ }
+ urbp->qh = qh;
+
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ret = uhci_submit_control(uhci, urb, qh);
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ret = uhci_submit_bulk(uhci, urb, qh);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ ret = uhci_submit_interrupt(uhci, urb, qh);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ urb->error_count = 0;
+ ret = uhci_submit_isochronous(uhci, urb, qh);
+ break;
+ }
+ if (ret != 0)
+ goto err_submit_failed;
+
+ /* Add this URB to the QH */
+ list_add_tail(&urbp->node, &qh->queue);
+
+ /* If the new URB is the first and only one on this QH then either
+ * the QH is new and idle or else it's unlinked and waiting to
+ * become idle, so we can activate it right away. But only if the
+ * queue isn't stopped. */
+ if (qh->queue.next == &urbp->node && !qh->is_stopped) {
+ uhci_activate_qh(uhci, qh);
+ uhci_urbp_wants_fsbr(uhci, urbp);
+ }
+ goto done;
+
+err_submit_failed:
+ /* NOTE(review): upstream tests QH_STATE_IDLE here; this tree uses
+ * UHCI_QH_STATE_IDLE -- confirm both names denote the same state */
+ if (qh->state == UHCI_QH_STATE_IDLE)
+ uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
+err_no_qh:
+ uhci_free_urb_priv(uhci, urbp);
+done:
+ if (ret)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+done_not_linked:
+ spin_unlock_irqrestore(&uhci->lock, flags);
+ return ret;
+}
+
+/*
+ * HCD entry point: dequeue an URB.  Validates the unlink request,
+ * pulls isochronous TDs out of the frame list right away, and starts
+ * unlinking the QH; the actual giveback happens later in the scan.
+ */
+static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ unsigned long flags;
+ struct uhci_qh *qh;
+ int rc;
+
+ spin_lock_irqsave(&uhci->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
+ qh = ((struct urb_priv *) urb->hcpriv)->qh;
+
+ /* Remove Isochronous TDs from the frame list ASAP */
+ if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+ uhci_unlink_isochronous_tds(uhci, urb);
+ mb();
+
+ /* If the URB has already started, update the QH unlink time */
+ uhci_get_current_frame_number(uhci);
+ if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
+ qh->unlink_frame = uhci->frame_number;
+ }
+
+ uhci_unlink_qh(uhci, qh);
+
+done:
+ spin_unlock_irqrestore(&uhci->lock, flags);
+ return rc;
+}
+
+/*
+ * Finish unlinking an URB and give it back
+ */
+static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
+ struct urb *urb, int status)
+__releases(uhci->lock)
+__acquires(uhci->lock)
+{
+ struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+
+ /* urb->actual_length < 0 means the setup transaction didn't
+ * complete successfully. Either it failed or the URB was
+ * unlinked first. Regardless, don't confuse people with a
+ * negative length. */
+ urb->actual_length = max(urb->actual_length, 0);
+ }
+
+ /* When giving back the first URB in an Isochronous queue,
+ * reinitialize the QH's iso-related members for the next URB. */
+ else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
+ urbp->node.prev == &qh->queue &&
+ urbp->node.next != &qh->queue) {
+ struct urb *nurb = list_entry(urbp->node.next,
+ struct urb_priv, node)->urb;
+
+ qh->iso_packet_desc = &nurb->iso_frame_desc[0];
+ qh->iso_frame = nurb->start_frame;
+ }
+
+ /* Take the URB off the QH's queue. If the queue is now empty,
+ * this is a perfect time for a toggle fixup. */
+ list_del_init(&urbp->node);
+ if (list_empty(&qh->queue) && qh->needs_fixup) {
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe), qh->initial_toggle);
+ qh->needs_fixup = 0;
+ }
+
+ uhci_free_urb_priv(uhci, urbp);
+ usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
+
+ /* Drop the lock across the completion callback, as declared by the
+ * __releases/__acquires annotations above */
+ spin_unlock(&uhci->lock);
+ usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
+ spin_lock(&uhci->lock);
+
+ /* If the queue is now empty, we can unlink the QH and give up its
+ * reserved bandwidth. */
+ if (list_empty(&qh->queue)) {
+ uhci_unlink_qh(uhci, qh);
+ if (qh->bandwidth_reserved)
+ uhci_release_bandwidth(uhci, qh);
+ }
+}
+
+/*
+ * Scan the URBs in a QH's queue
+ */
+/* Note: expands a reference to a local variable named "uhci", which
+ * must be in scope at every expansion site. */
+#define QH_FINISHED_UNLINKING(qh) \
+ (((qh)->state == QH_STATE_UNLINKING && \
+ uhci->frame_number + uhci->is_stopped != (qh)->unlink_frame))
+
+/*
+ * Scan one QH's queue: give back completed URBs in order, handle
+ * dequeued URBs once the QH is stopped or fully unlinked, and then
+ * reactivate or idle the QH as appropriate.
+ */
+static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct urb_priv *urbp;
+ struct urb *urb;
+ int status;
+
+ while (!list_empty(&qh->queue)) {
+ urbp = list_entry(qh->queue.next, struct urb_priv, node);
+ urb = urbp->urb;
+
+ if (qh->type == USB_ENDPOINT_XFER_ISOC)
+ status = uhci_result_isochronous(uhci, urb);
+ else
+ status = uhci_result_common(uhci, urb);
+ if (status == -EINPROGRESS)
+ break;
+
+ /* Dequeued but completed URBs can't be given back unless
+ * the QH is stopped or has finished unlinking. */
+ if (urb->unlinked) {
+ if (QH_FINISHED_UNLINKING(qh))
+ qh->is_stopped = 1;
+ else if (!qh->is_stopped)
+ return;
+ }
+
+ uhci_giveback_urb(uhci, qh, urb, status);
+ /* After a fatal error the queue has stopped
+ * (see uhci_result_common) */
+ if (status < 0)
+ break;
+ }
+
+ /* If the QH is neither stopped nor finished unlinking (normal case),
+ * our work here is done. */
+ if (QH_FINISHED_UNLINKING(qh))
+ qh->is_stopped = 1;
+ else if (!qh->is_stopped)
+ return;
+
+ /* Otherwise give back each of the dequeued URBs */
+restart:
+ list_for_each_entry(urbp, &qh->queue, node) {
+ urb = urbp->urb;
+ if (urb->unlinked) {
+
+ /* Fix up the TD links and save the toggles for
+ * non-Isochronous queues. For Isochronous queues,
+ * test for too-recent dequeues. */
+ if (!uhci_cleanup_queue(uhci, qh, urb)) {
+ qh->is_stopped = 0;
+ return;
+ }
+ uhci_giveback_urb(uhci, qh, urb, 0);
+ goto restart;
+ }
+ }
+ qh->is_stopped = 0;
+
+ /* There are no more dequeued URBs. If there are still URBs on the
+ * queue, the QH can now be re-activated. */
+ if (!list_empty(&qh->queue)) {
+ if (qh->needs_fixup)
+ uhci_fixup_toggles(qh, 0);
+
+ /* If the first URB on the queue wants FSBR but its time
+ * limit has expired, set the next TD to interrupt on
+ * completion before reactivating the QH. */
+ urbp = list_entry(qh->queue.next, struct urb_priv, node);
+ if (urbp->fsbr && qh->wait_expired) {
+ struct uhci_td *td = list_entry(urbp->td_list.next,
+ struct uhci_td, list);
+
+ td->status |= __cpu_to_le32(TD_CTRL_IOC);
+ }
+
+ uhci_activate_qh(uhci, qh);
+ }
+
+ /* The queue is empty. The QH can become idle if it is fully
+ * unlinked. */
+ else if (QH_FINISHED_UNLINKING(qh))
+ uhci_make_qh_idle(uhci, qh);
+}
+
+/*
+ * Check for queues that have made some forward progress.
+ * Returns 0 if the queue is not Isochronous, is ACTIVE, and
+ * has not advanced since last examined; 1 otherwise.
+ *
+ * Early Intel controllers have a bug which causes qh->element sometimes
+ * not to advance when a TD completes successfully. The queue remains
+ * stuck on the inactive completed TD. We detect such cases and advance
+ * the element pointer by hand.
+ */
+static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct urb_priv *urbp = NULL;
+ struct uhci_td *td;
+ int ret = 1;
+ unsigned status;
+
+ if (qh->type == USB_ENDPOINT_XFER_ISOC)
+ goto done;
+
+ /* Treat an UNLINKING queue as though it hasn't advanced.
+ * This is okay because reactivation will treat it as though
+ * it has advanced, and if it is going to become IDLE then
+ * this doesn't matter anyway. Furthermore it's possible
+ * for an UNLINKING queue not to have any URBs at all, or
+ * for its first URB not to have any TDs (if it was dequeued
+ * just as it completed). So it's not easy in any case to
+ * test whether such queues have advanced. */
+ if (qh->state != QH_STATE_ACTIVE) {
+ urbp = NULL;
+ status = 0;
+
+ } else {
+ urbp = list_entry(qh->queue.next, struct urb_priv, node);
+ td = list_entry(urbp->td_list.next, struct uhci_td, list);
+ status = td_status(td);
+ if (!(status & TD_CTRL_ACTIVE)) {
+
+ /* We're okay, the queue has advanced */
+ qh->wait_expired = 0;
+ qh->advance_jiffies = jiffies;
+ goto done;
+ }
+ /* First TD still active: no progress yet */
+ ret = 0;
+ }
+
+ /* The queue hasn't advanced; check for timeout */
+ if (qh->wait_expired)
+ goto done;
+
+ if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
+
+ /* Detect the Intel bug and work around it */
+ if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
+ qh->element = qh->post_td->link;
+ qh->advance_jiffies = jiffies;
+ ret = 1;
+ goto done;
+ }
+
+ qh->wait_expired = 1;
+
+ /* If the current URB wants FSBR, unlink it temporarily
+ * so that we can safely set the next TD to interrupt on
+ * completion. That way we'll know as soon as the queue
+ * starts moving again. */
+ if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
+ uhci_unlink_qh(uhci, qh);
+
+ } else {
+ /* Unmoving but not-yet-expired queues keep FSBR alive */
+ if (urbp)
+ uhci_urbp_wants_fsbr(uhci, urbp);
+ }
+
+done:
+ return ret;
+}
+
+/*
+ * Process events in the schedule, but only in one thread at a time
+ */
+static void uhci_scan_schedule(struct uhci_hcd *uhci)
+{
+ int i;
+ struct uhci_qh *qh;
+
+ /* Don't allow re-entrant calls */
+ if (uhci->scan_in_progress) {
+ uhci->need_rescan = 1;
+ return;
+ }
+ uhci->scan_in_progress = 1;
+rescan:
+ uhci->need_rescan = 0;
+ uhci->fsbr_is_wanted = 0;
+
+ uhci_clear_next_interrupt(uhci);
+ uhci_get_current_frame_number(uhci);
+ uhci->cur_iso_frame = uhci->frame_number;
+
+ /* Go through all the QH queues and process the URBs in each one */
+ /* NOTE: the final skeleton QH is deliberately excluded by the
+ * UHCI_NUM_SKELQH - 1 bound */
+ for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
+ uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
+ struct uhci_qh, node);
+ while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
+ uhci->next_qh = list_entry(qh->node.next,
+ struct uhci_qh, node);
+
+ if (uhci_advance_check(uhci, qh)) {
+ uhci_scan_qh(uhci, qh);
+ if (qh->state == QH_STATE_ACTIVE) {
+ uhci_urbp_wants_fsbr(uhci,
+ list_entry(qh->queue.next, struct urb_priv, node));
+ }
+ }
+ }
+ }
+
+ uhci->last_iso_frame = uhci->cur_iso_frame;
+ if (uhci->need_rescan)
+ goto rescan;
+ uhci->scan_in_progress = 0;
+
+ /* No one wants FSBR any more: start the timer that turns it off */
+ if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
+ !uhci->fsbr_expiring) {
+ uhci->fsbr_expiring = 1;
+ mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
+ }
+
+ if (list_empty(&uhci->skel_unlink_qh->node))
+ uhci_clear_next_interrupt(uhci);
+ else
+ uhci_set_next_interrupt(uhci);
+}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index f3a75a929e0a..97a22b9cbb25 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -88,6 +88,13 @@ config USB_EHCI_FSL ---help--- Variation of ARC USB block used in some Freescale chips. +config USB_EHCI_AST + bool "Support for ASPEED SoC EHCI USB controller" + depends on USB_EHCI_HCD && ARCH_ASPEED +# select USB_EHCI_ROOT_HUB_TT + ---help--- + Variation of ARC USB block used in some ASPEED SoC chips. + config USB_EHCI_HCD_PPC_OF bool "EHCI support for PPC USB controller on OF platform bus" depends on USB_EHCI_HCD && PPC_OF diff --git a/drivers/usb/host/ehci-ast.c b/drivers/usb/host/ehci-ast.c new file mode 100644 index 000000000000..503df9a291ff --- /dev/null +++ b/drivers/usb/host/ehci-ast.c @@ -0,0 +1,297 @@ +/******************************************************************************** +* File Name : drivers/usb/host/ehci-aspeed.c +* Author : Ryan Chen +* Description : EHCI HCD (Host Controller Driver) for USB +* +* Copyright (C) ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +* History : +* 1. 
2012/08/17 ryan chen create this file +* +********************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <mach/hardware.h> + + +/* ASPEED EHCI USB Host Controller */ + +/*-------------------------------------------------------------------------*/ + +/* configure so an HC device and id are always provided */ +/* always called with process context; sleeping is OK */ + +static int ehci_ast_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int retval; + + ehci->caps = hcd->regs; + ehci->regs = hcd->regs + + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); + dbg_hcs_params(ehci, "reset"); + + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + +#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT + hcd->has_tt = 1; +#else + hcd->has_tt = 0; +#endif + + ehci->sbrn = 0x20; + +// retval = ehci_halt(ehci); +// if (retval) +// return retval; + + + /* + * data structure init + */ + retval = ehci_init(hcd); + if (retval) + return retval; + + ehci_reset(ehci); + ehci_port_power(ehci, 0); + + return retval; +} + +static const struct hc_driver ehci_ast_hc_driver = { + .description = hcd_name, + .product_desc = "ASPEED On-Chip EHCI Host Controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_USB2, + /* + * basic lifecycle operations + */ + .reset = ehci_ast_setup, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = 
ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + .relinquish_port = ehci_relinquish_port, + .port_handed_over = ehci_port_handed_over, +}; + +static int ehci_ast_drv_probe(struct platform_device *pdev) +{ + struct resource *res; + struct usb_hcd *hcd; +// struct ehci_hcd *ehci; + void __iomem *regs; + int irq, err; + + if (usb_disabled()) + return -ENODEV; + + pr_debug("Initializing ASPEED-SoC USB Host Controller\n"); + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(&pdev->dev, + "Found HC with no IRQ. Check %s setup!\n", + dev_name(&pdev->dev)); + err = -ENODEV; + goto err1; + } + + //TODO +// IRQ_SET_HIGH_LEVEL (irq); +// IRQ_SET_LEVEL_TRIGGER (irq); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, + "Found HC with no register addr. Check %s setup!\n", + dev_name(&pdev->dev)); + err = -ENODEV; + goto err1; + } + + if (!request_mem_region(res->start, res->end - res->start + 1, + res->name)) { + dev_dbg(&pdev->dev, "controller already in use\n"); + err = -EBUSY; + goto err1; + } + + regs = ioremap_nocache(res->start, res->end - res->start + 1); + if (regs == NULL) { + dev_dbg(&pdev->dev, "error mapping memory\n"); + err = -EFAULT; + goto err2; + } + + hcd = usb_create_hcd(&ehci_ast_hc_driver, + &pdev->dev, dev_name(&pdev->dev)); + if (!hcd) { + err = -ENOMEM; + goto err3; + } + + hcd->rsrc_start = res->start; + hcd->rsrc_len = res->end - res->start + 1; + hcd->regs = regs; + + err = usb_add_hcd(hcd, irq, IRQF_DISABLED); + if (err) + goto err4; + + return 0; + + err4: + usb_put_hcd(hcd); + err3: + iounmap(regs); + err2: + release_mem_region(res->start, res->end - res->start + 1); + err1: + dev_err(&pdev->dev, "init %s fail, %d\n", + dev_name(&pdev->dev), err); + + return err; + + +} + +static int ehci_ast_drv_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + + usb_remove_hcd(hcd); + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + 
usb_put_hcd(hcd); + + return 0; +} + + /*TBD*/ +#ifdef CONFIG_PM +static int ehci_hcd_ast_drv_suspend(struct platform_device *pdev, pm_message_t msg) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + unsigned long flags; + int rc = 0; + + if (time_before(jiffies, ehci->next_statechange)) + msleep(10); + + /* Root hub was already suspended. Disable irq emission and + * mark HW unaccessible, bail out if RH has been resumed. Use + * the spinlock to properly synchronize with possible pending + * RH suspend or resume activity. + * + * This is still racy as hcd->state is manipulated outside of + * any locks =P But that will be a different fix. + */ + spin_lock_irqsave (&ehci->lock, flags); + if (hcd->state != HC_STATE_SUSPENDED) { + rc = -EINVAL; + goto bail; + } + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + (void)ehci_readl(ehci, &ehci->regs->intr_enable); + + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + bail: + spin_unlock_irqrestore (&ehci->lock, flags); + + // could save FLADJ in case of Vaux power loss + // ... we'd only use it to handle clock skew + + return rc; +} +static int ehci_hcd_ast_drv_resume(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + // maybe restore FLADJ + + if (time_before(jiffies, ehci->next_statechange)) + msleep(100); + + /* Mark hardware accessible again as we are out of D3 state by now */ + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + + usb_root_hub_lost_power(hcd->self.root_hub); + + /* Else reset, to cope with power loss or flush-to-storage + * style "resume" having let BIOS kick in during reboot. 
+ */ + (void) ehci_halt(ehci); + (void) ehci_reset(ehci); + + /* emptying the schedule aborts any urbs */ + spin_lock_irq(&ehci->lock); + if (ehci->reclaim) + end_unlink_async(ehci); + ehci_work(ehci); + spin_unlock_irq(&ehci->lock); + + ehci_writel(ehci, ehci->command, &ehci->regs->command); + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + + /* here we "know" root ports should always stay powered */ + ehci_port_power(ehci, 1); + + hcd->state = HC_STATE_SUSPENDED; + return 0; +} +#endif + +MODULE_ALIAS("platform:ehci_ast"); + +static struct platform_driver ehci_hcd_ast_driver = { + .probe = ehci_ast_drv_probe, + .remove = ehci_ast_drv_remove, + .shutdown = usb_hcd_platform_shutdown, +#ifdef CONFIG_PM + .suspend = ehci_hcd_ast_drv_suspend, + .resume = ehci_hcd_ast_drv_resume, +#endif + .driver = { + .name = "ehci-ast", + }, +}; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index e551bb38852b..a34a4cf7005a 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -581,6 +581,7 @@ static int ehci_run (struct usb_hcd *hcd) * Scsi_Host.highmem_io, and so forth. It's readonly to all * host side drivers though. */ + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); if (HCC_64BIT_ADDR(hcc_params)) { ehci_writel(ehci, 0, &ehci->regs->segment); @@ -1036,6 +1037,11 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ixp4xx_ehci_driver #endif +#ifdef CONFIG_USB_EHCI_AST +#include "ehci-ast.c" +#define PLATFORM_DRIVER ehci_hcd_ast_driver +#endif + #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) #error "missing bus glue for ehci-hcd" @@ -1117,7 +1123,10 @@ err_debug: clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); return retval; } -module_init(ehci_hcd_init); + +//ehci must after uhci driver module load. 
Ryan Modify +late_initcall(ehci_hcd_init); +//module_init(ehci_hcd_init); static void __exit ehci_hcd_cleanup(void) { diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 3f3ce13fef43..a8be29b23357 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1473,6 +1473,30 @@ config FB_SAVAGE_ACCEL the resulting framebuffer console has bothersome glitches, then choose N here. +menuconfig FB_AST + tristate "ASPEED Framebuffer Driver" + depends on FB + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + default n + +if FB_AST + +config AST_DAC + bool "CRT DAC output" + +config AST_DVO + bool "CRT DVO output" + +config HDMI_CAT6613 + bool "Enable CAT6613 HDMI TX" + depends on FB_AST && AST_DVO + help + This option will support CAT6613 HDMI TX driver + +endif + config FB_SIS tristate "SiS/XGI display support" depends on FB && PCI diff --git a/drivers/video/Makefile b/drivers/video/Makefile index e39e33e797da..97bc000f9bae 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -41,6 +41,8 @@ obj-$(CONFIG_FB_NVIDIA) += nvidia/ obj-$(CONFIG_FB_ATY) += aty/ macmodes.o obj-$(CONFIG_FB_ATY128) += aty/ macmodes.o obj-$(CONFIG_FB_RADEON) += aty/ +obj-$(CONFIG_FB_AST) += astfb.o +obj-$(CONFIG_HDMI_CAT6613) += hdmi_cat6613.o obj-$(CONFIG_FB_SIS) += sis/ obj-$(CONFIG_FB_VIA) += via/ obj-$(CONFIG_FB_KYRO) += kyro/ diff --git a/drivers/video/astfb.c b/drivers/video/astfb.c new file mode 100644 index 000000000000..8292bb8a36e9 --- /dev/null +++ b/drivers/video/astfb.c @@ -0,0 +1,1056 @@ + /******************************************************************************** +* File Name : drivers/video/astfb.c +* Author : Ryan Chen +* Description : ASPEED Framebuffer Driver +* +* Copyright (C) ASPEED Tech. Inc. 
+* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +* History : +* 1. 2012/12/27 Ryan Chen create this file +* +* +********************************************************************************/ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/platform_device.h> + +#include <asm/io.h> +#include <asm/uaccess.h> +#include <asm/mach/map.h> +#include <plat/regs-crt.h> +#include <mach/ast_lcd.h> + +#ifdef CONFIG_DOUBLE_BUFFER +#define NUMBER_OF_BUFFERS 2 +#else +#define NUMBER_OF_BUFFERS 1 +#endif + +////////////////////////////////////////////////////////////// +/* H/W Feature Definition */ +#define DEFAULT_MMIO_SIZE 0x00020000 +#define DEFAULT_CMDQ_SIZE 0x00100000 +#define MIN_CMDQ_SIZE 0x00040000 +#define CMD_QUEUE_GUARD_BAND 0x00000020 +#define DEFAULT_HWC_NUM 0x00000002 + +//////////////////////////////////////////////////////////////// +static wait_queue_head_t wq; +static int gNoPanDisplay; +static int gGUIWaitVsync; + +#define ASTFB_GET_DFBINFO _IOR(0xF3,0x00,struct astfb_dfbinfo) + +/* Default Threshold Seting */ +#define 
CRT_LOW_THRESHOLD_VALUE 0x12 +#define CRT_HIGH_THRESHOLD_VALUE 0x1E + +//#define CRT_LOW_THRESHOLD_VALUE 0x60 +//#define CRT_HIGH_THRESHOLD_VALUE 0x78 +//for fix 1920X1080 +//#define CRT_LOW_THRESHOLD_VALUE 0x16 +//#define CRT_HIGH_THRESHOLD_VALUE 0x1E + +//////////////////////////////////////////////////////////// + +/* Debugging stuff */ + +#define FBDBG 1 + +#define dprintk(msg...) if (FBDBG) { printk(KERN_DEBUG "astfb: " msg); } + +struct pixel_freq_pll_data { + u32 pixel_freq; //*10000 + u32 pll_set; +}; + +static struct pixel_freq_pll_data pll_table[] = { + {39721, 0x00046515}, /* 00: VCLK25_175 */ + {35308, 0x00047255}, /* 01: VCLK28_322 */ + {31746, 0x0004682a}, /* 02: VCLK31_5 */ + {27777, 0x0004672a}, /* 03: VCLK36 */ + {25000, 0x00046c50}, /* 04: VCLK40 */ + {20202, 0x00046842}, /* 05: VCLK49_5 */ + {20000, 0x00006c32}, /* 06: VCLK50 */ + {17777, 0x00006a2f}, /* 07: VCLK56_25 */ + {15384, 0x00006c41}, /* 08: VCLK65 */ + {13333, 0x00006832}, /* 09: VCLK75 */ + {12690, 0x0000672e}, /* 0A: VCLK78_75 */ + {10582, 0x0000683f}, /* 0B: VCLK94_5 */ + {9259, 0x00004824}, /* 0C: VCLK108 */ + {7407, 0x0000482d}, /* 0D: VCLK135 */ + {6349, 0x0000472e}, /* 0E: VCLK157_5 */ + {6172, 0x00004836}, /* 0F: VCLK162 */ +}; + +// ARGB4444 format +unsigned short cursor_8x8[] = { + 0x0FFF, 0x1FFF, 0x2FFF, 0x3777, 0x4777, 0x5777, 0x6777, 0x7888, + 0x8FFF, 0xF000, 0xAFFF, 0xB777, 0xC777, 0xD777, 0xE777, 0xF888, + 0x0FFF, 0x1FFF, 0x2FFF, 0x3FFF, 0x4777, 0x5777, 0x6777, 0x7888, + 0x8FFF, 0x9FFF, 0xAFFF, 0xBFFF, 0xCFFF, 0xD777, 0xE777, 0xF888, + 0x0FFF, 0x1FFF, 0x2FFF, 0x3FFF, 0x4FFF, 0x5FFF, 0x6FFF, 0x7888, + 0x8FFF, 0x9FFF, 0xAFFF, 0xBFFF, 0xCFFF, 0xDFFF, 0xEFFF, 0xFFFF, + 0x0FFF, 0x1FFF, 0x2777, 0x3FFF, 0x4FFF, 0x5FFF, 0x6FFF, 0x7FFF, + 0x8FFF, 0x9777, 0xA777, 0xB777, 0xC777, 0xDFFF, 0xEFFF, 0xFFFF, +}; + +// XRGB4444 format +unsigned short cursor_16x16[] = { + 0x8777, 0x8777, 0x8777, 0x8777, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 
0x8FFF, + 0x8777, 0xC888, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x8777, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x8777, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x8777, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x4777, 0x4FFF, 0x4FFF, 0x4FFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x4FFF, 0x4FFF, 0x4FFF, 0x4FFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x0FFF, 0x0FFF, 0x0FFF, 0x0FFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x0FFF, 0x0FFF, 0x0FFF, 0x0FFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x0FFF, 0x0FFF, 0x0FFF, 0x0FFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0x0FFF, 0x0FFF, 0x0FFF, 0x0FFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0xCFFF, 0xCFFF, 0xCFFF, 0xCFFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0xCFFF, 0xCFFF, 0xCFFF, 0xCFFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0xCFFF, 0xCFFF, 0xCFFF, 0xCFFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0xCFFF, 0xCFFF, 0xCFFF, 0xCFFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, + 0xCFFF, 0xCFFF, 0xCFFF, 0xCFFF, 0x4FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, 0x8FFF, +}; + +struct astfb_device { + int state; 
+ struct mutex rqueue_mutex; + int palette_size; + u32 pseudo_palette[17]; + struct platform_device *pdev; + struct fb_var_screeninfo new_var; /* for mode changes */ +}; + + +/* data structure */ +struct astfb_info { + struct platform_device *pdev; + struct fb_info *info; + struct resource *reg_res; + struct resource *fb_res; + void __iomem *base; + int addr_assign; + int irq; + int yuv_mode; + u32 pseudo_palette[17]; + + struct timer_list timer; + + /* driver registered */ + int registered; + /* console control */ + int currcon; + + int need_wakeup; + void __iomem *next_addr; + + + u8 hwcursor; //0: disable , 1 : enable + u8 dac; //0: disable , 1 : enable + u8 dvo; //0: disable , 1 : enable + u8 xmiter; //0: dvi, 1:hdmi; + struct ast_fb_plat_data *fb_plat_data; + +}; + +static inline void +astfb_write(struct astfb_info *fbinfo, u32 val, u32 reg) +{ +// dprintk("astfb_write : val: %x , reg : %x \n",val,reg); + writel(val, fbinfo->base+ reg); +} + +static inline u32 +astfb_read(struct astfb_info *fbinfo, u32 reg) +{ + return readl(fbinfo->base + reg); +} + +static void astfb_osd_enable(struct astfb_info *sfb, u8 enable) +{ + if(enable) + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) | CRT_CTRL_OSD_EN, AST_CRT_CTRL1); + else + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) & ~CRT_CTRL_OSD_EN, AST_CRT_CTRL1); +} + +static void astfb_cursor_enable(struct astfb_info *sfb, u8 enable) +{ + if(enable) { + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) | CRT_CTRL_HW_CURSOR_EN, AST_CRT_CTRL1); + } else { + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) & ~CRT_CTRL_HW_CURSOR_EN, AST_CRT_CTRL1); + } +} + +int +astfb_crtc_to_var(struct fb_var_screeninfo *var, struct astfb_info *sfb) +{ + + /* crtc */ + var->xoffset = var->yoffset = 0; + + /* palette */ + switch(var->bits_per_pixel) { + case 8: + var->red.offset = var->green.offset = var->blue.offset = 0; + var->red.length = var->green.length = var->blue.length = 6; + break; + case 16: + var->red.offset = 11; + 
var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 24: + case 32: + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + } + + var->red.msb_right = + var->green.msb_right = + var->blue.msb_right = + var->transp.offset = + var->transp.length = + var->transp.msb_right = 0; + + return 0; +} + +/*-------------------------------------------------------------------------*/ +static int astfb_hw_cursor(struct fb_info *info, struct fb_cursor *cursor) +{ +// printk("astfb_hw_cursor \n"); + return 0; +} + +#if (NUMBER_OF_BUFFERS > 1) +static int astfb_pan_display(struct fb_var_screeninfo *var, struct fb_info* info) +{ + struct astfb_info *sfb = info->par; + u32 addr; + s32 timeout; + + if(gNoPanDisplay) + return 0; + + addr = var->yoffset * info->fix.line_length + info->fix.smem_start; + + astfb_write(sfb, addr, AST_CRT_ADDR); + + if(gGUIWaitVsync) + { + timeout = interruptible_sleep_on_timeout(&wq,HZ/60); + if(timeout<0) + dprintk("%s: interruptible_sleep_on_timeout, may lost interrupt! 
timeout=%d\n",__FUNCTION__,timeout); + } + return 0; + +} /* astfb_pan_display */ +#endif + +static int astfb_set_par(struct fb_info *info) +{ + struct astfb_info *sfb = info->par; + struct fb_var_screeninfo *var = &info->var; + u32 i,ctrl1, ctrl2, htt, hde, hrs_s, hrs_e, vtt, vde, vrs_s, vrs_e; + u32 d_offset, t_count, thshld; + u32 d2_pll; + + //S1 : set H / V + // Horizontal Timing + htt = var->xres + var->left_margin + var->right_margin + var->hsync_len; + hde = var->xres; + astfb_write(sfb, CRT_H_TOTAL((htt - 1)) | CRT_H_DE((hde - 1)), AST_CRT_HORIZ0); + + hrs_s = var->xres + var->right_margin; + hrs_e = var->xres + var->right_margin + var->hsync_len; + astfb_write(sfb, CRT_H_RS_START((hrs_s - 1)) | CRT_H_RS_END((hrs_e - 1)), AST_CRT_HORIZ1); + + dprintk("var->upper_margin= %d, var->lower_margin= %d, var->vsync_len = %d \n",var->upper_margin, var->lower_margin, var->vsync_len); + + vtt = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; + vde = var->yres; + astfb_write(sfb, CRT_V_TOTAL((vtt - 1)) | CRT_V_DE((vde - 1)), AST_CRT_VERTI0); + vrs_s = var->yres + var->lower_margin; + vrs_e = var->yres + var->lower_margin + var->vsync_len; + astfb_write(sfb, CRT_V_RS_START((vrs_s - 1)) | CRT_V_RS_END((vrs_e - 1)), AST_CRT_VERTI1); + + if(var->nonstd != 0) + printk("TODO Check .... nonstd \n"); + + switch (var->nonstd) { + case 0: + break; + case ASTFB_COLOR_YUV444: + var->bits_per_pixel = 32; + return 0; + case ASTFB_COLOR_YUV420: + var->bits_per_pixel = 32; + return 0; + } + + //S2 : Offset , TODO ... 
(x + 0x1f) & ~0x1f + d_offset = var->xres * var->bits_per_pixel /8; +// dprintk("d_offset %d\n",d_offset); + + switch (var->nonstd) { + case 0: + break; + case ASTFB_COLOR_YUV444: + var->bits_per_pixel = 24; + return 0; + case ASTFB_COLOR_YUV420: + var->bits_per_pixel = 16; + return 0; + } + + t_count =(var->xres * var->bits_per_pixel + 63) / 64; +// dprintk("t_count %d \n",t_count); + astfb_write(sfb, CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count), AST_CRT_OFFSET); + + + //S3 : DCLK + dprintk("var->pixclock = %d \n",var->pixclock); + + for(i=0; i<sizeof(pll_table)/sizeof(struct pixel_freq_pll_data); i++) { + if(pll_table[i].pixel_freq == var->pixclock) { + astfb_write(sfb, pll_table[i].pll_set, AST_CRT_PLL); + dprintk("find pixclk in table set 0x%x \n",pll_table[i].pll_set); + break; + } + } + if(i == sizeof(pll_table)/sizeof(struct pixel_freq_pll_data)) + printk("ERROR pixclk in table ... FIXME \n"); +#if 0 + d2_pll = sfb->fb_plat_data->get_clk(); + u32 num, denum, div0, + num = pll_table[i].pll_set & 0xff; + denum = (pll_table[i].pll_set >> 8) & 0x1f; + div0 = (pll_table[i].pll_set >> 13) & 0x3; + div1 = (pll_table[i].pll_set >> 13) & 0x3; + printk +#endif + + //S4 + astfb_write(sfb, sfb->info->fix.smem_start, AST_CRT_ADDR); + + thshld = CRT_THROD_HIGH(CRT_HIGH_THRESHOLD_VALUE) | CRT_THROD_LOW(CRT_LOW_THRESHOLD_VALUE); + astfb_write(sfb, thshld, AST_CRT_THROD); + + + info->fix.line_length = (var->xres*var->bits_per_pixel)/8; + dprintk("x :%d , y : %d , bpp = %d \n",var->xres, var->yres, var->bits_per_pixel); + //disable crt first ..... 
+ astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL2) & ~(CRT_CTRL_DAC_PWR_EN | CRT_CTRL_DVO_EN), AST_CRT_CTRL2); + + ctrl1 = astfb_read(sfb, AST_CRT_CTRL1); + //CTRL 1 + // SetPolarity + dprintk("var->sync : %x , var->vmode = %d \n",var->sync, var->vmode); + + if(var->sync & FB_SYNC_HOR_HIGH_ACT) + ctrl1 &= ~CRT_CTRL_HSYNC_POLARITY; + else + ctrl1 |= CRT_CTRL_HSYNC_POLARITY; + + if(var->sync & FB_SYNC_VERT_HIGH_ACT) + ctrl1 &= ~CRT_CTRL_VSYNC_POLARITY; + else + ctrl1 |= CRT_CTRL_VSYNC_POLARITY; + + /* Mode Type Setting */ + + if(var->bits_per_pixel==16) + ctrl1 &= ~CRT_CTRL_FORMAT_MASK; //RGB565 + else + ctrl1 |= CRT_CTRL_FORMAT(COLOR_XRGB8888); + + if (var->vmode & FB_VMODE_INTERLACED) + ctrl1 |= CRT_CTRL_INTER_TIMING; + else + ctrl1 &= ~CRT_CTRL_INTER_TIMING; + + //enable crt ... + astfb_write(sfb, ctrl1 | CRT_CTRL_GRAPHIC_EN, AST_CRT_CTRL1); + + dprintk("var->left_margin= %d, var->right_margin= %d, var->hsync_len = %d \n",var->left_margin, var->right_margin, var->hsync_len); + + + //enable dac / dvo + //CTRL 2 + ctrl2 = 0;//astfb_read(sfb, AST_CRT_CTRL2); + + // SoC V2 add CRT interrupt support. We should not touch this setting when changing video timing. + ctrl2 &= ~CRT_CTRL_VLINE_NUM_MASK; + +#ifdef CONFIG_AST_DAC + ctrl2 |= CRT_CTRL_DAC_PWR_EN; +#endif + +#ifdef CONFIG_AST_DVO + ctrl2 |= CRT_CTRL_DVO_EN; +#endif + + astfb_write(sfb, ctrl2 , AST_CRT_CTRL2); + + return 0; +} + +static int astfb_get_cmap_len(struct fb_var_screeninfo *var) +{ + return (var->bits_per_pixel == 8) ? 
256 : 16; +} + +static int astfb_setcolreg(unsigned regno, + unsigned red, unsigned green, unsigned blue, + unsigned transp, struct fb_info *info) +{ + if(regno >= astfb_get_cmap_len(&info->var)) + return 1; + + switch(info->var.bits_per_pixel) { + case 8: + return 1; + break; + case 16: + ((u32 *)(info->pseudo_palette))[regno] = + (red & 0xf800) | + ((green & 0xfc00) >> 5) | + ((blue & 0xf800) >> 11); + break; + case 24: + case 32: + red >>= 8; + green >>= 8; + blue >>= 8; + ((u32 *)(info->pseudo_palette))[regno] = + (red << 16) | (green << 8) | (blue); + break; + } + return 0; + +} + +/* + * Blank the screen if blank_mode != 0, else unblank. Return 0 if blanking + * succeeded, != 0 if un-/blanking failed. + * blank_mode == 2: suspend vsync + * blank_mode == 3: suspend hsync + * blank_mode == 4: powerdown + */ +static int astfb_blank(int blank_mode, struct fb_info *info) +{ + u32 ctrl; + struct astfb_info *sfb = info->par; + + printk(KERN_DEBUG "astfb: astfb_blank mode %d \n",blank_mode); + ctrl = astfb_read(sfb, AST_CRT_CTRL1); + + switch(blank_mode) { + case FB_BLANK_UNBLANK: /* on */ + ctrl &= ~CRT_CTRL_SCREEN_OFF; + break; + case FB_BLANK_NORMAL: /* blank */ + ctrl |= CRT_CTRL_SCREEN_OFF; + break; + case FB_BLANK_VSYNC_SUSPEND: /* no vsync */ + ctrl |= CRT_CTRL_VSYNC_OFF; + break; + case FB_BLANK_HSYNC_SUSPEND: /* no hsync */ + ctrl |= CRT_CTRL_HSYNC_OFF; + break; + case FB_BLANK_POWERDOWN: /* off */ + ctrl |= (CRT_CTRL_SCREEN_OFF | CRT_CTRL_VSYNC_OFF | CRT_CTRL_HSYNC_OFF); + break; + default: + return 1; + } + + /* set reg */ + astfb_write(sfb, ctrl, AST_CRT_CTRL1); + + return 0; + +} /* astfb_blank */ + +static int astfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + var->xres_virtual = var->xres; + var->yres_virtual =var->yres * NUMBER_OF_BUFFERS; +//////////////////////////////////////////////////////////////////// + /* Sanity check for offsets */ + if(var->xoffset < 0) var->xoffset = 0; + if(var->yoffset < 0) var->yoffset = 0; + + 
if(var->xres > var->xres_virtual) + var->xres_virtual = var->xres; + + /* Truncate offsets to maximum if too high */ + if(var->xoffset > var->xres_virtual - var->xres) { + var->xoffset = var->xres_virtual - var->xres - 1; + } + + if(var->yoffset > var->yres_virtual - var->yres) { + var->yoffset = var->yres_virtual - var->yres - 1; + } +//////////////////////////////////////////////////////////////////// + switch(var->bits_per_pixel) { + case 8: + var->red.offset = var->green.offset = var->blue.offset = 0; + var->red.length = var->green.length = var->blue.length = 6; + break; + case 16: + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 24: + case 32: + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.length = 8; + var->transp.offset = 24; + break; + default: + dprintk("bpp=%d not support\n",var->bits_per_pixel); + return -EINVAL; + break; + } + return 0; +} + +static int +astfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) +{ + struct astfb_info *sfb = info->par; + + printk(KERN_DEBUG "astfb: astfb_ioctl is called \n"); + + switch(cmd) { +// case AST_COLOR_FORMAT: +// return 0; + + default: + return -EINVAL; + } + + return 0; + +} /* astfb_ioctl */ + +/* fb ops */ +static struct fb_ops astfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = astfb_check_var, + .fb_set_par = astfb_set_par, + .fb_blank = astfb_blank, + .fb_setcolreg = astfb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_ioctl = astfb_ioctl, + .fb_cursor = astfb_hw_cursor, +#if (NUMBER_OF_BUFFERS > 1) + .fb_pan_display = astfb_pan_display, +#endif +}; + +static void ast_fbmem_free(struct astfb_info *sfb) +{ + iounmap(sfb->info->screen_base); +} + +static 
irqreturn_t +astfb_isr(int irq, void *parm) +{ + u32 status; + struct astfb_info *sfb=parm; + status = astfb_read(sfb, AST_CRT_CTRL1); + astfb_write(sfb, status, AST_CRT_CTRL1); + if (status & CRT_CTRL_VERTICAL_INTR_STS) + wake_up_interruptible(&wq); + + return IRQ_HANDLED; +} + +//TODO .. +static int astfb_setup(struct astfb_info *sfb) +{ + char *this_opt = NULL; + char *options = NULL; + char tmp[128]; + char *tmp_opt; + char name[10]; + int i; + + fb_get_options("astfb", &options); + dprintk("%s\n", options); + + if (!options || !*options) + return -1; + + strcpy(tmp, options); + tmp_opt=tmp; + while ((this_opt = strsep(&tmp_opt, ",")) != NULL) { + printk("x %s \n",this_opt); + if (!strncmp(this_opt, "mode:", 5)) { + printk("%s \n",this_opt); + } else if(!strncmp(this_opt, "hwcursor:", 9)) { + printk("%s \n",this_opt); + } else if(!strncmp(this_opt, "osd:", 4)) { + printk("%s \n",this_opt); + } else if (!strncmp(this_opt, "vram:", 8)) { + printk("%s \n",this_opt); + } else if(!strncmp(this_opt, "dac:", 4)) { + printk("%s \n",this_opt); + } else if(!strncmp(this_opt, "dvo:", 4)) { + printk("%s \n",this_opt); + } else { + printk("f %s \n",this_opt); + } + + } + + return 0; + +} /* astfb_setup */ + +static void sfb_timer(unsigned long private) +{ + struct astfb_info *sfb = (void *) private; + if(sfb->need_wakeup) + { + sfb->need_wakeup=0; + wake_up_interruptible(&wq); + } + if(sfb->next_addr) + { + astfb_write(sfb, (u32)sfb->next_addr, AST_CRT_ADDR); + sfb->need_wakeup=1; + } + mod_timer(&sfb->timer, jiffies + HZ/24); +} + +#ifdef CONFIG_HDMI_CAT6613 +static ssize_t show_hdmi_status(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fb_info = dev_get_drvdata(device); + ssize_t len = 0; + int rc; + + rc=ast_hdmi_get_info(fb_info); + if(rc==1) + len=sprintf(buf, "UNPLUG\n"); + else if(rc==0) + len=sprintf(buf, "PLUG\n"); + else + len=sprintf(buf, "UNKNOWN\n"); + return len; +} + +static ssize_t show_hdmi_enable(struct device 
*device, + struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(device); + struct astfb_info *sfb = info->par; + + return sprintf(buf, "%d\n",sfb->hdmi_en); +} + +static ssize_t store_hdmi_enable(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(device); + struct astfb_info *sfb = info->par; + if(buf[0]=='1') { + ast_hdmi_enable(1); + sfb->hdmi_en=1; + } + else { + ast_hdmi_enable(0); + sfb->hdmi_en=0; + } + + return count; +} +#endif + +static ssize_t show_lcd_enable(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(device); + struct astfb_info *sfb = info->par; + if(astfb_read(sfb, AST_CRT_CTRL1) & CRT_CTRL_GRAPHIC_EN) + return sprintf(buf, "%d\n",1); + else + return sprintf(buf, "%d\n",0); +} + +static ssize_t store_lcd_enable(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(device); + struct astfb_info *sfb = info->par; + if(buf[0]=='1') { + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) | CRT_CTRL_GRAPHIC_EN, AST_CRT_CTRL1); + } + else { + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) & ~CRT_CTRL_GRAPHIC_EN, AST_CRT_CTRL1); + } + + return count; +} + +static ssize_t show_pix_clk(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(device); + struct astfb_info *sfb = info->par; + +// return sprintf(buf, "target_clk=%d\ncalc_clk=%d\n",sfb->target_clk,sfb->calc_clk); +} + +static ssize_t no_pan_display_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%hu\n", gNoPanDisplay); +} + +static ssize_t no_pan_display_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned short value; + + if (sscanf(buf, "%hu", &value) != 1 || + (value != 0 && value != 1 
)) { + dprintk(KERN_ERR "no_pan_display_store : Invalid value\n"); + return -EINVAL; + } + + if(value == 0) + gNoPanDisplay = 0; + else if(value == 1) + gNoPanDisplay = 1; + + return count; +} + +static ssize_t phys_addr_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(device); + return sprintf(buf, "%hu\n", info->fix.smem_start); +} + +static ssize_t virt_addr_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(device); + return sprintf(buf, "%hu\n", info->screen_base); +} + +static struct device_attribute device_attrs[] = { + __ATTR(virt_addr, S_IRUGO | S_IWUGO, virt_addr_show, NULL), + __ATTR(phys_addr, S_IRUGO | S_IWUGO, phys_addr_show, NULL), + __ATTR(no_pan_display, S_IRUGO | S_IWUGO, no_pan_display_show, no_pan_display_store), + __ATTR(lcd_enable, S_IRUGO | S_IWUGO, show_lcd_enable, store_lcd_enable), + __ATTR(pixel_clock, S_IRUGO, show_pix_clk, NULL), +// __ATTR(osd_enable, S_IRUGO, show_osd_enable, store_osd_enable), +// __ATTR(cursor_enable, S_IRUGO, show_cursor_enable, store_cursor_enable), +#ifdef CONFIG_HDMI_CAT6613 + __ATTR(hdmi_status, S_IRUGO, show_hdmi_status, NULL), + __ATTR(hdmi_enable, S_IRUGO | S_IWUGO, show_hdmi_enable, store_hdmi_enable), +#endif +#ifdef CONFIG_VGA_EDID + __ATTR(vga_status, S_IRUGO, show_vga_status, NULL), + __ATTR(vga_detect, S_IRUGO | S_IWUGO, show_vga_edid, NULL), +#endif +}; + +static int astfb_probe(struct platform_device *pdev) +{ + struct astfb_device *astfbdev = NULL; + struct astfb_info *sfb; + struct fb_info *info; + struct device *dev = &pdev->dev; + int ret,i,retval; + char *mode_option; + + dprintk("astfb_probe \n"); + + info = framebuffer_alloc(sizeof(struct astfb_info), dev); + if (!info) { + dev_err(dev, "cannot allocate memory\n"); + return -ENOMEM; + } + + sfb = info->par; + sfb->info = info; + sfb->pdev = pdev; + sfb->fb_plat_data = (struct ast_fb_plat_data *)dev->platform_data; 
+ strcpy(info->fix.id, sfb->pdev->name); + + sfb->reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!sfb->reg_res) { + dev_err(dev, "register resources unusable\n"); + ret = -ENXIO; + goto free_info; + } + + sfb->irq = platform_get_irq(pdev, 0); + if (!sfb->irq) { + dev_err(dev, "unable to get irq\n"); + ret = -ENXIO; + goto free_info; + } + + if(!sfb->fb_plat_data) { + dev_err(dev, "unable to get ast fb platform data\n"); + ret = -ENXIO; + goto free_info; + } + + info->fix.mmio_start = sfb->reg_res->start; + info->fix.mmio_len = sfb->reg_res->end - sfb->reg_res->start + 1; + + if (!request_mem_region(info->fix.mmio_start, info->fix.mmio_len, pdev->name)) { + dev_err(dev, "cannot request CRT registers\n"); + ret = -EBUSY; + goto free_info; + } + + sfb->base = ioremap(info->fix.mmio_start, info->fix.mmio_len); + if (!sfb->base) { + dev_err(dev, "cannot map LCDC registers\n"); + ret = -ENOMEM; + goto free_res; + } + + info->fbops = &astfb_ops; + + if(astfb_setup(sfb)) { + dev_warn(dev, "cannot get fb boot options will use default !!!\n"); + } +// if (!mode_option) { + mode_option = "640x480-32@60"; + info->fix.smem_start = 0x47000000; + +// } + + if(fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8) != 1) { + dev_err(dev, "cannot find db modes \n"); + ret = -ENOMEM; + goto free_res; + } + + + /* resource allocation */ + info->fix.smem_len = SZ_2M * ((info->var.bits_per_pixel)/8 * NUMBER_OF_BUFFERS); //assign 16M for 1920*1080*32it double-buffering + + printk("info->fix.smem_start = %x , len = %d , bpp = %d\n",info->fix.smem_start, info->fix.smem_len, info->var.bits_per_pixel); + + if (!request_mem_region(info->fix.smem_start, info->fix.smem_len, pdev->name)) { + dev_err(dev, "cannot request CRT mem\n"); + ret = -EBUSY; + goto free_io; + } + + info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); + if (!info->screen_base) { + dev_err(dev, "cannot map CRT mem\n"); + ret = -ENOMEM; + goto free_addr; + } + + printk(KERN_INFO "FB 
Phys:%x, Virtual:%x \n", info->fix.smem_start, info->screen_base); + + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.type_aux = 0; + +#if (NUMBER_OF_BUFFERS > 1) + info->fix.ypanstep = 1; +#else + info->fix.ypanstep = 0; +#endif + + info->fix.xpanstep = 0; + info->fix.ywrapstep = 0; + info->fix.visual = FB_VISUAL_TRUECOLOR, + info->fix.accel = FB_ACCEL_NONE; + info->flags = FBINFO_FLAG_DEFAULT; + info->pseudo_palette = sfb->pseudo_palette; + + /* + * Allocate colourmap. + */ + ret=fb_alloc_cmap(&(info->cmap), 256, 0); + if(ret) { + dev_err(dev, "Alloc color map failed\n"); + goto free_mem; + } + + ret = request_irq(sfb->irq, astfb_isr, IRQF_SHARED, pdev->name, sfb); + if (ret) { + dev_err(dev, "Can't request LCD irq"); + ret = -EBUSY; + goto free_cmap; + } + init_waitqueue_head(&wq); + + ret = astfb_check_var(&info->var, info); + if (ret) + goto free_irq; + + init_timer(&sfb->timer); + sfb->timer.data = (long) sfb; + sfb->timer.function = sfb_timer; + astfb_set_par(info); + platform_set_drvdata(pdev, sfb); + ret = register_framebuffer(info); + if (!ret) { + for(i=0;i<sizeof(device_attrs)/sizeof(struct device_attribute);i++) + device_create_file(info->dev, &device_attrs[i]); + return 0; + } + + dev_err(dev, "Failed to register framebuffer device: %d\n", ret); + + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) & ~CRT_CTRL_GRAPHIC_EN, AST_CRT_CTRL1); + platform_set_drvdata(pdev, NULL); +free_irq: + free_irq(sfb->irq,sfb); +free_cmap: + fb_dealloc_cmap(&info->cmap); +free_mem: + ast_fbmem_free(sfb); +free_addr: + if(sfb->addr_assign) + release_mem_region(info->fix.smem_start, info->fix.smem_len); +free_io: + iounmap(sfb->base); +free_res: + release_mem_region(info->fix.mmio_start, info->fix.mmio_len); +free_info: + framebuffer_release(info); + return ret; + +} + +static int +astfb_remove(struct platform_device *pdev) +{ + struct astfb_info *sfb = platform_get_drvdata(pdev); + + unregister_framebuffer(sfb->info); + astfb_write(sfb, astfb_read(sfb, AST_CRT_CTRL1) 
& ~CRT_CTRL_GRAPHIC_EN, AST_CRT_CTRL1); + free_irq(sfb->irq,sfb); + fb_dealloc_cmap(&sfb->info->cmap); + iounmap(sfb->info->screen_base); + if(sfb->addr_assign) + release_mem_region(sfb->info->fix.smem_start, sfb->info->fix.smem_len); + iounmap(sfb->base); + release_mem_region(sfb->info->fix.mmio_start, sfb->info->fix.mmio_len); + framebuffer_release(sfb->info); + platform_set_drvdata(pdev, NULL); + dprintk("astfb_remove \n"); + + return 0; +} + +#ifdef CONFIG_PM +static int astfb_suspend(struct platform_device *pdev, pm_message_t state) +{ + /* TODO */ + return 0; +} + +static int astfb_resume(struct platform_device *pdev) +{ + /* TODO */ + return 0; +} +#else +#define astfb_suspend NULL +#define astfb_resume NULL +#endif + +/* driver ops */ +static struct platform_driver astfb_driver = { + .probe = astfb_probe, + .remove = astfb_remove, + .suspend = astfb_suspend, + .resume = astfb_resume, + .driver = { + .name = "ast-fb", + .owner = THIS_MODULE, + }, + +}; +int __devinit astfb_init(void) +{ + return platform_driver_register(&astfb_driver); +} + +static void __exit astfb_cleanup(void) +{ + printk(KERN_DEBUG "astfb: astfb_remove_module is called \n"); + + platform_driver_unregister(&astfb_driver); +} + +module_init(astfb_init); +module_exit(astfb_cleanup); + +MODULE_AUTHOR("Ryan Chen"); +MODULE_DESCRIPTION("Framebuffer driver for the ASPEED"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/hdmi_cat6613.c b/drivers/video/hdmi_cat6613.c new file mode 100755 index 000000000000..2a6d21f2b6a4 --- /dev/null +++ b/drivers/video/hdmi_cat6613.c @@ -0,0 +1,545 @@ +/******************************************************************************** +* File Name : drivers/video/hdmi_cat6613.c +* Author : Ryan Chen +* Description : HDMI CAT6613 driver +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. 
+* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +* History : +* 1. 2012/08/24 Ryan Chen create this file +* +********************************************************************************/ + +#include <linux/i2c.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/fb.h> + +#include <mach/regs-cat6613.h> +#include "edid.h" + +#define DEVICE_NAME "cat6613" +#define CAT6613_DEVICE_ID 0xCA13 + +struct cat6613_info { + struct i2c_client *client; + struct fb_info *fb_info; + struct aspeed_monitor_info *mon_info; + struct work_struct cat6613_work; + int state;//0:unplug 1:plug + int irq; +}; + +static struct cat6613_info cat6613_device; +struct aspeed_monitor_info monitor_info; + + +static void get_detailed_timing(unsigned char *block, + struct fb_videomode *mode) +{ + mode->xres = H_ACTIVE; + mode->yres = V_ACTIVE; + mode->pixclock = PIXEL_CLOCK; + mode->pixclock /= 1000; + mode->pixclock = KHZ2PICOS(mode->pixclock); + mode->right_margin = H_SYNC_OFFSET; + mode->left_margin = (H_ACTIVE + H_BLANKING) - + (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH); + mode->upper_margin = V_BLANKING - V_SYNC_OFFSET - + V_SYNC_WIDTH; + mode->lower_margin = V_SYNC_OFFSET; + mode->hsync_len = H_SYNC_WIDTH; + mode->vsync_len = V_SYNC_WIDTH; + if (HSYNC_POSITIVE) + mode->sync |= FB_SYNC_HOR_HIGH_ACT; + if (VSYNC_POSITIVE) + 
mode->sync |= FB_SYNC_VERT_HIGH_ACT; + mode->refresh = PIXEL_CLOCK/((H_ACTIVE + H_BLANKING) * + (V_ACTIVE + V_BLANKING)); + if (INTERLACED) { + mode->yres *= 2; + mode->upper_margin *= 2; + mode->lower_margin *= 2; + mode->vsync_len *= 2; + mode->vmode |= FB_VMODE_INTERLACED; + } + else + mode->vmode=0; + mode->flag = FB_MODE_IS_DETAILED; + +} + +static void cat6613_parse_cea(void) +{ + int timing_offset,cea_data_offset=0,data_tag,data_len,vic,i; + char *ext=&cat6613_device.mon_info->edid[128]; + struct fb_monspecs *specs=&cat6613_device.mon_info->specs; + + if(cat6613_device.mon_info->edid[126]==0 || ext[0]!=0x2) { + printk("DVI mode\n"); + cat6613_device.mon_info->type=0; //dvi mode + return; + } + + printk("CEA Revision=%d\n", ext[1]); + + if(ext[3]& (1<<6)) { + printk("HDMI mode\n"); + cat6613_device.mon_info->type=1; //hdmi mode + } + else { + printk("HDMI mode without audio\n"); + cat6613_device.mon_info->type=0; //dvi mode + } + + if(ext[2]==0) //no timing & cea data for parsing + return; + + timing_offset=ext[2]; + + //parsing cea data + if(timing_offset!=4) { + while((cea_data_offset+4)!=timing_offset) { + data_tag=(ext[cea_data_offset+4]>>5)&0x7; //bit 5~7 + data_len=ext[cea_data_offset+4]&0x1f; //bit 0~4 + switch(data_tag) { + case 1: + //printk("audio data block\n"); + break; + case 2: + //printk("video data block\n"); + for(i=1;i<=data_len;i++) { + vic=ext[cea_data_offset+4+i]&0x7f; + //add 720p60 timing + if(vic==4) { + //printk("add 1280x720p60 timing\n"); + memcpy(&specs->modedb[specs->modedb_len], &(panels[8].mode),sizeof(struct fb_videomode)); + specs->modedb_len++; + } + //add 1080p60 timing + if(vic==16) { + //printk("add 1920x1080p60 timing\n"); + memcpy(&specs->modedb[specs->modedb_len], &(panels[9].mode),sizeof(struct fb_videomode)); + specs->modedb_len++; + } + if(vic==2 || vic==3) { + //printk("add 720x480p60 timing\n"); + memcpy(&specs->modedb[specs->modedb_len], &(panels[10].mode),sizeof(struct fb_videomode)); + specs->modedb_len++; + } + 
+ } + break; + case 3: + //printk("vendor data block\n"); + break; + case 4: + //printk("speaker data block\n"); + break; + default: + //printk("unknown data block tag=%d\n",data_tag); + break; + } + cea_data_offset+=(data_len+1); //go to next block + } + } + while(ext[timing_offset]!=0) { + //printk("%x\n",ext[timing_offset+17]); + get_detailed_timing(&ext[timing_offset], &specs->modedb[specs->modedb_len]); + specs->modedb_len++; + timing_offset+=18; + } + +} + +static int cat6613_reset(struct i2c_client *client) +{ + int rc; + rc = i2c_smbus_write_byte_data(client, REG_TX_BANK_CTRL, 0x00); + rc |= i2c_smbus_write_byte_data(client, REG_TX_SW_RST, 0x3d); + msleep(2); + rc |= i2c_smbus_write_byte_data(client, REG_TX_SW_RST, 0x1d); + msleep(2); + rc |= i2c_smbus_write_byte_data(client, REG_TX_HDMI_MODE, 0x00); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AV_MUTE, 0x01); + + //set int + rc |=i2c_smbus_write_byte_data(client,REG_TX_INT_CTRL, 0x40); + rc |=i2c_smbus_write_byte_data(client,REG_TX_INT_MASK1, 0xfd); + rc |=i2c_smbus_write_byte_data(client,REG_TX_INT_MASK2, 0xff) ; + rc |=i2c_smbus_write_byte_data(client,REG_TX_INT_MASK3, 0x7f); + + return rc; + +} + +static int cat6613_afe(struct i2c_client *client) +{ + int tmds,rc=0; + if(cat6613_device.fb_info) { + tmds=1000000/(cat6613_device.fb_info->var.pixclock); + if(tmds > 80) { + rc = i2c_smbus_write_byte_data(client, REG_TX_AFE_DRV_CTRL,0x10); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AFE_XP_CTRL,0x88); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AFE_ISW_CTRL,0x10); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AFE_IP_CTRL,0x84); + + } + else { + + rc = i2c_smbus_write_byte_data(client, REG_TX_AFE_DRV_CTRL,0x10); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AFE_XP_CTRL,0x18); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AFE_ISW_CTRL,0x10); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AFE_IP_CTRL,0x0c); + } + rc |= i2c_smbus_write_byte_data(client, REG_TX_AFE_DRV_CTRL,0x00); + } + + 
return rc; +} + +static int cat6613_set_av(struct i2c_client *client) +{ + int rc=0; + + rc |= i2c_smbus_write_byte_data(client, REG_TX_SW_RST,0xd); //reset av + msleep(1); + if(cat6613_device.mon_info->type==0) { + rc |= i2c_smbus_write_byte_data(client, REG_TX_HDMI_MODE,0); //dvi mode + rc |= i2c_smbus_write_byte_data(client, REG_TX_SW_RST,5); + msleep(1); + return rc; + } + + rc |= i2c_smbus_write_byte_data(client, REG_TX_BANK_CTRL,1); //switch bank 1 + + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB1,0x12); //set underscan + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB2,0x8); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB3,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB4,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB5,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB6,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB7,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB8,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB9,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB10,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB11,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB12,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_DB13,0x0); + + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVIINFO_SUM,0x55); //check sum + + rc |= i2c_smbus_write_byte_data(client, REG_TX_BANK_CTRL,0); //switch bank 0 + rc |= i2c_smbus_write_byte_data(client, REG_TX_PKT_GENERAL_CTRL,1); + rc |= i2c_smbus_write_byte_data(client, REG_TX_NULL_CTRL,1); + rc |= i2c_smbus_write_byte_data(client, REG_TX_ACP_CTRL,0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AVI_INFOFRM_CTRL,3); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AUD_INFOFRM_CTRL,1); + rc |= i2c_smbus_write_byte_data(client, REG_TX_MPG_INFOFRM_CTRL,0); + + rc |= i2c_smbus_write_byte_data(client, REG_TX_HDMI_MODE,1); //hdmi mode + 
+ rc |= i2c_smbus_write_byte_data(client, 0xf8,0xc3); + rc |= i2c_smbus_write_byte_data(client, 0xf8,0xa5); + rc |= i2c_smbus_write_byte_data(client, REG_TX_PKT_SINGLE_CTRL,0x0); //set auto cts + + rc |= i2c_smbus_write_byte_data(client, REG_TX_AUDIO_CTRL0,0x0); + rc |= i2c_smbus_write_byte_data(client, REG_TX_AUDIO_CTRL0,0x1); //set i2s 16bit + rc |= i2c_smbus_write_byte_data(client, REG_TX_AUDIO_CTRL1,0x1); //set not full packet mode & 32bit i2s + + rc |= i2c_smbus_write_byte_data(client, REG_TX_SW_RST,1); + msleep(1); + + return rc; + +} + +static int cat6613_clear_mute(struct i2c_client *client) +{ + int rc; + rc = i2c_smbus_write_byte_data(client, REG_TX_AV_MUTE,0); + return rc; +} + + +static int cat6613_wait_ddc(struct i2c_client *client) +{ + int rc,count; + + for(count=0;count<10;count++) { + rc=i2c_smbus_read_byte_data(client,REG_TX_DDC_STATUS); + if(rc & B_DDC_DONE) + return 0; + msleep(1); + } + printk("ddc timeout\n"); + i2c_smbus_write_byte_data(client,REG_TX_DDC_MASTER_CTRL, B_MASTERHOST ) ; + i2c_smbus_write_byte_data(client,REG_TX_DDC_CMD, CMD_DDC_ABORT) ; + return -1; + +} + +static int cat6613_read_edid(struct i2c_client *client) +{ + int j ; + int remained_byte, offset, count; + remained_byte = 256 ; + offset=0; + + while(offset<256) { + count = (remained_byte<32)?remained_byte:32 ; + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_DDC_MASTER_CTRL, B_MASTERDDC|B_MASTERHOST ) ; + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_DDC_CMD, CMD_FIFO_CLR); + if(cat6613_wait_ddc(cat6613_device.client)) { + return -1; + } + + i2c_smbus_write_byte_data(client,REG_TX_DDC_HEADER, 0xA0) ; + i2c_smbus_write_byte_data(client,REG_TX_DDC_REQOFF, offset) ; + i2c_smbus_write_byte_data(client,REG_TX_DDC_REQCOUNT, count) ; + i2c_smbus_write_byte_data(client,REG_TX_DDC_EDIDSEG, 0) ; + i2c_smbus_write_byte_data(client,REG_TX_DDC_CMD, 3); + if(cat6613_wait_ddc(cat6613_device.client)) { + return -1; + } + for( j = 0 ; j < count ; j++) + { + 
cat6613_device.mon_info->edid[offset+j] = i2c_smbus_read_byte_data(client,REG_TX_DDC_READFIFO); ; + } + remained_byte -= count ; + offset += count ; + } + return 0; + +} + +static void cat6613_add_modes(void) +{ + int i; + struct fb_monspecs *specs=&cat6613_device.mon_info->specs; + struct fb_info *info=cat6613_device.fb_info; + + for(i=0;i<specs->modedb_len;i++) { + fb_add_videomode(&specs->modedb[i],&info->modelist); + } + +} + +static void cat6613_del_modes(void) +{ + int i; + struct fb_monspecs *specs=&cat6613_device.mon_info->specs; + struct fb_info *info=cat6613_device.fb_info; + if(!info) + return; + + for(i=0;i<specs->modedb_len;i++) { + fb_delete_videomode(&specs->modedb[i],&info->modelist); + } +} + +static void cat6613_handle(struct work_struct *work) +{ + char int_status,sys_status,rc,int_status3; + struct fb_var_screeninfo tmp_var; + int_status=i2c_smbus_read_byte_data(cat6613_device.client,REG_TX_INT_STAT1); + sys_status=i2c_smbus_read_byte_data(cat6613_device.client, REG_TX_SYS_STATUS); + int_status3=i2c_smbus_read_byte_data(cat6613_device.client,REG_TX_INT_STAT3); + if(!(sys_status&B_INT_ACTIVE)) + printk("cat6613_handle: no int\n"); + else { +#if 0 + if(int_status & B_INT_DDCFIFO_ERR) { + printk("B_INT_DDCFIFO_ERR\n"); + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_DDC_MASTER_CTRL, B_MASTERHOST ) ; + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_DDC_CMD, CMD_FIFO_CLR); + } + + if(int_status & B_INT_DDC_BUS_HANG) { + printk("B_INT_DDC_BUS_HANG\n"); + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_DDC_MASTER_CTRL, B_MASTERHOST ) ; + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_DDC_CMD, CMD_DDC_ABORT) ; + } + if(int_status & B_INT_HPD_PLUG) { + if(sys_status & B_HPDETECT) + printk("HPD PLUG\n"); + else + printk("HPD UN PLUG 0\n"); + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_INT_CLR0,B_CLR_HPD); + } + if(int_status3 & B_INT_VIDSTABLE) { + if(sys_status & B_TXVIDSTABLE) { + printk("VIDSTABLE\n"); + 
i2c_smbus_write_byte_data(cat6613_device.client, REG_TX_AFE_DRV_CTRL,0x00); + } + else + printk("UN VIDSTABLE\n"); + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_INT_CLR1,B_CLR_VIDSTABLE); + } +#endif + if(int_status & B_INT_RX_SENSE) { + if(sys_status& B_RXSENDETECT) { + if(cat6613_device.state==0) { + rc=cat6613_read_edid(cat6613_device.client); + if(!rc) { + rc=fb_parse_edid(cat6613_device.mon_info->edid,&tmp_var); + if(!rc) + cat6613_device.state=1; + } + } + } + else { + printk("HPD UN PLUG 0\n"); + if(cat6613_device.state==1) { + printk("HPD UN PLUG 1\n"); + cat6613_del_modes(); + cat6613_device.mon_info->status=0; + cat6613_device.state=0; + } + } + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_INT_CLR0,B_CLR_RXSENSE); + } + } + + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_SYS_STATUS, sys_status | B_INTACTDONE ); + i2c_smbus_write_byte_data(cat6613_device.client,REG_TX_SYS_STATUS, sys_status); + enable_irq(cat6613_device.irq); +} + +static irqreturn_t cat6613_isr(int irq, void *parm) +{ + + disable_irq_nosync(cat6613_device.irq); + schedule_work(&cat6613_device.cat6613_work); + return IRQ_HANDLED; +} + +static int hdmi_cat6613_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc=0; + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C)) + return -ENODEV; + + rc=(i2c_smbus_read_byte_data(client, REG_TX_VENDOR_ID1)<<8)&0xff00; + rc|=i2c_smbus_read_byte_data(client, REG_TX_DEVICE_ID0); + if(rc != CAT6613_DEVICE_ID) { + printk(KERN_ERR "%s: read id fail\n", __func__); + return -ENODEV; + } + + cat6613_device.client=client; + cat6613_device.irq=client->irq; + cat6613_device.mon_info=&monitor_info; + + //reset + rc=cat6613_reset(client); + if(rc) + printk(KERN_ERR "%s: reset fail\n", __func__); + + INIT_WORK(&cat6613_device.cat6613_work, cat6613_handle); + rc = request_irq(cat6613_device.irq, cat6613_isr, IRQF_DISABLED, DEVICE_NAME, NULL); + if(rc) { + printk(KERN_ERR "%s: 
request irq fail\n", __func__); + return rc; + } + + return rc; +} + +static int __devexit hdmi_cat6613_remove(struct i2c_client *client) +{ + + return 0; +} + + +static const struct i2c_device_id hmdi_cat6613_id[] = { + { DEVICE_NAME, 0 }, + { } +}; + +static struct i2c_driver hdmi_cat6613_i2c_driver = { + .driver = { + .name = DEVICE_NAME, + .owner = THIS_MODULE, + }, + .probe = hdmi_cat6613_probe, + .remove = __exit_p(hdmi_cat6613_remove), + .id_table = hmdi_cat6613_id, +}; + + +int aspeed_hdmi_get_info(struct fb_info *fb_info) +{ + + if(!fb_info) { + printk("no fb_info\n"); + return -1; + } + cat6613_device.fb_info=fb_info; + + if(cat6613_device.state==0) + return 1; + + if(cat6613_device.mon_info->status==0) { + if(monitor_info.specs.modedb) + fb_destroy_modedb(monitor_info.specs.modedb); + fb_edid_to_monspecs(cat6613_device.mon_info->edid, &cat6613_device.mon_info->specs); + cat6613_parse_cea(); + cat6613_add_modes(); + cat6613_device.mon_info->status=1; + } + return 0; +} + +void aspeed_hdmi_enable(int en) +{ + if(en==0) { + i2c_smbus_write_byte_data(cat6613_device.client, REG_TX_HDMI_MODE, 0x00); + i2c_smbus_write_byte_data(cat6613_device.client, REG_TX_AV_MUTE, 0x01); + } + else { + cat6613_set_av(cat6613_device.client); + cat6613_afe(cat6613_device.client); + cat6613_clear_mute(cat6613_device.client); + } +} + +static int __init hdmi_cat6613_init(void) +{ + int ret; + + ret = i2c_add_driver(&hdmi_cat6613_i2c_driver); + if (ret) + printk(KERN_ERR "%s: failed to add i2c driver\n", __func__); + + return ret; +} + +static void __exit hdmi_cat6613_exit(void) +{ + i2c_del_driver(&hdmi_cat6613_i2c_driver); +} + +module_init(hdmi_cat6613_init); +module_exit(hdmi_cat6613_exit); + +EXPORT_SYMBOL(aspeed_hdmi_get_info); +EXPORT_SYMBOL(aspeed_hdmi_enable); + +MODULE_AUTHOR("Ryan Chen <jsho@aspeed-tech.com>"); +MODULE_DESCRIPTION("CAT6023 HDMI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 
9d285f6ae1db..57021458abf4 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -233,6 +233,12 @@ config ORION5X_WATCHDOG To compile this driver as a module, choose M here: the module will be called orion5x_wdt. +config AST_WATCHDOG + tristate "ASPEED GUC watchdog" + depends on WATCHDOG + help + Watchdog timer for ASPEED chips. + # ARM26 Architecture # AVR32 Architecture diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index e352bbb7630b..ba47642109d5 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o +obj-$(CONFIG_AST_WATCHDOG) += ast_wdt.o # ARM26 Architecture diff --git a/drivers/watchdog/ast_wdt.c b/drivers/watchdog/ast_wdt.c new file mode 100644 index 000000000000..845f1db3d66d --- /dev/null +++ b/drivers/watchdog/ast_wdt.c @@ -0,0 +1,519 @@ +/******************************************************************************** +* File Name : ast_wdt +* +* Copyright (C) 2012-2020 ASPEED Technology Inc. +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by the Free Software Foundation; +* either version 2 of the License, or (at your option) any later version. +* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +* without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +********************************************************************************/ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/miscdevice.h> +#include <linux/watchdog.h> +#include <linux/fs.h> +#include <linux/notifier.h> +#include <linux/reboot.h> +#include <linux/init.h> +#include <linux/semaphore.h> +#include <asm/uaccess.h> + +#include <linux/platform_device.h> +#include <asm/io.h> + +#ifdef CONFIG_COLDFIRE +#include <asm/arch/irqs.h> +#include <asm/arch/ast_wdt.h> +#include <asm/arch/platform.h> +#else +#include <mach/irqs.h> +#include <mach/ast_wdt.h> +#include <mach/platform.h> +#endif + +#define TICKS_PER_uSEC 1 + + +typedef unsigned char bool_T; + +#ifdef TRUE +#undef TRUE +#endif + +#ifdef FALSE +#undef FALSE +#endif + +#define TRUE 1 +#define FALSE 0 + +#if defined(CONFIG_COLDFIRE) +#define WDT_BASE_VA AST_WDT_BASE + +#else +#define WDT_BASE_VA (IO_ADDRESS(AST_WDT_BASE)) +#endif + +#define WDT_CntSts (WDT_BASE_VA+0x00) +#define WDT_Reload (WDT_BASE_VA+0x04) +#define WDT_Restart (WDT_BASE_VA+0x08) +#define WDT_Ctrl (WDT_BASE_VA+0x0C) +#define WDT_TimeOut (WDT_BASE_VA+0x10) +#define WDT_Clr (WDT_BASE_VA+0x14) +#define WDT_RstWd (WDT_BASE_VA+0x18) + + +#define AST_READ_REG(r) (*((volatile unsigned int *) (r))) +#define AST_WRITE_REG(r,v) (*((volatile unsigned int *) (r)) = ((unsigned int) (v))) + + +#define WDT_CLK_SRC_EXT 0 +#define WDT_CLK_SRC_PCLK 1 + +//Global Variables +#define WD_TIMO 6 /* Default heartbeat = 6 seconds */ + +static int heartbeat = WD_TIMO; 
+module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0<heartbeat<65536, default=" __MODULE_STRING(WD_TIMO) ")"); + +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)"); + +static unsigned long wdt_is_open; +static char expect_close; + +//Function Declaration +int __init wdt_init(void); + +static irqreturn_t wdt_isr(int irq, void *devid, struct pt_regs *regs) +{ + /* clear timeout */ + AST_WRITE_REG(WDT_Clr, 1); + + return (IRQ_HANDLED); +} + +void wdt_disable(void) +{ + register unsigned int regVal; + + /* reset WDT_Ctrl[0] as 0 */ + regVal = AST_READ_REG(WDT_Ctrl); + regVal &= 0xFFFFFFFE; + AST_WRITE_REG(WDT_Ctrl, regVal); +} + +void wdt_sel_clk_src(unsigned char sourceClk) +{ + register unsigned int regVal; + + regVal = AST_READ_REG(WDT_Ctrl); + if (sourceClk == WDT_CLK_SRC_PCLK) + { + /* reset WDT_Ctrl[4] as 0 */ + regVal &= 0xFFFFFFEF; + } + else + { + /* set WDT_Ctrl[4] as 1 */ + regVal |= 0x00000010; + } + AST_WRITE_REG(WDT_Ctrl, regVal); +} + +void wdt_set_timeout_action(bool_T bResetOut, bool_T bIntrSys, bool_T bResetSys) +{ + register unsigned int regVal; + + regVal = AST_READ_REG(WDT_Ctrl); + + if (bResetOut) + { + /* set WDT_Ctrl[3] = 1 */ + regVal |= 0x00000008; + } + else + { + /* reset WDT_Ctrl[3] = 0 */ + regVal &= 0xFFFFFFF7; + } + + if (bIntrSys) + { + /* set WDT_Ctrl[2] = 1 */ + regVal |= 0x00000004; + } + else + { + /* reset WDT_Ctrl[2] = 0 */ + regVal &= 0xFFFFFFFB; + } + + if (bResetSys) + { + /* set WDT_Ctrl[1] = 1 */ + regVal |= 0x00000002; + } + else + { + /* reset WDT_Ctrl[1] = 0 */ + regVal &= 0xFFFFFFFD; + } + + AST_WRITE_REG(WDT_Ctrl, regVal); +} + +void wdt_enable(void) +{ + register unsigned int regVal; + + /* set WDT_Ctrl[0] as 1 */ + regVal = AST_READ_REG(WDT_Ctrl); + regVal |= 1; + AST_WRITE_REG(WDT_Ctrl, regVal); +} + +void wdt_restart_new(unsigned int nPeriod, 
int sourceClk, bool_T bResetOut, bool_T bIntrSys, bool_T bResetSys, bool_T bUpdated) +{ + wdt_disable(); + + AST_WRITE_REG(WDT_Reload, nPeriod); + + wdt_sel_clk_src(sourceClk); + + wdt_set_timeout_action(bResetOut, bIntrSys, bResetSys); + + AST_WRITE_REG(WDT_Restart, 0x4755); /* reload! */ + + if (!bUpdated) + wdt_enable(); +} + +void wdt_restart(void) +{ + wdt_disable(); + AST_WRITE_REG(WDT_Restart, 0x4755); /* reload! */ + wdt_enable(); +} + + +/** + * wdt_set_heartbeat: + * @t: the new heartbeat value that needs to be set. + * + * Set a new heartbeat value for the watchdog device. If the heartbeat value is + * incorrect we keep the old value and return -EINVAL. If successfull we + * return 0. + */ +static int wdt_set_heartbeat(int t) +{ + if ((t < 1) || (t > 1000)) + return -EINVAL; + + heartbeat=t; + + wdt_restart_new(TICKS_PER_uSEC*1000000*t, WDT_CLK_SRC_EXT, FALSE, TRUE, FALSE, FALSE); + return 0; +} + +/* + Kernel Interfaces +*/ + +/** + * ast_wdt_write: + * @file: file handle to the watchdog + * @buf: buffer to write (unused as data does not matter here + * @count: count of bytes + * @ppos: pointer to the position to write. No seeks allowed + * + * A write to a watchdog device is defined as a keepalive signal. Any + * write of data will do, as we we don't define content meaning. + */ + + static ssize_t ast_wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) + { + if(count) + { + if (!nowayout) + { + size_t i; + + /* In case it was set long ago */ + expect_close = 0; + + for (i = 0; i != count; i++) + { + char c; + if (get_user(c, buf + i)) + return -EFAULT; + if (c == 'V') + expect_close = 42; + } + } + wdt_restart(); + } + return count; + } + +/** + * ast_wdt_ioctl: + * @inode: inode of the device + * @file: file handle to the device + * @cmd: watchdog command + * @arg: argument pointer + * * The watchdog API defines a common set of functions for all watchdogs + * according to their available features. 
We only actually usefully support + * querying capabilities and current status. + */ + +static int ast_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int __user *p = argp; + int new_heartbeat; + + static struct watchdog_info ident = + { + .options = WDIOF_SETTIMEOUT| + WDIOF_MAGICCLOSE| + WDIOF_KEEPALIVEPING, + .firmware_version = 1, + .identity = "AST WDT", + }; + + switch(cmd) + { + default: + return -ENOIOCTLCMD; + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &ident, sizeof(ident))?-EFAULT:0; + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, p); + case WDIOC_KEEPALIVE: + wdt_restart(); + return 0; + case WDIOC_SETTIMEOUT: + if (get_user(new_heartbeat, p)) + return -EFAULT; + + if (wdt_set_heartbeat(new_heartbeat)) + return -EINVAL; + + /* Fall */ + case WDIOC_GETTIMEOUT: + return put_user(heartbeat, p); + } +} +/** +* ast_wdt_open: +* @inode: inode of device +* @file: file handle to device +* +* The watchdog device has been opened. The watchdog device is single +* open and on opening we load the counters. Counter zero is a 100Hz +* cascade, into counter 1 which downcounts to reboot. When the counter +* triggers counter 2 downcounts the length of the reset pulse which +* set set to be as long as possible. +*/ + +static int ast_wdt_open(struct inode *inode, struct file *file) +{ + if(test_and_set_bit(0, &wdt_is_open)) + return -EBUSY; + /* + * Activate + */ + // wdt_init(); + wdt_restart(); + return nonseekable_open(inode, file); +} + +/** +* ast_wdt_release: +* @inode: inode to board +* @file: file handle to board +* +* The watchdog has a configurable API. There is a religious dispute +* between people who want their watchdog to be able to shut down and +* those who want to be sure if the watchdog manager dies the machine +* reboots. In the former case we disable the counters, in the latter +* case you have to open it again very soon. 
+*/ + +static int ast_wdt_release(struct inode *inode, struct file *file) +{ + if (expect_close == 42 || !nowayout) + { + wdt_disable(); + clear_bit(0, &wdt_is_open); + } + else + { + printk(KERN_CRIT "wdt: WDT device closed unexpectedly. WDT will not stop!\n"); + wdt_restart(); + } + expect_close = 0; + return 0; +} + +/** +* notify_sys: +* @this: our notifier block +* @code: the event being reported +* @unused: unused +* +* Our notifier is called on system shutdowns. We want to turn the card +* off at reboot otherwise the machine will reboot again during memory +* test or worse yet during the following fsck. This would suck, in fact +* trust me - if it happens it does suck. +*/ + +static int ast_wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) +{ + if(code==SYS_DOWN || code==SYS_HALT) + { + /* Turn the WDT off */ + wdt_disable(); + } + return NOTIFY_DONE; +} + +extern void ast_soc_wdt_reset(void) +{ + writel(0x10 , WDT_BASE_VA+0x04); + writel(0x4755, WDT_BASE_VA+0x08); + writel(0x3, WDT_BASE_VA+0x0c); +} + +EXPORT_SYMBOL(ast_soc_wdt_reset); + +static struct file_operations ast_wdt_fops = +{ + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = ast_wdt_write, + .ioctl = ast_wdt_ioctl, + .open = ast_wdt_open, + .release = ast_wdt_release, +}; + +static struct miscdevice ast_wdt_miscdev = +{ + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &ast_wdt_fops, +}; + +static struct notifier_block ast_wdt_notifier = +{ + .notifier_call=ast_wdt_notify_sys, +}; + +static int ast_wdt_probe(struct platform_device *pdev) +{ + int ret; + + wdt_disable(); + wdt_sel_clk_src(WDT_CLK_SRC_EXT); + wdt_set_timeout_action(FALSE, FALSE, FALSE); + + /* register ISR */ + if (request_irq(IRQ_WDT, (void *)wdt_isr, IRQF_DISABLED, "WDT", NULL)) + { + printk("unable to register interrupt INT_WDT = %d\n", IRQ_WDT); + return (-1); + } + else + printk("success to register interrupt for INT_WDT (%d)\n", IRQ_WDT); + + ret = 
register_reboot_notifier(&ast_wdt_notifier); + if(ret) + { + printk(KERN_ERR "wdt: cannot register reboot notifier (err=%d)\n", ret); + free_irq(IRQ_WDT, NULL); + return ret; + } + + ret = misc_register(&ast_wdt_miscdev); + if (ret) + { + printk(KERN_ERR "wdt: cannot register miscdev on minor=%d (err=%d)\n",WATCHDOG_MINOR, ret); + unregister_reboot_notifier(&ast_wdt_notifier); + return ret; + } + + /* interrupt the system while WDT timeout */ + wdt_restart_new(TICKS_PER_uSEC*1000000*heartbeat, WDT_CLK_SRC_EXT, FALSE, TRUE, FALSE, TRUE); + + printk(KERN_INFO "AST WDT is installed.(irq = %d, heartbeat = %d secs, nowayout = %d)\n",IRQ_WDT,heartbeat,nowayout); + + return (0); +} + +static int ast_wdt_remove(struct platform_device *dev) +{ + misc_deregister(&ast_wdt_miscdev); + disable_irq(IRQ_WDT); + free_irq(IRQ_WDT, NULL); + return 0; +} + +static void ast_wdt_shutdown(struct platform_device *dev) +{ + wdt_disable(); +} + +static struct platform_driver ast_wdt_driver = { + .probe = ast_wdt_probe, + .remove = ast_wdt_remove, + .shutdown = ast_wdt_shutdown, +#if 0 + .suspend = ast_wdt_suspend, + .resume = ast_wdt_resume, +#endif + .driver = { + .owner = THIS_MODULE, + .name = "ast-wdt", + }, +}; + +static char banner[] __initdata = KERN_INFO "ASPEED Watchdog Timer, ASPEED Technology Inc.\n"; + +static int __init watchdog_init(void) +{ + printk(banner); + + return platform_driver_register(&ast_wdt_driver); +} + +static void __exit watchdog_exit(void) +{ + platform_driver_unregister(&ast_wdt_driver); +} + +module_init(watchdog_init); +module_exit(watchdog_exit); + +MODULE_DESCRIPTION("Driver for AST Watch Dog"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); +MODULE_LICENSE("GPL"); |