Diffstat (limited to 'common')
-rw-r--r--  common/build.mk            |   1
-rw-r--r--  common/cbi.c               |  34
-rw-r--r--  common/charge_ramp.c       |  32
-rw-r--r--  common/crc.c               |   2
-rw-r--r--  common/i2c_master.c        |  86
-rw-r--r--  common/mkbp_event.c        | 290
-rw-r--r--  common/motion_sense.c      | 157
-rw-r--r--  common/panic_output.c      |   2
-rw-r--r--  common/timer.c             |   4
-rw-r--r--  common/usb_pd_protocol.c   |  17
10 files changed, 469 insertions, 156 deletions
diff --git a/common/build.mk b/common/build.mk
index 3b25e0bc99..5a10c91bb5 100644
--- a/common/build.mk
+++ b/common/build.mk
@@ -11,6 +11,7 @@ common-y+=version.o printf.o queue.o queue_policies.o
common-$(CONFIG_ACCELGYRO_BMA255)+=math_util.o
common-$(CONFIG_ACCELGYRO_BMI160)+=math_util.o
+common-$(CONFIG_ACCELGYRO_ICM426XX)+=math_util.o
common-$(CONFIG_ACCELGYRO_LSM6DS0)+=math_util.o
common-$(CONFIG_ACCELGYRO_LSM6DSM)+=math_util.o
common-$(CONFIG_ACCEL_LIS2DH)+=math_util.o
diff --git a/common/cbi.c b/common/cbi.c
index f37d6950b1..a6dd13443d 100644
--- a/common/cbi.c
+++ b/common/cbi.c
@@ -253,6 +253,37 @@ int cbi_get_oem_id(uint32_t *id)
return cbi_get_board_info(CBI_TAG_OEM_ID, (uint8_t *)id, &size);
}
+int cbi_get_model_id(uint32_t *id)
+{
+ uint8_t size = sizeof(*id);
+
+ return cbi_get_board_info(CBI_TAG_MODEL_ID, (uint8_t *)id, &size);
+}
+
+int cbi_get_fw_config(uint32_t *fw_config)
+{
+ uint8_t size = sizeof(*fw_config);
+
+ return cbi_get_board_info(CBI_TAG_FW_CONFIG, (uint8_t *)fw_config,
+ &size);
+}
+
+int cbi_get_ssfc(uint32_t *ssfc)
+{
+ uint8_t size = sizeof(*ssfc);
+
+ return cbi_get_board_info(CBI_TAG_SSFC, (uint8_t *)ssfc,
+ &size);
+}
+
+int cbi_get_pcb_supplier(uint32_t *pcb_supplier)
+{
+ uint8_t size = sizeof(*pcb_supplier);
+
+ return cbi_get_board_info(CBI_TAG_PCB_SUPPLIER, (uint8_t *)pcb_supplier,
+ &size);
+}
+
static int hc_cbi_get(struct host_cmd_handler_args *args)
{
const struct __ec_align4 ec_params_get_cbi *p = args->params;
@@ -368,6 +399,9 @@ static void dump_cbi(void)
print_tag("BOARD_VERSION", cbi_get_board_version(&val), &val);
print_tag("OEM_ID", cbi_get_oem_id(&val), &val);
print_tag("SKU_ID", cbi_get_sku_id(&val), &val);
+ print_tag("FW_CONFIG", cbi_get_fw_config(&val), &val);
+ print_tag("PCB_SUPPLIER", cbi_get_pcb_supplier(&val), &val);
+ print_tag("SSFC", cbi_get_ssfc(&val), &val);
}
static int cc_cbi(int argc, char **argv)
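For context, a minimal board-side usage sketch for the new accessors (not part of this change; the hook priority, cache variable, and messages are illustrative):

/* Hypothetical board code caching the new CBI fields at init time. */
#include "cbi.h"
#include "console.h"
#include "hooks.h"

static uint32_t fw_config;

static void board_cbi_init(void)
{
	uint32_t val;

	if (cbi_get_fw_config(&val) == EC_SUCCESS)
		fw_config = val;
	else
		ccprintf("CBI: no FW_CONFIG, using defaults\n");

	if (cbi_get_pcb_supplier(&val) == EC_SUCCESS)
		ccprintf("CBI: PCB_SUPPLIER=%u\n", val);
}
DECLARE_HOOK(HOOK_INIT, board_cbi_init, HOOK_PRIO_INIT_I2C + 1);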
diff --git a/common/charge_ramp.c b/common/charge_ramp.c
index 86a0b454cc..3ca5bd89bb 100644
--- a/common/charge_ramp.c
+++ b/common/charge_ramp.c
@@ -11,17 +11,22 @@
#include "usb_charge.h"
#include "util.h"
-#define TYPEC_DTS_RAMP_MAX 2400
-
test_mockable int chg_ramp_allowed(int supplier)
{
/* Don't allow ramping in RO when write protected. */
if (!system_is_in_rw() && system_is_locked())
return 0;
- /* Ramp DTS suppliers. */
- if (supplier == CHARGE_SUPPLIER_TYPEC_DTS)
+ switch (supplier) {
+ case CHARGE_SUPPLIER_TYPEC_DTS:
+#ifdef CONFIG_CHARGE_RAMP_HW
+ /* Need ramping for USB-C chargers as well to avoid voltage droops. */
+ case CHARGE_SUPPLIER_PD:
+ case CHARGE_SUPPLIER_TYPEC:
+#endif
return 1;
+ /* default: fall through */
+ }
	/* Otherwise ask the BC1.2 detect module */
return usb_charger_ramp_allowed(supplier);
@@ -29,12 +34,19 @@ test_mockable int chg_ramp_allowed(int supplier)
test_mockable int chg_ramp_max(int supplier, int sup_curr)
{
- /*
- * Ramp DTS suppliers to advertised current or predetermined
- * limit, whichever is greater.
- */
- if (supplier == CHARGE_SUPPLIER_TYPEC_DTS)
- return MAX(TYPEC_DTS_RAMP_MAX, sup_curr);
+ switch (supplier) {
+#ifdef CONFIG_CHARGE_RAMP_HW
+ case CHARGE_SUPPLIER_PD:
+ case CHARGE_SUPPLIER_TYPEC:
+#endif
+ case CHARGE_SUPPLIER_TYPEC_DTS:
+ /*
+ * We should not ramp DTS beyond what they advertise, otherwise
+ * we may brownout the systems they are connected to.
+ */
+ return sup_curr;
+ /* default: fall through */
+ }
/* Otherwise ask the BC1.2 detect module */
return usb_charger_ramp_max(supplier, sup_curr);
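As a usage sketch of how a caller combines the two hooks (illustrative wrapper, not part of this change):

/* Illustrative caller: pick a ramp target for a given supplier. */
static int ramp_target_ma(int supplier, int advertised_ma)
{
	/* Not allowed to ramp: stick to the advertised current. */
	if (!chg_ramp_allowed(supplier))
		return advertised_ma;

	/* Otherwise ramp up to, but never beyond, the per-supplier max. */
	return chg_ramp_max(supplier, advertised_ma);
}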
diff --git a/common/crc.c b/common/crc.c
index 79d405eb13..a715a6d366 100644
--- a/common/crc.c
+++ b/common/crc.c
@@ -60,7 +60,7 @@ static uint32_t crc32_hash(uint32_t crc, const void *buf, int size)
{
const uint8_t *p;
- p = buf;
+ p = (const uint8_t *)buf;
while (size--) {
crc ^= *p++;
diff --git a/common/i2c_master.c b/common/i2c_master.c
index ec2cad96df..df9dbe57c2 100644
--- a/common/i2c_master.c
+++ b/common/i2c_master.c
@@ -304,6 +304,92 @@ int i2c_write8(int port, int slave_addr, int offset, int data)
return i2c_xfer(port, slave_addr, buf, 2, 0, 0);
}
+int i2c_update8(const int port,
+ const uint16_t slave_addr_flags,
+ const int offset,
+ const uint8_t mask,
+ const enum mask_update_action action)
+{
+ int rv;
+ int val, oldval;
+
+ rv = i2c_read8(port, slave_addr_flags, offset, &oldval);
+ if (rv)
+ return rv;
+
+ val = (action == MASK_SET) ? oldval | mask
+ : oldval & ~mask;
+
+ if (val != oldval)
+ return i2c_write8(port, slave_addr_flags, offset, val);
+
+ return EC_SUCCESS;
+}
+
+int i2c_update16(const int port,
+ const uint16_t slave_addr_flags,
+ const int offset,
+ const uint16_t mask,
+ const enum mask_update_action action)
+{
+ int rv;
+ int val, oldval;
+
+ rv = i2c_read16(port, slave_addr_flags, offset, &oldval);
+ if (rv)
+ return rv;
+
+ val = (action == MASK_SET) ? oldval | mask
+ : oldval & ~mask;
+
+ if (val != oldval)
+ return i2c_write16(port, slave_addr_flags, offset, val);
+
+ return EC_SUCCESS;
+}
+
+int i2c_field_update8(const int port,
+ const uint16_t slave_addr_flags,
+ const int offset,
+ const uint8_t field_mask,
+ const uint8_t set_value)
+{
+ int rv;
+ int val, oldval;
+
+ rv = i2c_read8(port, slave_addr_flags, offset, &oldval);
+ if (rv)
+ return rv;
+
+ val = (oldval & (~field_mask)) | set_value;
+
+ if (val != oldval)
+ return i2c_write8(port, slave_addr_flags, offset, val);
+
+ return EC_SUCCESS;
+}
+
+int i2c_field_update16(const int port,
+ const uint16_t slave_addr_flags,
+ const int offset,
+ const uint16_t field_mask,
+ const uint16_t set_value)
+{
+ int rv;
+ int val, oldval;
+
+ rv = i2c_read16(port, slave_addr_flags, offset, &oldval);
+ if (rv)
+ return rv;
+
+ val = (oldval & (~field_mask)) | set_value;
+
+ if (val != oldval)
+ return i2c_write16(port, slave_addr_flags, offset, val);
+
+ return EC_SUCCESS;
+}
+
int i2c_read_string(int port, int slave_addr, int offset, uint8_t *data,
int len)
{
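A hedged usage sketch for the new read-modify-write helpers (the register offset and field values are made up for illustration):

/* Hypothetical driver code: update a bit-field and a single bit. */
#define REG_CTRL		0x10		/* illustrative register */
#define CTRL_RANGE_MASK		(3 << 2)	/* 2-bit range field */
#define CTRL_RANGE_8G		(2 << 2)
#define CTRL_ENABLE		(1 << 0)

static int sensor_configure(int port, uint16_t addr_flags)
{
	int rv;

	/* Replace only the range bits, leaving the rest of the register. */
	rv = i2c_field_update8(port, addr_flags, REG_CTRL,
			       CTRL_RANGE_MASK, CTRL_RANGE_8G);
	if (rv)
		return rv;

	/* Set the enable bit; the write is skipped if it is already set. */
	return i2c_update8(port, addr_flags, REG_CTRL, CTRL_ENABLE, MASK_SET);
}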
diff --git a/common/mkbp_event.c b/common/mkbp_event.c
index 44cec336b5..18843ef6be 100644
--- a/common/mkbp_event.c
+++ b/common/mkbp_event.c
@@ -10,67 +10,125 @@
#include "gpio.h"
#include "host_command.h"
#include "hwtimer.h"
+#include "timer.h"
#include "link_defs.h"
#include "mkbp_event.h"
#include "power.h"
#include "util.h"
-static uint32_t events;
+#define CPUTS(outstr) cputs(CC_COMMAND, outstr)
+#define CPRINTS(format, args...) cprints(CC_COMMAND, format, ## args)
+#define CPRINTF(format, args...) cprintf(CC_COMMAND, format, ## args)
+
+/*
+ * Tracks the current state of the MKBP interrupt sent from the EC to the AP.
+ *
+ * The inactive state is only valid when there are no events to send to the
+ * AP. If the AP is asleep, some events are not worth waking the AP for, so
+ * the interrupt may remain inactive in that case.
+ *
+ * The transition state (INTERRUPT_INACTIVE_TO_ACTIVE) is used to track the
+ * sometimes lengthy transition for a "rising edge" on platforms that send the
+ * rising edge interrupt through a host communication layer.
+ *
+ * The active state means that a rising edge interrupt has already been sent
+ * to the AP, and the EC is waiting for the AP to call the get-next-event host
+ * command to consume all of the events (at which point the state moves back
+ * to inactive).
+ *
+ * The transition from ACTIVE -> INACTIVE is considered to be simple, meaning
+ * the operation can be performed within a blocking mutex (e.g. a no-op or
+ * setting a GPIO).
+ */
+enum interrupt_state {
+ INTERRUPT_INACTIVE,
+ INTERRUPT_INACTIVE_TO_ACTIVE, /* Transitioning */
+ INTERRUPT_ACTIVE,
+};
+
+struct mkbp_state {
+ struct mutex lock;
+ uint32_t events;
+ enum interrupt_state interrupt;
+ /*
+ * Tracks unique transitions to INTERRUPT_INACTIVE_TO_ACTIVE allowing
+ * only the most recent transition to finish the transition to a final
+ * state -- either active or inactive depending on the result of the
+ * operation.
+ */
+ uint8_t interrupt_id;
+ /*
+ * Tracks the number of consecutive failed attempts for the AP to poll
+ * get_next_events in order to limit the retry logic.
+ */
+ uint8_t failed_attempts;
+};
+
+static struct mkbp_state state;
uint32_t mkbp_last_event_time;
-static void set_event(uint8_t event_type)
+#ifdef CONFIG_MKBP_USE_GPIO
+static int mkbp_set_host_active_via_gpio(int active, uint32_t *timestamp)
{
- atomic_or(&events, 1 << event_type);
-}
+ /*
+ * If we want to take a timestamp, then disable interrupts temporarily
+ * to ensure that the timestamp is as close as possible to the setting
+ * of the GPIO pin in hardware (i.e. we aren't interrupted between
+ * taking the timestamp and setting the gpio)
+ */
+ if (timestamp) {
+ interrupt_disable();
+ *timestamp = __hw_clock_source_read();
+ }
-static void clear_event(uint8_t event_type)
-{
- atomic_clear(&events, 1 << event_type);
-}
+ gpio_set_level(GPIO_EC_INT_L, !active);
-static int event_is_set(uint8_t event_type)
-{
- return events & (1 << event_type);
-}
+ if (timestamp)
+ interrupt_enable();
-#ifndef CONFIG_MKBP_USE_HOST_EVENT
-void mkbp_set_host_active_via_gpio(int active)
-{
- gpio_set_level(GPIO_EC_INT_L, !active);
+ return EC_SUCCESS;
}
#endif
-void mkbp_set_host_active_via_event(int active)
+#ifdef CONFIG_MKBP_USE_HOST_EVENT
+static int mkbp_set_host_active_via_event(int active, uint32_t *timestamp)
{
+ /* This should be moved into host_set_single_event for more accuracy */
+ if (timestamp)
+ *timestamp = __hw_clock_source_read();
if (active)
host_set_single_event(EC_HOST_EVENT_MKBP);
+ return EC_SUCCESS;
}
-
-__attribute__((weak)) void mkbp_set_host_active(int active)
-{
-#ifdef CONFIG_MKBP_USE_HOST_EVENT
- mkbp_set_host_active_via_event(active);
-#else
- mkbp_set_host_active_via_gpio(active);
#endif
-}
-/**
- * Assert host keyboard interrupt line.
+/*
+ * This communicates to the AP whether an MKBP event is currently available
+ * for processing.
+ *
+ * NOTE: When active is 0 this function CANNOT de-schedule. It must be very
+ * simple, like toggling a GPIO or a no-op.
+ *
+ * @param active 1 if there is an event, 0 otherwise
+ * @param timestamp If non-null, this is written as close to the hardware
+ * interrupt from EC->AP as possible.
*/
-static void set_host_interrupt(int active)
+static int mkbp_set_host_active(int active, uint32_t *timestamp)
{
- static int old_active;
-
- interrupt_disable();
-
- if (old_active == 0 && active == 1)
- mkbp_last_event_time = __hw_clock_source_read();
-
- mkbp_set_host_active(active);
-
- old_active = active;
- interrupt_enable();
+#if defined(CONFIG_MKBP_USE_CUSTOM)
+ /*
+ * TODO change mkbp_set_host_active_via_custom declaration. Done in
+ * child CL to decouple changes
+ */
+ if (timestamp)
+ *timestamp = __hw_clock_source_read();
+ mkbp_set_host_active_via_custom(active);
+ return EC_SUCCESS;
+#elif defined(CONFIG_MKBP_USE_HOST_EVENT)
+ return mkbp_set_host_active_via_event(active, timestamp);
+#elif defined(CONFIG_MKBP_USE_GPIO)
+ return mkbp_set_host_active_via_gpio(active, timestamp);
+#endif
}
#ifdef CONFIG_MKBP_WAKEUP_MASK
@@ -92,24 +150,130 @@ static inline int host_is_sleeping(void)
}
#endif /* CONFIG_MKBP_WAKEUP_MASK */
-int mkbp_send_event(uint8_t event_type)
+/*
+ * This is the deferred function that ensures that we attempt to set the MKBP
+ * interrupt again if there was a failure in the system (EC or AP) and the AP
+ * never called get_next_event.
+ */
+static void force_mkbp_if_events(void);
+DECLARE_DEFERRED(force_mkbp_if_events);
+
+static void activate_mkbp_with_events(uint32_t events_to_add)
{
- set_event(event_type);
+ int interrupt_id = -1;
+ int skip_interrupt = 0;
+ int rv, schedule_deferred = 0;
#ifdef CONFIG_MKBP_WAKEUP_MASK
/* Only assert interrupt for wake events if host is sleeping */
- if (host_is_sleeping()) {
- /* Skip host wake if this isn't a wake event */
- if (!(host_get_events() & CONFIG_MKBP_WAKEUP_MASK) &&
- event_type != EC_MKBP_EVENT_KEY_MATRIX)
- return 0;
- }
+ skip_interrupt = host_is_sleeping() &&
+ !(host_get_events() & CONFIG_MKBP_WAKEUP_MASK);
#endif
- set_host_interrupt(1);
+ mutex_lock(&state.lock);
+ state.events |= events_to_add;
+
+	/* Never skip the interrupt if an EC_MKBP_EVENT_KEY_MATRIX is pending */
+ skip_interrupt = skip_interrupt &&
+ !(state.events & (1 << EC_MKBP_EVENT_KEY_MATRIX));
+
+ if (state.events && state.interrupt == INTERRUPT_INACTIVE &&
+ !skip_interrupt) {
+ state.interrupt = INTERRUPT_INACTIVE_TO_ACTIVE;
+ interrupt_id = ++state.interrupt_id;
+ }
+ mutex_unlock(&state.lock);
+
+ /* If we don't need to send an interrupt we are done */
+ if (interrupt_id < 0)
+ return;
+
+ /* Send a rising edge MKBP interrupt */
+ rv = mkbp_set_host_active(1, &mkbp_last_event_time);
+
+ /*
+ * If this was the last interrupt to the AP, update state;
+ * otherwise the latest interrupt should update state.
+ */
+ mutex_lock(&state.lock);
+ if (state.interrupt == INTERRUPT_INACTIVE_TO_ACTIVE &&
+ interrupt_id == state.interrupt_id) {
+ schedule_deferred = 1;
+ state.interrupt = rv == EC_SUCCESS ? INTERRUPT_ACTIVE
+ : INTERRUPT_INACTIVE;
+ }
+ mutex_unlock(&state.lock);
+
+ if (schedule_deferred) {
+ hook_call_deferred(&force_mkbp_if_events_data, SECOND);
+ if (rv != EC_SUCCESS)
+ CPRINTS("Could not activate MKBP (%d). Deferring", rv);
+ }
+}
+
+/*
+ * This is the deferred function that ensures that we attempt to set the MKBP
+ * interrupt again if there was a failure in the system (EC or AP) and the AP
+ * never called get_next_event.
+ */
+static void force_mkbp_if_events(void)
+{
+ int toggled = 0;
+
+ mutex_lock(&state.lock);
+ if (state.interrupt == INTERRUPT_ACTIVE) {
+ if (++state.failed_attempts < 3) {
+ state.interrupt = INTERRUPT_INACTIVE;
+ toggled = 1;
+ }
+ }
+ mutex_unlock(&state.lock);
+
+ if (toggled)
+ CPRINTS("MKBP not cleared within threshold, toggling.");
+
+ activate_mkbp_with_events(0);
+}
+
+int mkbp_send_event(uint8_t event_type)
+{
+ activate_mkbp_with_events(1 << event_type);
+
return 1;
}
+static int set_inactive_if_no_events(void)
+{
+ int interrupt_cleared;
+
+ mutex_lock(&state.lock);
+ interrupt_cleared = !state.events;
+ if (interrupt_cleared) {
+ state.interrupt = INTERRUPT_INACTIVE;
+ state.failed_attempts = 0;
+ /* Only simple tasks (i.e. gpio set or no-op) allowed here */
+ mkbp_set_host_active(0, NULL);
+ }
+ mutex_unlock(&state.lock);
+
+ /* Cancel our safety net since the events were cleared. */
+ if (interrupt_cleared)
+ hook_call_deferred(&force_mkbp_if_events_data, -1);
+
+ return interrupt_cleared;
+}
+
+/* This can only be called when the state.lock mutex is held */
+static int take_event_if_set(uint8_t event_type)
+{
+ int taken;
+
+ taken = state.events & (1 << event_type);
+ state.events &= ~(1 << event_type);
+
+ return taken;
+}
+
static int mkbp_get_next_event(struct host_cmd_handler_args *args)
{
static int last;
@@ -122,24 +286,22 @@ static int mkbp_get_next_event(struct host_cmd_handler_args *args)
* Find the next event to service. We do this in a round-robin
* way to make sure no event gets starved.
*/
+ mutex_lock(&state.lock);
for (i = 0; i < EC_MKBP_EVENT_COUNT; ++i)
- if (event_is_set((last + i) % EC_MKBP_EVENT_COUNT))
+ if (take_event_if_set((last + i) % EC_MKBP_EVENT_COUNT))
break;
+ mutex_unlock(&state.lock);
if (i == EC_MKBP_EVENT_COUNT) {
- set_host_interrupt(0);
- return EC_RES_UNAVAILABLE;
+ if (set_inactive_if_no_events())
+ return EC_RES_UNAVAILABLE;
+ /* An event was set just now, restart loop. */
+ continue;
}
evt = (i + last) % EC_MKBP_EVENT_COUNT;
last = evt + 1;
- /*
- * Clear the event before retrieving the event data in case the
- * event source wants to send the same event.
- */
- clear_event(evt);
-
for (src = __mkbp_evt_srcs; src < __mkbp_evt_srcs_end; ++src)
if (src->event_type == evt)
break;
@@ -158,12 +320,16 @@ static int mkbp_get_next_event(struct host_cmd_handler_args *args)
* event first.
*/
data_size = src->get_data(resp + 1);
- if (data_size == -EC_ERROR_BUSY)
- set_event(evt);
+ if (data_size == -EC_ERROR_BUSY) {
+ mutex_lock(&state.lock);
+ state.events |= 1 << evt;
+ mutex_unlock(&state.lock);
+ }
} while (data_size == -EC_ERROR_BUSY);
- if (!events)
- set_host_interrupt(0);
+ /* If there are no more events and we support the "more" flag, set it */
+ if (!set_inactive_if_no_events() && args->version >= 2)
+ resp[0] |= EC_MKBP_HAS_MORE_EVENTS;
if (data_size < 0)
return EC_RES_ERROR;
@@ -173,7 +339,7 @@ static int mkbp_get_next_event(struct host_cmd_handler_args *args)
}
DECLARE_HOST_COMMAND(EC_CMD_GET_NEXT_EVENT,
mkbp_get_next_event,
- EC_VER_MASK(0) | EC_VER_MASK(1));
+ EC_VER_MASK(0) | EC_VER_MASK(1) | EC_VER_MASK(2));
#ifdef CONFIG_MKBP_WAKEUP_MASK
static int mkbp_get_wake_mask(struct host_cmd_handler_args *args)
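For reference, the producer side feeding this state machine is unchanged: an event source stages its data, calls mkbp_send_event(), and registers a getter with DECLARE_EVENT_SOURCE(). A minimal sketch (the event type and payload are illustrative):

#include "mkbp_event.h"
#include "util.h"

static uint32_t example_state;

/* Driver/interrupt path: stage data and raise the MKBP event. */
void example_sensor_changed(uint32_t new_state)
{
	example_state = new_state;
	mkbp_send_event(EC_MKBP_EVENT_BUTTON);
}

/* Called from mkbp_get_next_event() when the AP fetches the event. */
static int example_get_next_data(uint8_t *data)
{
	memcpy(data, &example_state, sizeof(example_state));
	return sizeof(example_state);
}
DECLARE_EVENT_SOURCE(EC_MKBP_EVENT_BUTTON, example_get_next_data);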
diff --git a/common/motion_sense.c b/common/motion_sense.c
index 1023409f99..353e54030b 100644
--- a/common/motion_sense.c
+++ b/common/motion_sense.c
@@ -45,13 +45,8 @@ const intv3_t orientation_modes[] = {
};
#endif
-/*
- * Sampling interval for measuring acceleration and calculating lid angle.
- */
-test_export_static unsigned int motion_interval;
-
/* Delay between FIFO interrupts to the AP. */
-static unsigned int motion_int_interval;
+static unsigned int ap_event_interval;
/* Minimum time in between running motion sense task loop. */
unsigned int motion_min_interval = CONFIG_MOTION_MIN_SENSE_WAIT_TIME * MSEC;
@@ -208,22 +203,19 @@ static inline int motion_sensor_in_forced_mode(
#endif
}
-
-
/* Minimal amount of time since last collection before triggering a new one */
static inline int motion_sensor_time_to_read(const timestamp_t *ts,
const struct motion_sensor_t *sensor)
{
- int rate_mhz = sensor->drv->get_data_rate(sensor);
-
- if (rate_mhz == 0)
+ if (sensor->collection_rate == 0)
return 0;
+
/*
- * converting from mHz to us.
- * If within 95% of the time, check sensor.
+	 * If the time is within the min motion interval (3 ms), go ahead and
+	 * read from the sensor.
*/
return time_after(ts->le.lo,
- sensor->last_collection + SECOND * 950 / rate_mhz);
+ sensor->next_collection - motion_min_interval);
}
static enum sensor_config motion_sense_get_ec_config(void)
@@ -296,7 +288,9 @@ int motion_sense_set_data_rate(struct motion_sensor_t *sensor)
* Reset last collection: the last collection may be so much in the past
* it may appear to be in the future.
*/
- sensor->last_collection = ts.le.lo;
+ odr = sensor->drv->get_data_rate(sensor);
+ sensor->collection_rate = odr > 0 ? SECOND * 1000 / odr : 0;
+ sensor->next_collection = ts.le.lo + sensor->collection_rate;
sensor->oversampling = 0;
mutex_unlock(&g_sensor_mutex);
return 0;
@@ -402,9 +396,9 @@ static int motion_sense_ec_rate(struct motion_sensor_t *sensor)
*
* Note: Not static to be tested.
*/
-static int motion_sense_set_motion_intervals(void)
+static void motion_sense_set_motion_intervals(void)
{
- int i, sensor_ec_rate, ec_rate = 0, ec_int_rate = 0;
+ int i, sensor_ec_rate, ec_int_rate = 0;
struct motion_sensor_t *sensor;
for (i = 0; i < motion_sensor_count; ++i) {
sensor = &motion_sensors[i];
@@ -415,28 +409,20 @@ static int motion_sense_set_motion_intervals(void)
(sensor->drv->get_data_rate(sensor) == 0))
continue;
- sensor_ec_rate = motion_sense_ec_rate(sensor);
- if (sensor_ec_rate == 0)
- continue;
- if (ec_rate == 0 || sensor_ec_rate < ec_rate)
- ec_rate = sensor_ec_rate;
-
sensor_ec_rate = motion_sense_select_ec_rate(
sensor, SENSOR_CONFIG_AP, 1);
if (ec_int_rate == 0 ||
(sensor_ec_rate && sensor_ec_rate < ec_int_rate))
ec_int_rate = sensor_ec_rate;
}
- motion_interval = ec_rate;
- motion_int_interval =
+ ap_event_interval =
MAX(0, ec_int_rate - MOTION_SENSOR_INT_ADJUSTMENT_US);
/*
* Wake up the motion sense task: we want to sensor task to take
* in account the new period right away.
*/
task_wake(TASK_ID_MOTIONSENSE);
- return motion_interval;
}
static inline int motion_sense_init(struct motion_sensor_t *sensor)
@@ -692,11 +678,8 @@ static inline void update_sense_data(uint8_t *lpc_status, int *psample_id)
static int motion_sense_read(struct motion_sensor_t *sensor)
{
- if (sensor->state != SENSOR_INITIALIZED)
- return EC_ERROR_UNKNOWN;
-
- if (sensor->drv->get_data_rate(sensor) == 0)
- return EC_ERROR_NOT_POWERED;
+ ASSERT(sensor->state == SENSOR_INITIALIZED);
+ ASSERT(sensor->drv->get_data_rate(sensor) != 0);
#ifdef CONFIG_ACCEL_SPOOF_MODE
/*
@@ -711,6 +694,30 @@ static int motion_sense_read(struct motion_sensor_t *sensor)
return sensor->drv->read(sensor, sensor->raw_xyz);
}
+
+static inline void increment_sensor_collection(struct motion_sensor_t *sensor,
+ const timestamp_t *ts)
+{
+ sensor->next_collection += sensor->collection_rate;
+
+ if (time_after(ts->le.lo, sensor->next_collection)) {
+ /*
+ * If we get here it means that we completely missed a sensor
+ * collection time and we attempt to recover by scheduling as
+		 * soon as possible. This should not happen, and if it does it
+		 * means that the EC cannot handle the requested data rate.
+ */
+ int missed_events =
+ time_until(sensor->next_collection, ts->le.lo) /
+ sensor->collection_rate;
+
+ CPRINTS("%s Missed %d data collections at %u - rate: %d",
+ sensor->name, missed_events, sensor->next_collection,
+ sensor->collection_rate);
+ sensor->next_collection = ts->le.lo + motion_min_interval;
+ }
+}
+
static int motion_sense_process(struct motion_sensor_t *sensor,
uint32_t *event,
const timestamp_t *ts)
@@ -729,6 +736,13 @@ static int motion_sense_process(struct motion_sensor_t *sensor,
struct ec_response_motion_sensor_data vector;
int *v = sensor->raw_xyz;
+ /*
+		 * Since motion_sense_read can sleep, another task may be
+		 * scheduled. In particular, if suspend is called by the
+		 * HOOKS task, it may set collection_rate to 0 and we
+		 * would crash in increment_sensor_collection.
+ */
+ increment_sensor_collection(sensor, ts);
ret = motion_sense_read(sensor);
if (ret == EC_SUCCESS) {
vector.flags = 0;
@@ -743,7 +757,6 @@ static int motion_sense_process(struct motion_sensor_t *sensor,
motion_sense_fifo_add_data(&vector, sensor, 3,
__hw_clock_source_read());
}
- sensor->last_collection = ts->le.lo;
} else {
ret = EC_ERROR_BUSY;
}
@@ -760,8 +773,14 @@ static int motion_sense_process(struct motion_sensor_t *sensor,
if (motion_sensor_in_forced_mode(sensor)) {
if (motion_sensor_time_to_read(ts, sensor)) {
/* Get latest data for local calculation */
+ /*
+			 * Since motion_sense_read can sleep, another task may be
+			 * scheduled. In particular, if suspend is called by the
+			 * HOOKS task, it may set collection_rate to 0 and we
+			 * would crash in increment_sensor_collection.
+ */
+ increment_sensor_collection(sensor, ts);
ret = motion_sense_read(sensor);
- sensor->last_collection = ts->le.lo;
} else {
ret = EC_ERROR_BUSY;
}
@@ -897,6 +916,7 @@ void motion_sense_task(void *u)
{
int i, ret, wait_us;
timestamp_t ts_begin_task, ts_end_task;
+ int32_t time_diff;
uint32_t event = 0;
uint16_t ready_status;
struct motion_sensor_t *sensor;
@@ -970,7 +990,6 @@ void motion_sense_task(void *u)
update_sense_data(lpc_status, &sample_id);
#endif
- ts_end_task = get_time();
#ifdef CONFIG_ACCEL_FIFO
/*
* Ask the host to flush the queue if
@@ -981,14 +1000,14 @@ void motion_sense_task(void *u)
if (fifo_flush_needed || wake_up_needed ||
event & TASK_EVENT_MOTION_ODR_CHANGE ||
queue_space(&motion_sense_fifo) < CONFIG_ACCEL_FIFO_THRES ||
- (motion_int_interval > 0 &&
- time_after(ts_end_task.le.lo,
- ts_last_int.le.lo + motion_int_interval))) {
+ (ap_event_interval > 0 &&
+ time_after(ts_begin_task.le.lo,
+ ts_last_int.le.lo + ap_event_interval))) {
if (!fifo_flush_needed)
motion_sense_insert_timestamp(
__hw_clock_source_read());
fifo_flush_needed = 0;
- ts_last_int = ts_end_task;
+ ts_last_int = ts_begin_task;
/*
* Count the number of event the AP is allowed to
* collect.
@@ -1012,25 +1031,36 @@ void motion_sense_task(void *u)
#endif
}
#endif
- if (motion_interval > 0) {
- /*
- * Delay appropriately to keep sampling time
- * consistent.
- */
- wait_us = motion_interval -
- (ts_end_task.val - ts_begin_task.val);
- /* and it cannnot be negative */
- wait_us = MAX(wait_us, 0);
+ ts_end_task = get_time();
+ wait_us = -1;
+
+ for (i = 0; i < motion_sensor_count; i++) {
+ struct motion_sensor_t *sensor = &motion_sensors[i];
+
+ if (!motion_sensor_in_forced_mode(sensor) ||
+ sensor->collection_rate == 0)
+ continue;
+
+ time_diff = time_until(ts_end_task.le.lo,
+ sensor->next_collection);
+
+ /* We missed our collection time so wake soon */
+ if (time_diff <= 0) {
+ wait_us = 0;
+ break;
+ }
+
+ if (wait_us == -1 || wait_us > time_diff)
+ wait_us = time_diff;
+ }
+ if (wait_us >= 0 && wait_us < motion_min_interval) {
/*
- * Guarantee some minimum delay to allow other lower
- * priority tasks to run.
- */
- if (wait_us < motion_min_interval)
- wait_us = motion_min_interval;
- } else {
- wait_us = -1;
+ * Guarantee some minimum delay to allow other lower
+ * priority tasks to run.
+ */
+ wait_us = motion_min_interval;
}
event = task_wait_event(wait_us);
@@ -1633,8 +1663,7 @@ static int command_accel_data_rate(int argc, char **argv)
sensor->drv->get_data_rate(sensor));
ccprintf("EC rate for sensor %d: %d\n", id,
motion_sense_ec_rate(sensor));
- ccprintf("Current EC rate: %d\n", motion_interval);
- ccprintf("Current Interrupt rate: %d\n", motion_int_interval);
+ ccprintf("Current Interrupt rate: %d\n", ap_event_interval);
}
return EC_SUCCESS;
@@ -1710,7 +1739,6 @@ DECLARE_CONSOLE_COMMAND(accelinit, command_accel_init,
#ifdef CONFIG_CMD_ACCEL_INFO
static int command_display_accel_info(int argc, char **argv)
{
- char *e;
int val;
if (argc > 3)
@@ -1724,21 +1752,6 @@ static int command_display_accel_info(int argc, char **argv)
accel_disp = val;
}
- /*
- * Second arg changes the accel task time interval. Note accel
- * sampling interval will be clobbered when chipset suspends or
- * resumes.
- */
- if (argc > 2) {
- val = strtoi(argv[2], &e, 0);
- if (*e)
- return EC_ERROR_PARAM2;
-
- motion_interval = val * MSEC;
- task_wake(TASK_ID_MOTIONSENSE);
-
- }
-
return EC_SUCCESS;
}
DECLARE_CONSOLE_COMMAND(accelinfo, command_display_accel_info,
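A standalone sketch of the next_collection bookkeeping introduced above (simplified types; it mirrors increment_sensor_collection() and motion_sensor_time_to_read() rather than being the actual code):

#include <stdint.h>

#define MIN_INTERVAL_US 3000	/* stand-in for motion_min_interval */

struct sched {
	uint32_t collection_rate;	/* sample period in us, from the ODR */
	uint32_t next_collection;	/* absolute deadline in us */
};

/* After a read, move the deadline one period forward. */
static void advance(struct sched *s, uint32_t now)
{
	s->next_collection += s->collection_rate;

	/* Fell a full period (or more) behind: resync close to "now". */
	if ((int32_t)(now - s->next_collection) > 0)
		s->next_collection = now + MIN_INTERVAL_US;
}

/* Read once we are within MIN_INTERVAL_US of the deadline. */
static int time_to_read(const struct sched *s, uint32_t now)
{
	return (int32_t)(now - (s->next_collection - MIN_INTERVAL_US)) > 0;
}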
diff --git a/common/panic_output.c b/common/panic_output.c
index e6b48a375d..fd1b1999a0 100644
--- a/common/panic_output.c
+++ b/common/panic_output.c
@@ -98,7 +98,7 @@ void panic_assert_fail(const char *msg, const char *func, const char *fname,
panic_printf("\nASSERTION FAILURE '%s' in %s() at %s:%d\n",
msg, func, fname, linenum);
#ifdef CONFIG_SOFTWARE_PANIC
- software_panic(PANIC_SW_ASSERT, linenum);
+ panic_assert(func, fname, (uint16_t)linenum);
#else
panic_reboot();
#endif
diff --git a/common/timer.c b/common/timer.c
index 117cea4b71..6f31d93b09 100644
--- a/common/timer.c
+++ b/common/timer.c
@@ -94,8 +94,7 @@ void process_timers(int overflow)
} while (next.val <= get_time().val);
}
-#ifndef CONFIG_HW_SPECIFIC_UDELAY
-void udelay(unsigned us)
+__overridable void udelay(unsigned us)
{
unsigned t0 = __hw_clock_source_read();
@@ -112,7 +111,6 @@ void udelay(unsigned us)
while (__hw_clock_source_read() - t0 <= us)
;
}
-#endif
int timer_arm(timestamp_t tstamp, task_id_t tskid)
{
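Since udelay() is now __overridable (a weak symbol), a chip with its own calibrated busy-wait can replace it. A sketch, assuming the companion __override marker and a hypothetical chip_delay_us() helper:

/* Hypothetical chip-level replacement for the generic udelay(). */
__override void udelay(unsigned us)
{
	/* chip_delay_us() is illustrative; any precise wait source works. */
	chip_delay_us(us);
}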
diff --git a/common/usb_pd_protocol.c b/common/usb_pd_protocol.c
index a75baa5c14..6c71c155cc 100644
--- a/common/usb_pd_protocol.c
+++ b/common/usb_pd_protocol.c
@@ -2577,8 +2577,8 @@ void pd_interrupt_handler_task(void *p)
const int port_mask = (PD_STATUS_TCPC_ALERT_0 << port);
struct {
int count;
- uint32_t time;
- } storm_tracker[CONFIG_USB_PD_PORT_COUNT] = { 0 };
+ timestamp_t time;
+ } storm_tracker[CONFIG_USB_PD_PORT_COUNT] = {};
ASSERT(port >= 0 && port < CONFIG_USB_PD_PORT_COUNT);
@@ -2601,14 +2601,17 @@ void pd_interrupt_handler_task(void *p)
*/
while ((tcpc_get_alert_status() & port_mask) &&
pd_is_port_enabled(port)) {
- uint32_t now;
+ timestamp_t now;
tcpc_alert(port);
- now = get_time().le.lo;
- if (time_after(now, storm_tracker[port].time)) {
- storm_tracker[port].time =
- now + ALERT_STORM_INTERVAL;
+ now = get_time();
+ if (timestamp_expired(
+ storm_tracker[port].time, &now)) {
+ /* Reset timer into future */
+ storm_tracker[port].time.val =
+ now.val + ALERT_STORM_INTERVAL;
+
/*
* Start at 1 since we are processing
* an interrupt now