Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/gsi.c              | 499
-rw-r--r--  drivers/net/ipa/gsi.h              |  52
-rw-r--r--  drivers/net/ipa/gsi_reg.h          | 159
-rw-r--r--  drivers/net/ipa/ipa_clock.c        |  47
-rw-r--r--  drivers/net/ipa/ipa_clock.h        |   5
-rw-r--r--  drivers/net/ipa/ipa_cmd.c          |   6
-rw-r--r--  drivers/net/ipa/ipa_cmd.h          |  21
-rw-r--r--  drivers/net/ipa/ipa_data-sc7180.c  |  25
-rw-r--r--  drivers/net/ipa/ipa_data-sdm845.c  |  29
-rw-r--r--  drivers/net/ipa/ipa_data.h         |  43
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c     | 258
-rw-r--r--  drivers/net/ipa/ipa_endpoint.h     |   2
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c    |   6
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h    |  16
-rw-r--r--  drivers/net/ipa/ipa_main.c         | 333
-rw-r--r--  drivers/net/ipa/ipa_mem.c          |  10
-rw-r--r--  drivers/net/ipa/ipa_qmi.c          |  10
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h      |  12
-rw-r--r--  drivers/net/ipa/ipa_reg.h          | 486
-rw-r--r--  drivers/net/ipa/ipa_table.c        |   4
-rw-r--r--  drivers/net/ipa/ipa_uc.c           |  46
-rw-r--r--  drivers/net/ipa/ipa_version.h      |   1
22 files changed, 1415 insertions(+), 655 deletions(-)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 6bfac1efe037..c4795249719d 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -21,6 +21,7 @@
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
+#include "ipa_version.h"
/**
* DOC: The IPA Generic Software Interface
@@ -91,6 +92,7 @@
#define GSI_CMD_TIMEOUT 5 /* seconds */
#define GSI_CHANNEL_STOP_RX_RETRIES 10
+#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
@@ -108,62 +110,6 @@ struct gsi_event {
u8 chid;
};
-/* Hardware values from the error log register error code field */
-enum gsi_err_code {
- GSI_INVALID_TRE_ERR = 0x1,
- GSI_OUT_OF_BUFFERS_ERR = 0x2,
- GSI_OUT_OF_RESOURCES_ERR = 0x3,
- GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
- GSI_EVT_RING_EMPTY_ERR = 0x5,
- GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
- GSI_HWO_1_ERR = 0x8,
-};
-
-/* Hardware values from the error log register error type field */
-enum gsi_err_type {
- GSI_ERR_TYPE_GLOB = 0x1,
- GSI_ERR_TYPE_CHAN = 0x2,
- GSI_ERR_TYPE_EVT = 0x3,
-};
-
-/* Hardware values used when programming an event ring */
-enum gsi_evt_chtype {
- GSI_EVT_CHTYPE_MHI_EV = 0x0,
- GSI_EVT_CHTYPE_XHCI_EV = 0x1,
- GSI_EVT_CHTYPE_GPI_EV = 0x2,
- GSI_EVT_CHTYPE_XDCI_EV = 0x3,
-};
-
-/* Hardware values used when programming a channel */
-enum gsi_channel_protocol {
- GSI_CHANNEL_PROTOCOL_MHI = 0x0,
- GSI_CHANNEL_PROTOCOL_XHCI = 0x1,
- GSI_CHANNEL_PROTOCOL_GPI = 0x2,
- GSI_CHANNEL_PROTOCOL_XDCI = 0x3,
-};
-
-/* Hardware values representing an event ring immediate command opcode */
-enum gsi_evt_cmd_opcode {
- GSI_EVT_ALLOCATE = 0x0,
- GSI_EVT_RESET = 0x9,
- GSI_EVT_DE_ALLOC = 0xa,
-};
-
-/* Hardware values representing a generic immediate command opcode */
-enum gsi_generic_cmd_opcode {
- GSI_GENERIC_HALT_CHANNEL = 0x1,
- GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
-};
-
-/* Hardware values representing a channel immediate command opcode */
-enum gsi_ch_cmd_opcode {
- GSI_CH_ALLOCATE = 0x0,
- GSI_CH_START = 0x1,
- GSI_CH_STOP = 0x2,
- GSI_CH_RESET = 0x9,
- GSI_CH_DE_ALLOC = 0xa,
-};
-
/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
* @max_outstanding_tre:
* Defines the maximum number of TREs allowed in a single transaction
@@ -229,21 +175,76 @@ static u32 gsi_channel_id(struct gsi_channel *channel)
return channel - &channel->gsi->channel[0];
}
+/* Update the GSI IRQ type register with the cached value */
+static void gsi_irq_type_update(struct gsi *gsi, u32 val)
+{
+ gsi->type_enabled_bitmap = val;
+ iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+}
+
+static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+}
+
+/* Turn off all GSI interrupts initially */
+static void gsi_irq_setup(struct gsi *gsi)
+{
+ u32 adjust;
+
+ /* Disable all interrupt types */
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear all type-specific interrupt masks */
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ /* Reverse the offset adjustment for inter-EE register offsets */
+ adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+ iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+ iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Turn off all GSI interrupts when we're all done */
+static void gsi_irq_teardown(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
+ bool enable_ieob = !gsi->ieob_enabled_bitmap;
u32 val;
- gsi->event_enable_bitmap |= BIT(evt_ring_id);
- val = gsi->event_enable_bitmap;
+ gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+ val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ /* Enable the interrupt type if this is the first channel enabled */
+ if (enable_ieob)
+ gsi_irq_type_enable(gsi, GSI_IEOB);
}
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val;
- gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
- val = gsi->event_enable_bitmap;
+ gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+
+ /* Disable the interrupt type if this was the last enabled channel */
+ if (!gsi->ieob_enabled_bitmap)
+ gsi_irq_type_disable(gsi, GSI_IEOB);
+
+ val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
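
Note the ordering in the pair above: the enable path writes the per-ring IEOB mask before turning the IEOB type on, while the disable path turns the type off before shrinking the mask. A condensed sketch of the invariant (editorial comment, not driver code):

	/* Enable:  mask register first, then IEOB type -- when the type
	 * fires, the mask already names every ring we service.
	 * Disable: IEOB type first, then mask register -- the type is
	 * never live while the mask still names a ring being torn down.
	 */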
@@ -252,38 +253,32 @@ static void gsi_irq_enable(struct gsi *gsi)
{
u32 val;
- /* We don't use inter-EE channel or event interrupts */
- val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
- val &= ~INTER_EE_CH_CTRL_FMASK;
- val &= ~INTER_EE_EV_CTRL_FMASK;
- iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
-
- val = GENMASK(gsi->channel_count - 1, 0);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-
- val = GENMASK(gsi->evt_ring_count - 1, 0);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-
- /* Each IEOB interrupt is enabled (later) as needed by channels */
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-
- val = GSI_CNTXT_GLOB_IRQ_ALL;
- iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ /* Global interrupts include hardware error reports. Enable
+ * that so we can at least report the error should it occur.
+ */
+ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
- /* Never enable GSI_BREAK_POINT */
- val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
+ /* General GSI interrupts are reported to all EEs; if they occur
+ * they are unrecoverable (without reset). A breakpoint interrupt
+ * also exists, but we don't support that. We want to be notified
+ * of errors so we can report them, even if they can't be handled.
+ */
+ val = BIT(BUS_ERROR);
+ val |= BIT(CMD_FIFO_OVRFLOW);
+ val |= BIT(MCS_STACK_OVRFLOW);
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}
-/* Disable all GSI_interrupt types */
+/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}
/* Return the virtual address associated with a ring index */
@@ -337,13 +332,30 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
+ bool success;
u32 val;
+ /* We only perform one event ring command at a time, and event
+ * control interrupts should only occur when such a command
+ * is issued here. Only permit *this* event ring to trigger
+ * an interrupt, and only enable the event control IRQ type
+ * when we expect it to occur.
+ */
+ val = BIT(evt_ring_id);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+
+ /* Disable the interrupt again */
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ if (success)
+ return 0;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
@@ -360,15 +372,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state %u before alloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
+ evt_ring_id, evt_ring->state);
return -EINVAL;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state %u after alloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, evt_ring->state);
ret = -EIO;
}
@@ -384,15 +396,15 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
- dev_err(gsi->dev, "bad event ring state %u before reset\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
+ evt_ring_id, evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state %u after reset\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
@@ -402,15 +414,15 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
+ evt_ring_id, evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
@@ -433,13 +445,29 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
+ bool success;
u32 val;
+ /* We only perform one channel command at a time, and channel
+ * control interrupts should only occur when such a command is
+ * issued here. So we only permit *this* channel to trigger
+ * an interrupt and only enable the channel control IRQ type
+ * when we expect it to occur.
+ */
+ val = BIT(channel_id);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+ success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ /* Disable the interrupt again */
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ if (success)
+ return 0;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
@@ -458,7 +486,8 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Get initial channel state */
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
- dev_err(dev, "bad channel state %u before alloc\n", state);
+ dev_err(dev, "channel %u bad state %u before alloc\n",
+ channel_id, state);
return -EINVAL;
}
@@ -467,7 +496,8 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "bad channel state %u after alloc\n", state);
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
ret = -EIO;
}
@@ -484,7 +514,8 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
state != GSI_CHANNEL_STATE_STOPPED) {
- dev_err(dev, "bad channel state %u before start\n", state);
+ dev_err(dev, "channel %u bad state %u before start\n",
+ gsi_channel_id(channel), state);
return -EINVAL;
}
@@ -493,7 +524,8 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "bad channel state %u after start\n", state);
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
ret = -EIO;
}
@@ -517,7 +549,8 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
if (state != GSI_CHANNEL_STATE_STARTED &&
state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
- dev_err(dev, "bad channel state %u before stop\n", state);
+ dev_err(dev, "channel %u bad state %u before stop\n",
+ gsi_channel_id(channel), state);
return -EINVAL;
}
@@ -532,7 +565,8 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EAGAIN;
- dev_err(dev, "bad channel state %u after stop\n", state);
+ dev_err(dev, "channel %u bad state %u after stop\n",
+ gsi_channel_id(channel), state);
return -EIO;
}
@@ -549,7 +583,10 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
state != GSI_CHANNEL_STATE_ERROR) {
- dev_err(dev, "bad channel state %u before reset\n", state);
+ /* No need to reset a channel already in ALLOCATED state */
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
+ dev_err(dev, "channel %u bad state %u before reset\n",
+ gsi_channel_id(channel), state);
return;
}
@@ -558,7 +595,8 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
- dev_err(dev, "bad channel state %u after reset\n", state);
+ dev_err(dev, "channel %u bad state %u after reset\n",
+ gsi_channel_id(channel), state);
}
/* Deallocate an ALLOCATED GSI channel */
@@ -571,7 +609,8 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "bad channel state %u before dealloc\n", state);
+ dev_err(dev, "channel %u bad state %u before dealloc\n",
+ channel_id, state);
return;
}
@@ -580,7 +619,8 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
- dev_err(dev, "bad channel state %u after dealloc\n", state);
+ dev_err(dev, "channel %u bad state %u after dealloc\n",
+ channel_id, state);
}
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -607,7 +647,8 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
u32 val;
- val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
+ /* We program all event rings as GPI type/protocol */
+ val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
val |= EV_INTYPE_FMASK;
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
@@ -714,8 +755,8 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Arbitrarily pick TRE 0 as the first channel element to use */
channel->tre_ring.index = 0;
- /* We program all channels to use GPI protocol */
- val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
+ /* We program all channels as GPI type/protocol */
+ val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
if (channel->toward_ipa)
val |= CHTYPE_DIR_FMASK;
val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
@@ -742,12 +783,21 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
- /* Enable the doorbell engine if requested */
- if (doorbell)
+ /* We enable the doorbell engine for IPA v3.5.1 */
+ if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
val |= USE_DB_ENG_FMASK;
- if (!channel->use_prefetch)
- val |= USE_ESCAPE_BUF_ONLY_FMASK;
+ /* v4.0 introduces an escape buffer for prefetch. We use it
+ * on all but the AP command channel.
+ */
+ if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
+ /* If not otherwise set, prefetch buffers are used */
+ if (gsi->version < IPA_VERSION_4_5)
+ val |= USE_ESCAPE_BUF_ONLY_FMASK;
+ else
+ val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
+ PREFETCH_MODE_FMASK);
+ }
iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
@@ -829,8 +879,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
return ret;
}
-/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
+/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
@@ -838,10 +888,10 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
gsi_channel_reset_command(channel);
/* Due to a hardware quirk we may need to reset RX channels twice. */
- if (legacy && !channel->toward_ipa)
+ if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
mutex_unlock(&gsi->mutex);
@@ -989,7 +1039,7 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
- if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
complete(&gsi->channel[channel_id].completion);
return;
@@ -1004,7 +1054,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
- if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ if (code == GSI_OUT_OF_RESOURCES) {
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
@@ -1034,8 +1084,8 @@ static void gsi_isr_glob_err(struct gsi *gsi)
iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
ee = u32_get_bits(val, ERR_EE_FMASK);
- which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
type = u32_get_bits(val, ERR_TYPE_FMASK);
+ which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
code = u32_get_bits(val, ERR_CODE_FMASK);
if (type == GSI_ERR_TYPE_CHAN)
@@ -1052,10 +1102,38 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
u32 result;
u32 val;
+ /* This interrupt is used to handle completions of the two GENERIC
+ * GSI commands. We use these to allocate and halt channels on
+ * the modem's behalf due to a hardware quirk on IPA v4.2. Once
+ * allocated, the modem "owns" these channels, and as a result we
+ * have no way of knowing the channel's state at any given time.
+ *
+ * It is recommended that we halt the modem channels we allocated
+ * when shutting down, but it's possible the channel isn't running
+ * at the time we issue the HALT command. We'll get an error in
+ * that case, but it's harmless (the channel is already halted).
+ *
+ * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
+ * if we receive it.
+ */
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
- if (result != GENERIC_EE_SUCCESS_FVAL)
+
+ switch (result) {
+ case GENERIC_EE_SUCCESS:
+ case GENERIC_EE_CHANNEL_NOT_RUNNING:
+ gsi->result = 0;
+ break;
+
+ case GENERIC_EE_RETRY:
+ gsi->result = -EAGAIN;
+ break;
+
+ default:
dev_err(gsi->dev, "global INT1 generic result %u\n", result);
+ gsi->result = -EIO;
+ break;
+ }
complete(&gsi->completion);
}
@@ -1067,15 +1145,15 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
- if (val & ERROR_INT_FMASK)
+ if (val & BIT(ERROR_INT))
gsi_isr_glob_err(gsi);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
- val &= ~ERROR_INT_FMASK;
+ val &= ~BIT(ERROR_INT);
- if (val & GP_INT1_FMASK) {
- val ^= GP_INT1_FMASK;
+ if (val & BIT(GP_INT1)) {
+ val ^= BIT(GP_INT1);
gsi_isr_gp_int1(gsi);
}
@@ -1110,8 +1188,7 @@ static void gsi_isr_general(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
- if (val)
- dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+ dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
@@ -1128,6 +1205,7 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
u32 intr_mask;
u32 cnt = 0;
+ /* enum gsi_irq_type_id defines GSI interrupt types */
while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
/* intr_mask contains bitmask of pending GSI interrupts */
do {
@@ -1136,19 +1214,19 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
intr_mask ^= gsi_intr;
switch (gsi_intr) {
- case CH_CTRL_FMASK:
+ case BIT(GSI_CH_CTRL):
gsi_isr_chan_ctrl(gsi);
break;
- case EV_CTRL_FMASK:
+ case BIT(GSI_EV_CTRL):
gsi_isr_evt_ctrl(gsi);
break;
- case GLOB_EE_FMASK:
+ case BIT(GSI_GLOB_EE):
gsi_isr_glob_ee(gsi);
break;
- case IEOB_FMASK:
+ case BIT(GSI_IEOB):
gsi_isr_ieob(gsi);
break;
- case GENERAL_FMASK:
+ case BIT(GSI_GENERAL):
gsi_isr_general(gsi);
break;
default:
@@ -1168,6 +1246,34 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, "gsi");
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ return ret;
+ }
+ gsi->irq = irq;
+
+ return 0;
+}
+
+static void gsi_irq_exit(struct gsi *gsi)
+{
+ free_irq(gsi->irq, gsi);
+}
+
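
The "ret ? : -EINVAL" in gsi_irq_init() above is the GNU C conditional with an omitted middle operand. A long-hand sketch of the same logic (not driver code; platform_get_irq_byname() returns a negative errno on failure, and 0 is never a valid IRQ number):

	if (ret < 0)
		return ret;	/* propagate the errno */
	if (!ret)
		return -EINVAL;	/* zero is not a valid IRQ */
	irq = ret;		/* positive value is the IRQ number */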
/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
struct gsi_event *event)
@@ -1452,8 +1558,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
}
/* Setup function for a single channel */
-static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
- bool legacy)
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
@@ -1472,7 +1577,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
if (ret)
goto err_evt_ring_de_alloc;
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, true);
if (channel->toward_ipa)
netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1511,8 +1616,19 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
+ bool success;
u32 val;
+ /* The error global interrupt type is always enabled (until we
+ * teardown), so we won't change that. A generic EE command
+ * completes with a GSI global interrupt of type GP_INT1. We
+ * only perform one generic command at a time (to allocate or
+ * halt a modem channel) and only from this function. So we
+ * enable the GP_INT1 IRQ type here while we're expecting it.
+ */
+ val = BIT(ERROR_INT) | BIT(GP_INT1);
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
/* First zero the result code field */
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
val &= ~GENERIC_EE_RESULT_FMASK;
@@ -1523,8 +1639,13 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+
+ /* Disable the GP_INT1 IRQ type again */
+ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+ if (success)
+ return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
opcode, channel_id);
@@ -1540,16 +1661,21 @@ static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
+ u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
int ret;
- ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
+ do
+ ret = gsi_generic_command(gsi, channel_id,
+ GSI_GENERIC_HALT_CHANNEL);
+ while (ret == -EAGAIN && retries--);
+
if (ret)
dev_err(gsi->dev, "error %d halting modem channel %u\n",
ret, channel_id);
}
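
A worked bound for the loop above (sketch): the post-decrement in the condition means the body runs once before the count is consulted, so the HALT command is issued at most 11 times.

	/* retries = 10 (GSI_CHANNEL_MODEM_HALT_RETRIES)
	 * attempt  1: -EAGAIN, retries-- tests 10 (true)  -> retry
	 * ...
	 * attempt 10: -EAGAIN, retries-- tests 1  (true)  -> retry
	 * attempt 11: -EAGAIN, retries-- tests 0  (false) -> give up
	 * So at most 1 + 10 = 11 HALT attempts.
	 */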
/* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool legacy)
+static int gsi_channel_setup(struct gsi *gsi)
{
u32 channel_id = 0;
u32 mask;
@@ -1561,7 +1687,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy)
mutex_lock(&gsi->mutex);
do {
- ret = gsi_channel_setup_one(gsi, channel_id, legacy);
+ ret = gsi_channel_setup_one(gsi, channel_id);
if (ret)
goto err_unwind;
} while (++channel_id < gsi->channel_count);
@@ -1647,10 +1773,11 @@ static void gsi_channel_teardown(struct gsi *gsi)
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool legacy)
+int gsi_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
u32 val;
+ int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
@@ -1659,6 +1786,8 @@ int gsi_setup(struct gsi *gsi, bool legacy)
return -EIO;
}
+ gsi_irq_setup(gsi);
+
val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
@@ -1691,13 +1820,18 @@ int gsi_setup(struct gsi *gsi, bool legacy)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- return gsi_channel_setup(gsi, legacy);
+ ret = gsi_channel_setup(gsi);
+ if (ret)
+ gsi_irq_teardown(gsi);
+
+ return ret;
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
+ gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
@@ -1745,7 +1879,7 @@ static void gsi_evt_ring_init(struct gsi *gsi)
u32 evt_ring_id = 0;
gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
- gsi->event_enable_bitmap = 0;
+ gsi->ieob_enabled_bitmap = 0;
do
init_completion(&gsi->evt_ring[evt_ring_id].completion);
while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
@@ -1814,7 +1948,7 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data,
- bool command, bool prefetch)
+ bool command)
{
struct gsi_channel *channel;
u32 tre_count;
@@ -1838,7 +1972,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->use_prefetch = command && prefetch;
channel->tlv_count = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
@@ -1892,13 +2025,16 @@ static void gsi_channel_exit_one(struct gsi_channel *channel)
}
/* Init function for channels */
-static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
- const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+static int gsi_channel_init(struct gsi *gsi, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
+ bool modem_alloc;
int ret = 0;
u32 i;
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = gsi->version == IPA_VERSION_4_2;
+
gsi_evt_ring_init(gsi);
/* The endpoint data array is indexed by endpoint name */
@@ -1916,7 +2052,7 @@ static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
continue;
}
- ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+ ret = gsi_channel_init_one(gsi, &data[i], command);
if (ret)
goto err_unwind;
}
@@ -1952,19 +2088,20 @@ static void gsi_channel_exit(struct gsi *gsi)
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
- unsigned int irq;
+ u32 adjust;
int ret;
gsi_validate_build();
gsi->dev = dev;
+ gsi->version = version;
/* The GSI layer performs NAPI on all endpoints. NAPI requires a
* network device structure, but the GSI layer does not have one,
@@ -1972,55 +2109,53 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
*/
init_dummy_netdev(&gsi->dummy_dev);
- ret = platform_get_irq_byname(pdev, "gsi");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
- return ret ? : -EINVAL;
- }
- irq = ret;
-
- ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
- if (ret) {
- dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
- return ret;
- }
- gsi->irq = irq;
-
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
- ret = -ENODEV;
- goto err_free_irq;
+ return -ENODEV;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
- ret = -EINVAL;
- goto err_free_irq;
+ return -EINVAL;
+ }
+
+ /* Make sure we can make our pointer adjustment if necessary */
+ adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+ if (res->start < adjust) {
+ dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
+ adjust);
+ return -EINVAL;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
- ret = -ENOMEM;
- goto err_free_irq;
+ return -ENOMEM;
}
+ /* Adjust register range pointer downward for newer IPA versions */
+ gsi->virt -= adjust;
- ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+ init_completion(&gsi->completion);
+
+ ret = gsi_irq_init(gsi, pdev);
if (ret)
goto err_iounmap;
+ ret = gsi_channel_init(gsi, count, data);
+ if (ret)
+ goto err_irq_exit;
+
mutex_init(&gsi->mutex);
- init_completion(&gsi->completion);
return 0;
+err_irq_exit:
+ gsi_irq_exit(gsi);
err_iounmap:
iounmap(gsi->virt);
-err_free_irq:
- free_irq(gsi->irq, gsi);
return ret;
}
@@ -2030,7 +2165,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- free_irq(gsi->irq, gsi);
+ gsi_irq_exit(gsi);
iounmap(gsi->virt);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 3f9f29d531c4..96c9aed397aa 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -13,6 +13,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
+#include "ipa_version.h"
+
/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX 17
#define GSI_EVT_RING_COUNT_MAX 13
@@ -31,10 +33,10 @@ struct ipa_gsi_endpoint_data;
/* Execution environment IDs */
enum gsi_ee_id {
- GSI_EE_AP = 0,
- GSI_EE_MODEM = 1,
- GSI_EE_UC = 2,
- GSI_EE_TZ = 3,
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
};
struct gsi_ring {
@@ -94,12 +96,12 @@ struct gsi_trans_info {
/* Hardware values signifying the state of a channel */
enum gsi_channel_state {
- GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
- GSI_CHANNEL_STATE_ALLOCATED = 0x1,
- GSI_CHANNEL_STATE_STARTED = 0x2,
- GSI_CHANNEL_STATE_STOPPED = 0x3,
- GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
- GSI_CHANNEL_STATE_ERROR = 0xf,
+ GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
+ GSI_CHANNEL_STATE_ALLOCATED = 0x1,
+ GSI_CHANNEL_STATE_STARTED = 0x2,
+ GSI_CHANNEL_STATE_STOPPED = 0x3,
+ GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_ERROR = 0xf,
};
/* We only care about channels between IPA and AP */
@@ -107,7 +109,6 @@ struct gsi_channel {
struct gsi *gsi;
bool toward_ipa;
bool command; /* AP command TX channel or not */
- bool use_prefetch; /* use prefetch (else escape buf) */
u8 tlv_count; /* # entries in TLV FIFO */
u16 tre_count;
@@ -147,6 +148,7 @@ struct gsi_evt_ring {
struct gsi {
struct device *dev; /* Same as IPA device */
+ enum ipa_version version;
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt;
u32 irq;
@@ -154,24 +156,25 @@ struct gsi {
u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
- u32 event_bitmap;
- u32 event_enable_bitmap;
- u32 modem_channel_bitmap;
+ u32 event_bitmap; /* allocated event rings */
+ u32 modem_channel_bitmap; /* modem channels to allocate */
+ u32 type_enabled_bitmap; /* GSI IRQ types enabled */
+ u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
struct completion completion; /* for global EE commands */
+ int result; /* Negative errno (generic commands) */
struct mutex mutex; /* protects commands, programming */
};
/**
* gsi_setup() - Set up the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
- * @legacy: Set up for legacy hardware
*
* Return: 0 if successful, or a negative error code
*
* Performs initialization that must wait until the GSI hardware is
* ready (including firmware loaded).
*/
-int gsi_setup(struct gsi *gsi, bool legacy);
+int gsi_setup(struct gsi *gsi);
/**
* gsi_teardown() - Tear down GSI subsystem
@@ -219,15 +222,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
- * @legacy: Legacy behavior
+ * @doorbell: Whether to (possibly) enable the doorbell engine
*
- * Reset a channel and reconfigure it. The @legacy flag indicates
- * that some steps should be done differently for legacy hardware.
+ * Reset a channel and reconfigure it. The @doorbell flag indicates
+ * that the doorbell engine should be enabled if needed.
*
* GSI hardware relinquishes ownership of all pending receive buffer
* transactions and they will complete with their cancelled flag set.
*/
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
@@ -236,15 +239,18 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
* gsi_init() - Initialize the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
* @pdev: IPA platform device
+ * @version: IPA hardware version (implies GSI version)
+ * @count: Number of entries in the configuration data array
+ * @data: Endpoint and channel configuration data
*
* Return: 0 if successful, or a negative error code
*
* Early stage initialization of the GSI subsystem, performing tasks
* that can be done before the GSI hardware is ready to use.
*/
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc);
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
/**
* gsi_exit() - Exit the GSI subsystem
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 8e0e9350c383..0e138bbd8205 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -38,6 +38,17 @@
* (though the actual limit is hardware-dependent).
*/
+/* GSI EE registers as a group are shifted downward by a fixed
+ * constant amount for IPA versions 4.5 and beyond. This applies
+ * to all GSI registers we use *except* the ones that disable
+ * inter-EE interrupts for channels and event channels.
+ *
+ * We handle this by adjusting the pointer to the mapped GSI memory
+ * region downward. Then in the one place we use them (gsi_irq_setup())
+ * we undo that adjustment for the inter-EE interrupt registers.
+ */
+#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
+
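A condensed sketch of how gsi.c (earlier in this patch) applies the adjustment; the adjustment is subtracted from the mapped base once at init time, and added back only for the two inter-EE registers:

	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	gsi->virt = ioremap(res->start, size) - adjust;	/* condensed */

	/* ...and gsi_irq_setup() undoes it for the inter-EE registers: */
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
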
#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
@@ -66,12 +77,20 @@
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
#define CHID_FMASK GENMASK(12, 8)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is present for IPA v4.5 and above */
#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
#define ERINDEX_FMASK GENMASK(18, 14)
#define CHSTATE_FMASK GENMASK(23, 20)
#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
+/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
+enum gsi_channel_type {
+ GSI_CHANNEL_TYPE_MHI = 0x0,
+ GSI_CHANNEL_TYPE_XHCI = 0x1,
+ GSI_CHANNEL_TYPE_GPI = 0x2,
+ GSI_CHANNEL_TYPE_XDCI = 0x3,
+};
+
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
@@ -95,8 +114,18 @@
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is only present for IPA v4.0, v4.1, and v4.2 */
#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
+/* The next two fields are present for IPA v4.5 and above */
+#define PREFETCH_MODE_FMASK GENMASK(13, 10)
+#define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16)
+/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */
+enum gsi_prefetch_mode {
+ GSI_USE_PREFETCH_BUFS = 0x0,
+ GSI_ESCAPE_BUF_ONLY = 0x1,
+ GSI_SMART_PREFETCH = 0x2,
+ GSI_FREE_PREFETCH = 0x3,
+};
#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
@@ -128,6 +157,7 @@
#define EV_INTYPE_FMASK GENMASK(16, 16)
#define EV_CHSTATE_FMASK GENMASK(23, 20)
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
+/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
@@ -216,6 +246,15 @@
#define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24)
+/** enum gsi_ch_cmd_opcode - CH_OPCODE field values in CH_CMD */
+enum gsi_ch_cmd_opcode {
+ GSI_CH_ALLOCATE = 0x0,
+ GSI_CH_START = 0x1,
+ GSI_CH_STOP = 0x2,
+ GSI_CH_RESET = 0x9,
+ GSI_CH_DE_ALLOC = 0xa,
+};
+
#define GSI_EV_CH_CMD_OFFSET \
GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
@@ -223,6 +262,13 @@
#define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24)
+/** enum gsi_evt_cmd_opcode - EV_OPCODE field values in EV_CH_CMD */
+enum gsi_evt_cmd_opcode {
+ GSI_EVT_ALLOCATE = 0x0,
+ GSI_EVT_RESET = 0x9,
+ GSI_EVT_DE_ALLOC = 0xa,
+};
+
#define GSI_GENERIC_CMD_OFFSET \
GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
@@ -231,29 +277,43 @@
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
+/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
+enum gsi_generic_cmd_opcode {
+ GSI_GENERIC_HALT_CHANNEL = 0x1,
+ GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+};
+
#define GSI_GSI_HW_PARAM_2_OFFSET \
GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
(0x0001f040 + 0x4000 * (ee))
#define IRAM_SIZE_FMASK GENMASK(2, 0)
-#define IRAM_SIZE_ONE_KB_FVAL 0
-#define IRAM_SIZE_TWO_KB_FVAL 1
-/* The next two values are available for GSI v2.0 and above */
-#define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2
-#define IRAM_SIZE_THREE_KB_FVAL 3
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
-/* Fields below are present for GSI v2.0 and above */
+/* Fields below are present for IPA v4.0 and above */
#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
-/* Fields below are present for GSI v2.2 and above */
+/* Fields below are present for IPA v4.2 and above */
#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
+/** enum gsi_iram_size - IRAM_SIZE field values in HW_PARAM_2 */
+enum gsi_iram_size {
+ IRAM_SIZE_ONE_KB = 0x0,
+ IRAM_SIZE_TWO_KB = 0x1,
+/* The next two values are available for IPA v4.0 and above */
+ IRAM_SIZE_TWO_N_HALF_KB = 0x2,
+ IRAM_SIZE_THREE_KB = 0x3,
+ /* The next two values are available for IPA v4.5 and above */
+ IRAM_SIZE_THREE_N_HALF_KB = 0x4,
+ IRAM_SIZE_FOUR_KB = 0x5,
+};
+
+/* IRQ condition for each type is cleared by writing type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
@@ -262,15 +322,17 @@
GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
(0x0001f088 + 0x4000 * (ee))
-/* The masks below are used for the TYPE_IRQ and TYPE_IRQ_MASK registers */
-#define CH_CTRL_FMASK GENMASK(0, 0)
-#define EV_CTRL_FMASK GENMASK(1, 1)
-#define GLOB_EE_FMASK GENMASK(2, 2)
-#define IEOB_FMASK GENMASK(3, 3)
-#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
-#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
-#define GENERAL_FMASK GENMASK(6, 6)
-#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
+
+/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
+enum gsi_irq_type_id {
+ GSI_CH_CTRL = 0x0, /* channel allocation, etc. */
+ GSI_EV_CTRL = 0x1, /* event ring allocation, etc. */
+ GSI_GLOB_EE = 0x2, /* global/general event */
+ GSI_IEOB = 0x3, /* TRE completion */
+ GSI_INTER_EE_CH_CTRL = 0x4, /* remote-issued stop/reset (unused) */
+ GSI_INTER_EE_EV_CTRL = 0x5, /* remote-issued event reset (unused) */
+ GSI_GENERAL = 0x6, /* general-purpose event */
+};
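
These bit positions are used with BIT() both to build the cached type-enable mask and to dispatch in gsi_isr(); for example (condensed from gsi_irq_type_enable() in this patch):

	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_IEOB));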
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
@@ -329,12 +391,13 @@
GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
(0x0001f110 + 0x4000 * (ee))
-/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
-#define ERROR_INT_FMASK GENMASK(0, 0)
-#define GP_INT1_FMASK GENMASK(1, 1)
-#define GP_INT2_FMASK GENMASK(2, 2)
-#define GP_INT3_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
+/* Values here are bit positions in the GLOB_IRQ_* registers */
+enum gsi_global_irq_id {
+ ERROR_INT = 0x0,
+ GP_INT1 = 0x1,
+ GP_INT2 = 0x2,
+ GP_INT3 = 0x3,
+};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
@@ -348,12 +411,13 @@
GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
(0x0001f128 + 0x4000 * (ee))
-/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
-#define BREAK_POINT_FMASK GENMASK(0, 0)
-#define BUS_ERROR_FMASK GENMASK(1, 1)
-#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
+/* Values here are bit positions in the (general) GSI_IRQ_* registers */
+enum gsi_general_id {
+ BREAK_POINT = 0x0,
+ BUS_ERROR = 0x1,
+ CMD_FIFO_OVRFLOW = 0x2,
+ MCS_STACK_OVRFLOW = 0x3,
+};
#define GSI_CNTXT_INTSET_OFFSET \
GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
@@ -373,6 +437,25 @@
#define ERR_TYPE_FMASK GENMASK(27, 24)
#define ERR_EE_FMASK GENMASK(31, 28)
+/** enum gsi_err_code - ERR_CODE field values in EE_ERR_LOG */
+enum gsi_err_code {
+ GSI_INVALID_TRE = 0x1,
+ GSI_OUT_OF_BUFFERS = 0x2,
+ GSI_OUT_OF_RESOURCES = 0x3,
+ GSI_UNSUPPORTED_INTER_EE_OP = 0x4,
+ GSI_EVT_RING_EMPTY = 0x5,
+ GSI_NON_ALLOCATED_EVT_ACCESS = 0x6,
+ /* 7 is not assigned */
+ GSI_HWO_1 = 0x8,
+};
+
+/** enum gsi_err_type - ERR_TYPE field values in EE_ERR_LOG */
+enum gsi_err_type {
+ GSI_ERR_TYPE_GLOB = 0x1,
+ GSI_ERR_TYPE_CHAN = 0x2,
+ GSI_ERR_TYPE_EVT = 0x3,
+};
+
#define GSI_ERROR_LOG_CLR_OFFSET \
GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
@@ -384,10 +467,18 @@
(0x0001f400 + 0x4000 * (ee))
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
-#define GENERIC_EE_SUCCESS_FVAL 1
-#define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3
-#define GENERIC_EE_INCORRECT_CHANNEL_FVAL 5
-#define GENERIC_EE_NO_RESOURCES_FVAL 7
+
+/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
+enum gsi_generic_ee_result {
+ GENERIC_EE_SUCCESS = 0x1,
+ GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2,
+ GENERIC_EE_INCORRECT_DIRECTION = 0x3,
+ GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
+ GENERIC_EE_INCORRECT_CHANNEL = 0x5,
+ GENERIC_EE_RETRY = 0x6,
+ GENERIC_EE_NO_RESOURCES = 0x7,
+};
+
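For reference, gsi_isr_gp_int1() in gsi.c (earlier in this patch) maps these result codes to errno values as follows:

	/* GENERIC_EE_SUCCESS, GENERIC_EE_CHANNEL_NOT_RUNNING -> 0
	 * GENERIC_EE_RETRY                                   -> -EAGAIN
	 * anything else                                      -> -EIO
	 */
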
#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */
#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index a2c0fde05819..9dcf16f399b7 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -13,6 +13,7 @@
#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_modem.h"
+#include "ipa_data.h"
/**
* DOC: IPA Clocking
@@ -29,18 +30,6 @@
* An IPA clock reference must be held for any access to IPA hardware.
*/
-#define IPA_CORE_CLOCK_RATE (75UL * 1000 * 1000) /* Hz */
-
-/* Interconnect path bandwidths (each times 1000 bytes per second) */
-#define IPA_MEMORY_AVG (80 * 1000) /* 80 MBps */
-#define IPA_MEMORY_PEAK (600 * 1000)
-
-#define IPA_IMEM_AVG (80 * 1000)
-#define IPA_IMEM_PEAK (350 * 1000)
-
-#define IPA_CONFIG_AVG (40 * 1000)
-#define IPA_CONFIG_PEAK (40 * 1000)
-
/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
@@ -49,6 +38,7 @@
* @memory_path: Memory interconnect
* @imem_path: Internal memory interconnect
* @config_path: Configuration space interconnect
+ * @interconnect_data: Interconnect configuration data
*/
struct ipa_clock {
refcount_t count;
@@ -57,6 +47,7 @@ struct ipa_clock {
struct icc_path *memory_path;
struct icc_path *imem_path;
struct icc_path *config_path;
+ const struct ipa_interconnect_data *interconnect_data;
};
static struct icc_path *
@@ -113,18 +104,25 @@ static void ipa_interconnect_exit(struct ipa_clock *clock)
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
+ const struct ipa_interconnect_data *data;
struct ipa_clock *clock = ipa->clock;
int ret;
- ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
+ ret = icc_set_bw(clock->memory_path, data->average_rate,
+ data->peak_rate);
if (ret)
return ret;
- ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
+ ret = icc_set_bw(clock->imem_path, data->average_rate,
+ data->peak_rate);
if (ret)
goto err_memory_path_disable;
- ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
+ ret = icc_set_bw(clock->config_path, data->average_rate,
+ data->peak_rate);
if (ret)
goto err_imem_path_disable;
@@ -141,6 +139,7 @@ err_memory_path_disable:
/* To disable an interconnect, we just set its bandwidth to 0 */
static int ipa_interconnect_disable(struct ipa *ipa)
{
+ const struct ipa_interconnect_data *data;
struct ipa_clock *clock = ipa->clock;
int ret;
@@ -159,9 +158,13 @@ static int ipa_interconnect_disable(struct ipa *ipa)
return 0;
err_imem_path_reenable:
- (void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
+ (void)icc_set_bw(clock->imem_path, data->average_rate,
+ data->peak_rate);
err_memory_path_reenable:
- (void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
+ (void)icc_set_bw(clock->memory_path, data->average_rate,
+ data->peak_rate);
return ret;
}
@@ -257,7 +260,8 @@ u32 ipa_clock_rate(struct ipa *ipa)
}
/* Initialize IPA clocking */
-struct ipa_clock *ipa_clock_init(struct device *dev)
+struct ipa_clock *
+ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
{
struct ipa_clock *clock;
struct clk *clk;
@@ -269,10 +273,10 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
return ERR_CAST(clk);
}
- ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE);
+ ret = clk_set_rate(clk, data->core_clock_rate);
if (ret) {
- dev_err(dev, "error %d setting core clock rate to %lu\n",
- ret, IPA_CORE_CLOCK_RATE);
+ dev_err(dev, "error %d setting core clock rate to %u\n",
+ ret, data->core_clock_rate);
goto err_clk_put;
}
@@ -282,6 +286,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
goto err_clk_put;
}
clock->core = clk;
+ clock->interconnect_data = data->interconnect;
ret = ipa_interconnect_init(clock, dev);
if (ret)
diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h
index 1d70f1de3875..1fe634760e59 100644
--- a/drivers/net/ipa/ipa_clock.h
+++ b/drivers/net/ipa/ipa_clock.h
@@ -9,6 +9,7 @@
struct device;
struct ipa;
+struct ipa_clock_data;
/**
* ipa_clock_rate() - Return the current IPA core clock rate
@@ -21,10 +22,12 @@ u32 ipa_clock_rate(struct ipa *ipa);
/**
* ipa_clock_init() - Initialize IPA clocking
* @dev: IPA device
+ * @data: Clock configuration data
*
* Return: A pointer to an ipa_clock structure, or a pointer-coded error
*/
-struct ipa_clock *ipa_clock_init(struct device *dev);
+struct ipa_clock *ipa_clock_init(struct device *dev,
+ const struct ipa_clock_data *data);
/**
* ipa_clock_exit() - Inverse of ipa_clock_init()
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d92dd3f09b73..002e51448510 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -38,9 +38,9 @@
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
- pipeline_clear_hps = 0,
- pipeline_clear_src_grp = 1,
- pipeline_clear_full = 2,
+ pipeline_clear_hps = 0x0,
+ pipeline_clear_src_grp = 0x1,
+ pipeline_clear_full = 0x2,
};
/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index f7e6f87facf7..4ed09c486abc 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -27,16 +27,16 @@ struct gsi_channel;
* a request is *not* an immediate command.
*/
enum ipa_cmd_opcode {
- IPA_CMD_NONE = 0,
- IPA_CMD_IP_V4_FILTER_INIT = 3,
- IPA_CMD_IP_V6_FILTER_INIT = 4,
- IPA_CMD_IP_V4_ROUTING_INIT = 7,
- IPA_CMD_IP_V6_ROUTING_INIT = 8,
- IPA_CMD_HDR_INIT_LOCAL = 9,
- IPA_CMD_REGISTER_WRITE = 12,
- IPA_CMD_IP_PACKET_INIT = 16,
- IPA_CMD_DMA_SHARED_MEM = 19,
- IPA_CMD_IP_PACKET_TAG_STATUS = 20,
+ IPA_CMD_NONE = 0x0,
+ IPA_CMD_IP_V4_FILTER_INIT = 0x3,
+ IPA_CMD_IP_V6_FILTER_INIT = 0x4,
+ IPA_CMD_IP_V4_ROUTING_INIT = 0x7,
+ IPA_CMD_IP_V6_ROUTING_INIT = 0x8,
+ IPA_CMD_HDR_INIT_LOCAL = 0x9,
+ IPA_CMD_REGISTER_WRITE = 0xc,
+ IPA_CMD_IP_PACKET_INIT = 0x10,
+ IPA_CMD_DMA_SHARED_MEM = 0x13,
+ IPA_CMD_IP_PACKET_TAG_STATUS = 0x14,
};
/**
@@ -50,7 +50,6 @@ struct ipa_cmd_info {
enum dma_data_direction direction;
};
-
#ifdef IPA_VALIDATE
/**
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index d4c2bc7ad24b..5cc0ed77edb9 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -24,6 +24,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_DMA_ONLY,
.config = {
+ .resource_group = 0,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
},
@@ -42,6 +43,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 0,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -65,6 +67,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.seq_type =
IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
.config = {
+ .resource_group = 0,
.checksum = true,
.qmap = true,
.status_enable = true,
@@ -88,6 +91,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 0,
.checksum = true,
.qmap = true,
.aggregation = true,
@@ -305,6 +309,26 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+static struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 100 * 1000 * 1000, /* Hz */
+ /* Interconnect rates are in 1000 byte/second units */
+ .interconnect = {
+ [IPA_INTERCONNECT_MEMORY] = {
+ .peak_rate = 465000, /* 465 MBps */
+ .average_rate = 80000, /* 80 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ [IPA_INTERCONNECT_IMEM] = {
+ .peak_rate = 68570, /* 68.570 MBps */
+ .average_rate = 0, /* unused */
+ },
+ [IPA_INTERCONNECT_CONFIG] = {
+ .peak_rate = 30000, /* 30 MBps */
+ .average_rate = 0, /* unused */
+ },
+ },
+};
+
/* Configuration data for the SC7180 SoC. */
const struct ipa_data ipa_data_sc7180 = {
.version = IPA_VERSION_4_2,
@@ -312,4 +336,5 @@ const struct ipa_data ipa_data_sc7180 = {
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
};
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index de2768d71ab5..f8fee8d3ca42 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -26,6 +26,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_DMA_ONLY,
.config = {
+ .resource_group = 1,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
},
@@ -44,6 +45,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 1,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -67,6 +69,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.seq_type =
IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
.config = {
+ .resource_group = 1,
.checksum = true,
.qmap = true,
.status_enable = true,
@@ -90,6 +93,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 1,
.checksum = true,
.qmap = true,
.aggregation = true,
@@ -146,11 +150,11 @@ static const struct ipa_resource_src ipa_resource_src[] = {
.type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
.limits[0] = {
.min = 1,
- .max = 63,
+ .max = 255,
},
.limits[1] = {
.min = 1,
- .max = 63,
+ .max = 255,
},
},
{
@@ -325,6 +329,26 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+static struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 75 * 1000 * 1000, /* Hz */
+ /* Interconnect rates are in 1000 byte/second units */
+ .interconnect = {
+ [IPA_INTERCONNECT_MEMORY] = {
+ .peak_rate = 600000, /* 600 MBps */
+ .average_rate = 80000, /* 80 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ [IPA_INTERCONNECT_IMEM] = {
+ .peak_rate = 350000, /* 350 MBps */
+ .average_rate = 0, /* unused */
+ },
+ [IPA_INTERCONNECT_CONFIG] = {
+ .peak_rate = 40000, /* 40 MBps */
+ .average_rate = 0, /* unused */
+ },
+ },
+};
+
/* Configuration data for the SDM845 SoC. */
const struct ipa_data ipa_data_sdm845 = {
.version = IPA_VERSION_3_5_1,
@@ -332,4 +356,5 @@ const struct ipa_data ipa_data_sdm845 = {
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 7fc1058a5ca9..0ed5ffe2b8da 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -45,10 +45,10 @@
* the IPA endpoint.
*/
-/* The maximum value returned by ipa_resource_group_count() */
-#define IPA_RESOURCE_GROUP_COUNT 4
+/* The maximum value returned by ipa_resource_group_{src,dst}_count() */
+#define IPA_RESOURCE_GROUP_SRC_MAX 5
+#define IPA_RESOURCE_GROUP_DST_MAX 5
-/** enum ipa_resource_type_src - source resource types */
/**
* struct gsi_channel_data - GSI channel configuration data
* @tre_count: number of TREs in the channel ring
@@ -109,6 +109,7 @@ struct ipa_endpoint_rx_data {
/**
* struct ipa_endpoint_config_data - IPA endpoint hardware configuration
+ * @resource_group: resource group to assign endpoint to
* @checksum: whether checksum offload is enabled
* @qmap: whether endpoint uses QMAP protocol
* @aggregation: whether endpoint supports aggregation
@@ -119,6 +120,7 @@ struct ipa_endpoint_rx_data {
* @rx: RX-specific endpoint information (see above)
*/
struct ipa_endpoint_config_data {
+ u32 resource_group;
bool checksum;
bool qmap;
bool aggregation;
@@ -206,7 +208,7 @@ struct ipa_resource_limits {
*/
struct ipa_resource_src {
enum ipa_resource_type_src type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_SRC_MAX];
};
/**
@@ -216,7 +218,7 @@ struct ipa_resource_src {
*/
struct ipa_resource_dst {
enum ipa_resource_type_dst type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_DST_MAX];
};
/**
@@ -239,7 +241,7 @@ struct ipa_resource_data {
};
/**
- * struct ipa_mem - description of IPA memory regions
+ * struct ipa_mem_data - description of IPA memory regions
* @local_count: number of regions defined in the local[] array
* @local: array of IPA-local memory region descriptors
* @imem_addr: physical address of IPA region within IMEM
@@ -256,6 +258,34 @@ struct ipa_mem_data {
u32 smem_size;
};
+/** enum ipa_interconnect_id - IPA interconnect identifier */
+enum ipa_interconnect_id {
+ IPA_INTERCONNECT_MEMORY,
+ IPA_INTERCONNECT_IMEM,
+ IPA_INTERCONNECT_CONFIG,
+ IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */
+};
+
+/**
+ * struct ipa_interconnect_data - description of IPA interconnect rates
+ * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units)
+ */
+struct ipa_interconnect_data {
+ u32 peak_rate;
+ u32 average_rate;
+};
+
+/**
+ * struct ipa_clock_data - description of IPA clock and interconnect rates
+ * @core_clock_rate: Core clock rate (Hz)
+ * @interconnect: Array of interconnect bandwidth parameters
+ */
+struct ipa_clock_data {
+ u32 core_clock_rate;
+ struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT];
+};
+
/**
* struct ipa_data - combined IPA/GSI configuration data
* @version: IPA hardware version
@@ -271,6 +301,7 @@ struct ipa_data {
const struct ipa_gsi_endpoint_data *endpoint_data;
const struct ipa_resource_data *resource_data;
const struct ipa_mem_data *mem_data;
+ const struct ipa_clock_data *clock_data;
};
extern const struct ipa_data ipa_data_sdm845;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index b40b711cf4bd..9f4be9812a1f 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -37,7 +37,7 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-#define IPA_AGGR_TIME_LIMIT_DEFAULT 500 /* microseconds */
+#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
@@ -74,31 +74,6 @@ struct ipa_status {
#ifdef IPA_VALIDATE
-static void ipa_endpoint_validate_build(void)
-{
- /* The aggregation byte limit defines the point at which an
- * aggregation window will close. It is programmed into the
- * IPA hardware as a number of KB. We don't use "hard byte
- * limit" aggregation, which means that we need to supply
- * enough space in a receive buffer to hold a complete MTU
- * plus normal skb overhead *after* that aggregation byte
- * limit has been crossed.
- *
- * This check just ensures we don't define a receive buffer
- * size that would exceed what we can represent in the field
- * that is used to program its size.
- */
- BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
- field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
- IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
-
- /* I honestly don't know where this requirement comes from. But
- * it holds, and if we someday need to loosen the constraint we
- * can try to track it down.
- */
- BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-}
-
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -180,14 +155,24 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
+static u32 aggr_byte_limit_max(enum ipa_version version)
+{
+ if (version < IPA_VERSION_4_5)
+ return field_max(aggr_byte_limit_fmask(true));
+
+ return field_max(aggr_byte_limit_fmask(false));
+}
+
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
+ u32 limit;
- ipa_endpoint_validate_build();
+ /* Not sure where this constraint comes from... */
+ BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -195,6 +180,26 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return false;
}
+ /* The aggregation byte limit defines the point at which an
+ * aggregation window will close. It is programmed into the
+ * IPA hardware as a number of KB. We don't use "hard byte
+ * limit" aggregation, which means that we need to supply
+ * enough space in a receive buffer to hold a complete MTU
+ * plus normal skb overhead *after* that aggregation byte
+ * limit has been crossed.
+ *
+ * This check ensures we don't define a receive buffer size
+ * that would exceed what we can represent in the field that
+ * is used to program its size.
+ */
+ limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
+ limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ if (limit < IPA_RX_BUFFER_SIZE) {
+ dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
+ IPA_RX_BUFFER_SIZE, limit);
+ return false;
+ }
+
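+	/* Editorial sketch (not part of this patch): the biggest
+	 * IPA_RX_BUFFER_SIZE the check above would accept, assuming a
+	 * 5-bit byte-limit field (the real width comes from
+	 * aggr_byte_limit_fmask()).  Aggregation could be programmed
+	 * to close at 31 KB, after which one more MTU-sized packet
+	 * plus skb overhead must still fit:
+	 *
+	 *	limit = 31 * SZ_1K;			   // 31744
+	 *	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD; // + 1500 + ...
+	 */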
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
@@ -485,28 +490,34 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
if (endpoint->data->qmap) {
size_t header_size = sizeof(struct rmnet_map_header);
+ enum ipa_version version = ipa->version;
/* We might supply a checksum header after the QMAP header */
if (endpoint->toward_ipa && endpoint->data->checksum)
header_size += sizeof(struct rmnet_map_ul_csum_header);
- val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
+ val |= ipa_header_size_encoded(version, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
- u32 off; /* Field offset within header */
+ u32 offset; /* Field offset within header */
/* Where IPA will write the metadata value */
- off = offsetof(struct rmnet_map_header, mux_id);
- val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
+ offset = offsetof(struct rmnet_map_header, mux_id);
+ val |= ipa_metadata_offset_encoded(version, offset);
/* Where IPA will write the length */
- off = offsetof(struct rmnet_map_header, pkt_len);
+ offset = offsetof(struct rmnet_map_header, pkt_len);
+ /* Upper bits are stored in HDR_EXT with IPA v4.5 */
+ if (version == IPA_VERSION_4_5)
+ offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
+
val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
- val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
+ val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
val |= HDR_OFST_METADATA_VALID_FMASK;
@@ -514,16 +525,17 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
/* HDR_LEN_INC_DEAGG_HDR is 0 */
- /* HDR_METADATA_REG_VALID is 0 (TX only) */
+ /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
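For reference, the offsetof() values computed above follow from the QMAP header layout; a simplified sketch of struct rmnet_map_header (the real definition, with its bitfields, lives in include/linux/if_rmnet.h):

/* Simplified sketch -- bitfield details omitted */
struct rmnet_map_header_sketch {
	u8 flags;	/* pad length, reserved, command/data bit */
	u8 mux_id;	/* offsetof() == 1: where IPA writes metadata */
	__be16 pkt_len;	/* offsetof() == 2: where IPA writes the length */
};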
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
u32 pad_align = endpoint->data->rx.pad_align;
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
val |= HDR_ENDIANNESS_FMASK; /* big endian */
@@ -545,10 +557,24 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ /* IPA v4.5 adds some most-significant bits to a few fields,
+ * two of which are defined in the HDR (not HDR_EXT) register.
+ */
+ if (ipa->version == IPA_VERSION_4_5) {
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
+ if (endpoint->data->qmap && !endpoint->toward_ipa) {
+ u32 offset;
+
+ offset = offsetof(struct rmnet_map_header, pkt_len);
+ offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
+ val |= u32_encode_bits(offset,
+ HDR_OFST_PKT_SIZE_MSB_FMASK);
+ /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
+ }
+ }
+ iowrite32(val, ipa->reg_virt + offset);
}
-
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
@@ -603,29 +629,84 @@ static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
return rx_buffer_size / SZ_1K;
}
+/* Encoded values for AGGR endpoint register fields */
+static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
+{
+ if (version < IPA_VERSION_4_5)
+ return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
+
+ return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
+}
+
+/* Encode the aggregation timer limit (microseconds) based on IPA version */
+static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
+{
+ u32 gran_sel;
+ u32 fmask;
+ u32 val;
+
+ if (version < IPA_VERSION_4_5) {
+ /* We set aggregation granularity in ipa_hardware_config() */
+ limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+
+ return u32_encode_bits(limit, aggr_time_limit_fmask(true));
+ }
+
+ /* IPA v4.5 expresses the time limit using Qtime. The AP has
+ * pulse generators 0 and 1 available, which were configured
+ * in ipa_qtime_config() to have granularity 100 usec and
+ * 1 msec, respectively. Use pulse generator 0 if possible,
+ * otherwise fall back to pulse generator 1.
+ */
+ fmask = aggr_time_limit_fmask(false);
+ val = DIV_ROUND_CLOSEST(limit, 100);
+ if (val > field_max(fmask)) {
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ gran_sel = AGGR_GRAN_SEL_FMASK;
+ val = DIV_ROUND_CLOSEST(limit, 1000);
+ } else {
+ /* We can use pulse generator 0 (100 usec granularity) */
+ gran_sel = 0;
+ }
+
+ return gran_sel | u32_encode_bits(val, fmask);
+}
+
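A worked example of the selection logic, using the driver's 500 microsecond limit (the field width below is an illustrative assumption):

/* limit == 500: DIV_ROUND_CLOSEST(500, 100) == 5, which fits, so
 * pulse generator 0 is chosen and the hardware counts five 100 usec
 * ticks.  If the field were 5 bits wide (max 31), a 5000 usec limit
 * would give 50, too big, so pulse generator 1 would be selected:
 * DIV_ROUND_CLOSEST(5000, 1000) == 5 ticks of 1 msec each.
 */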
+static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
+{
+ u32 val = enabled ? 1 : 0;
+
+ if (version < IPA_VERSION_4_5)
+ return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
+
+ return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
+}
+
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
+ enum ipa_version version = endpoint->ipa->version;
u32 val = 0;
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
+ bool close_eof;
u32 limit;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
- val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
+ val |= aggr_byte_limit_encoded(version, limit);
- limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
- limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
- val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
+ limit = IPA_AGGR_TIME_LIMIT;
+ val |= aggr_time_limit_encoded(version, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- if (endpoint->data->rx.aggr_close_eof)
- val |= AGGR_SW_EOF_ACTIVE_FMASK;
+ close_eof = endpoint->data->rx.aggr_close_eof;
+ val |= aggr_sw_eof_active_encoded(version, close_eof);
+
/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
} else {
val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
@@ -634,6 +715,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
+ /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
/* other fields ignored */
@@ -642,12 +724,45 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
-/* The head-of-line blocking timer is defined as a tick count, where each
- * tick represents 128 cycles of the IPA core clock. Return the value
- * that should be written to that register that represents the timeout
- * period provided.
+/* Return the Qtime-based head-of-line blocking timer value that
+ * represents the given number of microseconds. The result
+ * includes both the timer value and the selected timer granularity.
*/
-static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
+{
+ u32 gran_sel;
+ u32 val;
+
+ /* IPA v4.5 expresses time limits using Qtime. The AP has
+ * pulse generators 0 and 1 available, which were configured
+ * in ipa_qtime_config() to have granularity 100 usec and
+ * 1 msec, respectively. Use pulse generator 0 if possible,
+ * otherwise fall back to pulse generator 1.
+ */
+ val = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (val > field_max(TIME_LIMIT_FMASK)) {
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ gran_sel = GRAN_SEL_FMASK;
+ val = DIV_ROUND_CLOSEST(microseconds, 1000);
+ } else {
+ /* We can use pulse generator 0 (100 usec granularity) */
+ gran_sel = 0;
+ }
+
+ return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
+}
+
+/* The head-of-line blocking timer is defined as a tick count. For
+ * IPA version 4.5 the tick count is based on the Qtimer, which is
+ * derived from the 19.2 MHz SoC XO clock. For older IPA versions
+ * each tick represents 128 cycles of the IPA core clock.
+ *
+ * Return the encoded value that should be written to the timer
+ * register to represent the given timeout period. For IPA v4.2
+ * this encodes a base and scale value, while for earlier versions
+ * the value is a simple tick count.
+ */
+static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
u32 width;
u32 scale;
@@ -659,14 +774,17 @@ static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
+ if (ipa->version == IPA_VERSION_4_5)
+ return hol_block_timer_qtime_val(ipa, microseconds);
+
/* Use 64 bit arithmetic to avoid overflow... */
rate = ipa_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* ...but we still need to fit into a 32-bit register */
WARN_ON(ticks > U32_MAX);
- /* IPA v3.5.1 just records the tick count */
- if (ipa->version == IPA_VERSION_3_5_1)
+ /* IPA v3.5.1 through v4.1 just record the tick count */
+ if (ipa->version < IPA_VERSION_4_2)
return (u32)ticks;
/* For IPA v4.2, the tick count is represented by base and
@@ -704,7 +822,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
u32 val;
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
- val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
+ val = hol_block_timer_val(ipa, microseconds);
iowrite32(val, ipa->reg_virt + offset);
}
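To make the pre-v4.5 tick arithmetic concrete, here is the computation for a 1 millisecond timeout at the 75 MHz core clock rate given for SDM845 above (a worked example, not driver code):

/* microseconds == 1000, rate == 75000000:
 *
 *	ticks = DIV_ROUND_CLOSEST(1000 * 75000000ULL, 128 * USEC_PER_SEC);
 *	      = 75000000000 / 128000000 (rounded) == 586
 *
 * IPA v3.5.1 through v4.1 write 586 to the register directly;
 * IPA v4.2 splits it into base and scale fields instead.
 */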
@@ -751,6 +869,16 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 val;
+
+ val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
@@ -834,9 +962,10 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
val |= u32_encode_bits(status_endpoint_id,
STATUS_ENDP_FMASK);
}
- /* STATUS_LOCATION is 0 (status element precedes packet) */
- /* The next field is present for IPA v4.0 and above */
- /* STATUS_PKT_SUPPRESS_FMASK is 0 */
+ /* STATUS_LOCATION is 0, meaning status element precedes
+ * packet (not present for IPA v4.5)
+ */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
}
iowrite32(val, ipa->reg_virt + offset);
@@ -1207,7 +1336,6 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
- bool legacy;
u32 retries;
u32 len = 1;
void *virt;
@@ -1269,8 +1397,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- gsi_channel_reset(gsi, endpoint->channel_id, legacy);
+ gsi_channel_reset(gsi, endpoint->channel_id, true);
msleep(1);
@@ -1293,21 +1420,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
u32 channel_id = endpoint->channel_id;
struct ipa *ipa = endpoint->ipa;
bool special;
- bool legacy;
int ret = 0;
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
* is active, we need to handle things specially to recover.
* All other cases just need to reset the underlying GSI channel.
- *
- * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- special = !endpoint->toward_ipa && endpoint->data->aggregation;
+ special = ipa->version == IPA_VERSION_3_5_1 &&
+ !endpoint->toward_ipa &&
+ endpoint->data->aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
- gsi_channel_reset(&ipa->gsi, channel_id, legacy);
+ gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
dev_err(&ipa->pdev->dev,
@@ -1328,6 +1453,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
ipa_endpoint_status(endpoint);
}
@@ -1538,8 +1664,8 @@ int ipa_endpoint_config(struct ipa *ipa)
val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
/* Our RX is an IPA producer */
- rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
- max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
+ rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
+ max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
@@ -1548,7 +1674,7 @@ int ipa_endpoint_config(struct ipa *ipa)
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
- max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
+ max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
tx_mask = GENMASK(max - 1, 0);
ipa->available = rx_mask | tx_mask;
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 58a245de488e..881ecc27bd6e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -25,7 +25,7 @@ struct ipa_gsi_endpoint_data;
#define IPA_MTU ETH_DATA_LEN
enum ipa_endpoint_name {
- IPA_ENDPOINT_AP_MODEM_TX = 0,
+ IPA_ENDPOINT_AP_MODEM_TX,
IPA_ENDPOINT_MODEM_LAN_TX,
IPA_ENDPOINT_MODEM_COMMAND_TX,
IPA_ENDPOINT_AP_COMMAND_TX,
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index cc1ea28f7bc2..61dd7605bcb6 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -139,12 +139,12 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 val;
/* assert(mask & ipa->available); */
- val = ioread32(ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
if (enable)
val |= mask;
else
val &= ~mask;
- iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
}
/* Enable TX_SUSPEND for an endpoint */
@@ -168,7 +168,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
u32 val;
val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET);
- iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_CLR_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_CLR_OFFSET);
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 727e9c5044d1..b5d63a0cd19e 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -13,22 +13,6 @@ struct ipa;
struct ipa_interrupt;
/**
- * enum ipa_irq_id - IPA interrupt type
- * @IPA_IRQ_UC_0: Microcontroller event interrupt
- * @IPA_IRQ_UC_1: Microcontroller response interrupt
- * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
- *
- * The data ready interrupt is signaled if data has arrived that is destined
- * for an AP RX endpoint whose underlying GSI channel is suspended/stopped.
- */
-enum ipa_irq_id {
- IPA_IRQ_UC_0 = 2,
- IPA_IRQ_UC_1 = 3,
- IPA_IRQ_TX_SUSPEND = 14,
- IPA_IRQ_COUNT, /* Number of interrupt types (not an index) */
-};
-
-/**
* typedef ipa_irq_handler_t - IPA interrupt handler function type
* @ipa: IPA pointer
* @irq_id: interrupt type
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index cd4d993b0bbb..84bb8ae92725 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -70,6 +70,14 @@
#define IPA_FWS_PATH "ipa_fws.mdt"
#define IPA_PAS_ID 15
+/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
+#define DPL_TIMESTAMP_SHIFT 14 /* ~1.172 kHz, ~853 usec per tick */
+#define TAG_TIMESTAMP_SHIFT 14
+#define NAT_TIMESTAMP_SHIFT 24 /* ~1.144 Hz, ~874 msec per tick */
+
+/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
+#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
+
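The resolutions quoted in the comments above follow directly from the 19.2 MHz rate (integer results shown, illustrative only):

/* 19200000 >> 14 == 1171 Hz (~1.172 kHz) => ~853 usec per tick
 * 19200000 >> 24 ==    1 Hz (~1.144 Hz)  => ~874 msec per tick
 * 19200000 / 192 == 100000 Hz common timer clock (10 usec base tick)
 */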
/**
* ipa_suspend_handler() - Handle the suspend IPA interrupt
* @ipa: IPA pointer
@@ -111,8 +119,7 @@ int ipa_setup(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
- /* Setup for IPA v3.5.1 has some slight differences */
- ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
+ ret = gsi_setup(&ipa->gsi);
if (ret)
return ret;
@@ -231,8 +238,10 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
- } else {
+ } else if (ipa->version < IPA_VERSION_4_5) {
val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ } else {
+ /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
}
val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
@@ -244,31 +253,100 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
/* Configure DDR and PCIe max read/write QSB values */
static void ipa_hardware_config_qsb(struct ipa *ipa)
{
+ enum ipa_version version = ipa->version;
+ u32 max0;
+ u32 max1;
u32 val;
- /* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */
+ /* QMB_0 represents DDR; QMB_1 represents PCIe */
val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
- if (ipa->version == IPA_VERSION_4_2)
- val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK);
- else
- val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK);
+ switch (version) {
+ case IPA_VERSION_4_2:
+ max1 = 0; /* PCIe not present */
+ break;
+ case IPA_VERSION_4_5:
+ max1 = 8;
+ break;
+ default:
+ max1 = 4;
+ break;
+ }
+ val |= u32_encode_bits(max1, GEN_QMB_1_MAX_WRITES_FMASK);
iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
- if (ipa->version == IPA_VERSION_3_5_1) {
- val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK);
- val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
- } else {
- val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK);
- if (ipa->version == IPA_VERSION_4_2)
- val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK);
- else
- val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ max1 = 12;
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ max0 = 8;
+ break;
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ max0 = 12;
+ break;
+ case IPA_VERSION_4_2:
+ max0 = 12;
+ max1 = 0; /* PCIe not present */
+ break;
+ case IPA_VERSION_4_5:
+ max0 = 0; /* No limit (hardware maximum) */
+ break;
+ }
+ val = u32_encode_bits(max0, GEN_QMB_0_MAX_READS_FMASK);
+ val |= u32_encode_bits(max1, GEN_QMB_1_MAX_READS_FMASK);
+ if (version != IPA_VERSION_3_5_1) {
/* GEN_QMB_0_MAX_READS_BEATS is 0 */
/* GEN_QMB_1_MAX_READS_BEATS is 0 */
}
iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
}
+/* IPA uses unified Qtime starting at IPA v4.5, implementing various
+ * timestamps and timers independent of the IPA core clock rate. The
+ * Qtimer is based on a 56-bit timestamp incremented at each tick of
+ * a 19.2 MHz SoC crystal oscillator (XO clock).
+ *
+ * For IPA timestamps (tag, NAT, data path logging) a lower resolution
+ * timestamp is achieved by shifting the Qtimer timestamp value right
+ * some number of bits to produce the low-order bits of the coarser
+ * granularity timestamp.
+ *
+ * For timers, a common timer clock is derived from the XO clock using
+ * a divider (we use 192, to produce a 100 kHz timer clock). From
+ * this common clock, three "pulse generators" are used to produce
+ * timer ticks at a configurable frequency. IPA timers (such as
+ * those used for aggregation or head-of-line block handling) now
+ * define their period based on one of these pulse generators.
+ */
+static void ipa_qtime_config(struct ipa *ipa)
+{
+ u32 val;
+
+ /* Timer clock divider must be disabled when we change the rate */
+ iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+
+ /* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
+ val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
+ val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
+ /* Configure tag and NAT Qtime timestamp resolution as well */
+ val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
+ val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);
+
+ /* Set granularity of pulse generators used for other timers */
+ val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
+ val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
+ val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);
+
+ /* Actual divider is 1 more than value supplied here */
+ val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+
+ /* Divider value is set; re-enable the common timer clock divider */
+ val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+}
+
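After this function runs, the pulse-generator setup matches what the encoding helpers elsewhere in this patch assume:

/* Pulse generator 0: IPA_GRAN_100_US -> 100 usec ticks
 * Pulse generators 1 and 2: IPA_GRAN_1_MS -> 1 msec ticks
 *
 * aggr_time_limit_encoded() and hol_block_timer_qtime_val() depend
 * on generators 0 and 1 having exactly these granularities when
 * they compute gran_sel.
 */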
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
@@ -295,7 +373,7 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
*/
static void ipa_hardware_dcd_config(struct ipa *ipa)
{
- /* Recommended values for IPA 3.5 according to IPA HPG */
+ /* Recommended values for IPA 3.5 and later according to IPA HPG */
ipa_idle_indication_cfg(ipa, 256, false);
}
@@ -311,22 +389,26 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
*/
static void ipa_hardware_config(struct ipa *ipa)
{
+ enum ipa_version version = ipa->version;
u32 granularity;
u32 val;
- /* Fill in backward-compatibility register, based on version */
- val = ipa_reg_bcr_val(ipa->version);
- iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+ /* IPA v4.5 has no backward compatibility register */
+ if (version < IPA_VERSION_4_5) {
+ val = ipa_reg_bcr_val(version);
+ iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+ }
- if (ipa->version != IPA_VERSION_3_5_1) {
- /* Enable open global clocks (hardware workaround) */
+ /* Implement some hardware workarounds */
+ if (version != IPA_VERSION_3_5_1 && version < IPA_VERSION_4_5) {
+ /* Enable open global clocks (not needed for IPA v4.5) */
val = GLOBAL_FMASK;
val |= GLOBAL_2X_CLK_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
- /* Disable PA mask to allow HOLB drop (hardware workaround) */
+ /* Disable PA mask to allow HOLB drop */
val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
- val &= ~PA_MASK_EN;
+ val &= ~PA_MASK_EN_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
}
@@ -335,15 +417,21 @@ static void ipa_hardware_config(struct ipa *ipa)
/* Configure system bus limits */
ipa_hardware_config_qsb(ipa);
- /* Configure aggregation granularity */
- val = ioread32(ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
- granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- val = u32_encode_bits(granularity, AGGR_GRANULARITY);
- iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+ if (version < IPA_VERSION_4_5) {
+ /* Configure aggregation timer granularity */
+ granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+ } else {
+ ipa_qtime_config(ipa);
+ }
- /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */
- if (ipa->version == IPA_VERSION_4_2)
- iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET);
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ if (version == IPA_VERSION_4_2) {
+ u32 offset = ipa_reg_filt_rout_hash_en_offset(version);
+
+ iowrite32(0, ipa->reg_virt + offset);
+ }
/* Enable dynamic clock division */
ipa_hardware_dcd_config(ipa);
@@ -363,52 +451,41 @@ static void ipa_hardware_deconfig(struct ipa *ipa)
#ifdef IPA_VALIDATION
-/* # IPA resources used based on version (see IPA_RESOURCE_GROUP_COUNT) */
-static int ipa_resource_group_count(struct ipa *ipa)
-{
- switch (ipa->version) {
- case IPA_VERSION_3_5_1:
- return 3;
-
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- return 4;
-
- case IPA_VERSION_4_2:
- return 1;
-
- default:
- return 0;
- }
-}
-
static bool ipa_resource_limits_valid(struct ipa *ipa,
const struct ipa_resource_data *data)
{
- u32 group_count = ipa_resource_group_count(ipa);
+ u32 group_count;
u32 i;
u32 j;
- if (!group_count)
+ /* We program at most 6 source or destination resource group limits */
+ BUILD_BUG_ON(IPA_RESOURCE_GROUP_SRC_MAX > 6);
+
+ group_count = ipa_resource_group_src_count(ipa->version);
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_SRC_MAX)
return false;
- /* Return an error if a non-zero resource group limit is specified
- * for a resource not supported by hardware.
+ /* Return an error if a non-zero resource limit is specified
+ * for a resource group not supported by hardware.
*/
for (i = 0; i < data->resource_src_count; i++) {
const struct ipa_resource_src *resource;
resource = &data->resource_src[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ for (j = group_count; j < IPA_RESOURCE_GROUP_SRC_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
+ group_count = ipa_resource_group_dst_count(ipa->version);
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_DST_MAX)
+ return false;
+
for (i = 0; i < data->resource_dst_count; i++) {
const struct ipa_resource_dst *resource;
resource = &data->resource_dst[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ for (j = group_count; j < IPA_RESOURCE_GROUP_DST_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
@@ -435,46 +512,64 @@ ipa_resource_config_common(struct ipa *ipa, u32 offset,
val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ if (ylimits) {
+ val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ }
iowrite32(val, ipa->reg_virt + offset);
}
-static void ipa_resource_config_src_01(struct ipa *ipa,
- const struct ipa_resource_src *resource)
+static void ipa_resource_config_src(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
{
- u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ u32 group_count = ipa_resource_group_src_count(ipa->version);
+ const struct ipa_resource_limits *ylimits;
+ u32 offset;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[0], &resource->limits[1]);
-}
+ offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-static void ipa_resource_config_src_23(struct ipa *ipa,
- const struct ipa_resource_src *resource)
-{
- u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ if (group_count < 2)
+ return;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[2], &resource->limits[3]);
-}
+ offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-static void ipa_resource_config_dst_01(struct ipa *ipa,
- const struct ipa_resource_dst *resource)
-{
- u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ if (group_count < 4)
+ return;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[0], &resource->limits[1]);
+ offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
}
-static void ipa_resource_config_dst_23(struct ipa *ipa,
- const struct ipa_resource_dst *resource)
+static void ipa_resource_config_dst(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
{
- u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ u32 group_count = ipa_resource_group_dst_count(ipa->version);
+ const struct ipa_resource_limits *ylimits;
+ u32 offset;
+
+ offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
- ipa_resource_config_common(ipa, offset,
- &resource->limits[2], &resource->limits[3]);
+ if (group_count < 2)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
+
+ if (group_count < 4)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
}
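A concrete trace of the group-count logic above, for the largest and smallest counts:

/* group_count == 5 (IPA v4.5):
 *	GRP_01 register: X = limits[0], Y = limits[1]
 *	GRP_23 register: X = limits[2], Y = limits[3]
 *	GRP_45 register: X = limits[4], Y half skipped (ylimits NULL)
 *
 * group_count == 1 (IPA v4.2): only GRP_01 is written, with the
 * Y half skipped, and the function returns before GRP_23.
 */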
static int
@@ -485,15 +580,11 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
if (!ipa_resource_limits_valid(ipa, data))
return -EINVAL;
- for (i = 0; i < data->resource_src_count; i++) {
- ipa_resource_config_src_01(ipa, &data->resource_src[i]);
- ipa_resource_config_src_23(ipa, &data->resource_src[i]);
- }
+ for (i = 0; i < data->resource_src_count; i++)
+ ipa_resource_config_src(ipa, &data->resource_src[i]);
- for (i = 0; i < data->resource_dst_count; i++) {
- ipa_resource_config_dst_01(ipa, &data->resource_dst[i]);
- ipa_resource_config_dst_23(ipa, &data->resource_dst[i]);
- }
+ for (i = 0; i < data->resource_dst_count; i++)
+ ipa_resource_config_dst(ipa, &data->resource_dst[i]);
return 0;
}
@@ -678,16 +769,13 @@ static void ipa_validate_build(void)
*/
BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
- /* Exceeding 128 bytes makes the transaction pool *much* larger */
- BUILD_BUG_ON(sizeof(struct gsi_trans) > 128);
-
/* This is used as a divisor */
BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
- field_max(AGGR_GRANULARITY));
+ field_max(AGGR_GRANULARITY_FMASK));
#endif /* IPA_VALIDATE */
}
@@ -720,15 +808,21 @@ static int ipa_probe(struct platform_device *pdev)
const struct ipa_data *data;
struct ipa_clock *clock;
struct rproc *rproc;
- bool modem_alloc;
bool modem_init;
struct ipa *ipa;
- bool prefetch;
phandle ph;
int ret;
ipa_validate_build();
+ /* Get configuration data early; needed for clock initialization */
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ /* This is really IPA_VALIDATE (should never happen) */
+ dev_err(dev, "matched hardware not supported\n");
+ return -ENODEV;
+ }
+
/* If we need Trust Zone, make sure it's available */
modem_init = of_property_read_bool(dev->of_node, "modem-init");
if (!modem_init)
@@ -749,22 +843,13 @@ static int ipa_probe(struct platform_device *pdev)
/* The clock and interconnects might not be ready when we're
* probed, so might return -EPROBE_DEFER.
*/
- clock = ipa_clock_init(dev);
+ clock = ipa_clock_init(dev, data->clock_data);
if (IS_ERR(clock)) {
ret = PTR_ERR(clock);
goto err_rproc_put;
}
- /* No more EPROBE_DEFER. Get our configuration data */
- data = of_device_get_match_data(dev);
- if (!data) {
- /* This is really IPA_VALIDATE (should never happen) */
- dev_err(dev, "matched hardware not supported\n");
- ret = -ENOTSUPP;
- goto err_clock_exit;
- }
-
- /* Allocate and initialize the IPA structure */
+ /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
if (!ipa) {
ret = -ENOMEM;
@@ -785,17 +870,12 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_reg_exit;
- /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
- prefetch = ipa->version != IPA_VERSION_3_5_1;
- /* IPA v4.2 requires the AP to allocate channels for the modem */
- modem_alloc = ipa->version == IPA_VERSION_4_2;
-
- ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
- data->endpoint_data, modem_alloc);
+ ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
+ data->endpoint_data);
if (ret)
goto err_mem_exit;
- /* Result is a non-zero mask endpoints that support filtering */
+ /* Result is a non-zero mask of endpoints that support filtering */
ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
data->endpoint_data);
if (!ipa->filter_map) {
@@ -870,6 +950,11 @@ static int ipa_remove(struct platform_device *pdev)
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
+ /* If starting or stopping is in progress, try once more */
+ if (ret == -EBUSY) {
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ ret = ipa_modem_stop(ipa);
+ }
if (ret)
return ret;
@@ -890,6 +975,15 @@ static int ipa_remove(struct platform_device *pdev)
return 0;
}
+static void ipa_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = ipa_remove(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
+}
+
/**
* ipa_suspend() - Power management system suspend callback
* @dev: IPA device structure
@@ -947,8 +1041,9 @@ static const struct dev_pm_ops ipa_pm_ops = {
};
static struct platform_driver ipa_driver = {
- .probe = ipa_probe,
- .remove = ipa_remove,
+ .probe = ipa_probe,
+ .remove = ipa_remove,
+ .shutdown = ipa_shutdown,
.driver = {
.name = "ipa",
.pm = &ipa_pm_ops,
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 2d45c444a67f..0cc3a3374caa 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -89,7 +89,7 @@ int ipa_mem_setup(struct ipa *ipa)
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
- iowrite32(ipa->mem_offset + offset,
+ iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset,
ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);
return 0;
@@ -160,13 +160,13 @@ int ipa_mem_config(struct ipa *ipa)
mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
/* If the sizes don't match, issue a warning */
- if (ipa->mem_offset + mem_size > ipa->mem_size) {
- dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
- mem_size);
- } else if (ipa->mem_offset + mem_size < ipa->mem_size) {
+ if (ipa->mem_offset + mem_size < ipa->mem_size) {
dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
mem_size);
ipa->mem_size = mem_size;
+ } else if (ipa->mem_offset + mem_size > ipa->mem_size) {
+ dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
+ mem_size);
}
/* Prealloc DMA memory for zeroing regions */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 5090f0f923ad..2fc64483f275 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -168,7 +168,7 @@ static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
ipa_qmi->indication_sent = false;
}
-static struct qmi_ops ipa_server_ops = {
+static const struct qmi_ops ipa_server_ops = {
.bye = ipa_server_bye,
};
@@ -234,7 +234,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
}
/* The server handles two request message types sent by the modem. */
-static struct qmi_msg_handler ipa_server_msg_handlers[] = {
+static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
{
.type = QMI_REQUEST,
.msg_id = IPA_QMI_INDICATION_REGISTER,
@@ -261,7 +261,7 @@ static void ipa_client_init_driver(struct qmi_handle *qmi,
}
/* The client handles one response message type sent by the modem. */
-static struct qmi_msg_handler ipa_client_msg_handlers[] = {
+static const struct qmi_msg_handler ipa_client_msg_handlers[] = {
{
.type = QMI_RESPONSE,
.msg_id = IPA_QMI_INIT_DRIVER,
@@ -413,7 +413,7 @@ static void ipa_client_init_driver_work(struct work_struct *work)
int ret;
ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
- qmi = &ipa_qmi->client_handle,
+ qmi = &ipa_qmi->client_handle;
ipa = container_of(ipa_qmi, struct ipa, qmi);
dev = &ipa->pdev->dev;
@@ -463,7 +463,7 @@ ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
return 0;
}
-static struct qmi_ops ipa_client_ops = {
+static const struct qmi_ops ipa_client_ops = {
.new_server = ipa_client_new_server,
};
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index cfac456cea0c..12b6621f4b0e 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -74,12 +74,12 @@ struct ipa_init_complete_ind {
/* The AP tells the modem its platform type. We assume Android. */
enum ipa_platform_type {
- IPA_QMI_PLATFORM_TYPE_INVALID = 0, /* Invalid */
- IPA_QMI_PLATFORM_TYPE_TN = 1, /* Data card */
- IPA_QMI_PLATFORM_TYPE_LE = 2, /* Data router */
- IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 3, /* Android MSM */
- IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 4, /* Windows MSM */
- IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
+ IPA_QMI_PLATFORM_TYPE_INVALID = 0x0, /* Invalid */
+ IPA_QMI_PLATFORM_TYPE_TN = 0x1, /* Data card */
+ IPA_QMI_PLATFORM_TYPE_LE = 0x2, /* Data router */
+ IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 0x3, /* Android MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 0x4, /* Windows MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
/* This defines the start and end offset of a range of memory. Both
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index e542598fd775..e6b0827a244e 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -65,14 +65,15 @@ struct ipa;
* of valid bits for the register.
*/
-#define IPA_REG_ENABLED_PIPES_OFFSET 0x00000038
-
#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
+/* The next field is not supported for IPA v4.1 */
#define ENABLE_FMASK GENMASK(0, 0)
#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
+/* The next field is not present for IPA v4.5 */
#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
+/* The remaining fields are not present for IPA v3.5.1 */
#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
@@ -86,6 +87,8 @@ struct ipa;
#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
#define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17)
+/* The next field is present for IPA v4.5 */
+#define IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN_FMASK GENMASK(21, 21)
#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
#define RX_FMASK GENMASK(0, 0)
@@ -105,11 +108,13 @@ struct ipa;
#define ACK_MNGR_FMASK GENMASK(14, 14)
#define D_DCPH_FMASK GENMASK(15, 15)
#define H_DCPH_FMASK GENMASK(16, 16)
+/* The next field is not present for IPA v4.5 */
#define DCMP_FMASK GENMASK(17, 17)
#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
#define TX_0_FMASK GENMASK(19, 19)
#define TX_1_FMASK GENMASK(20, 20)
#define FNR_FMASK GENMASK(21, 21)
+/* The remaining fields are not present for IPA v3.5.1 */
#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
@@ -118,6 +123,8 @@ struct ipa;
#define GSI_IF_FMASK GENMASK(27, 27)
#define GLOBAL_FMASK GENMASK(28, 28)
#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
+/* The next field is present for IPA v4.5 */
+#define DPL_FIFO_FMASK GENMASK(30, 30)
#define IPA_REG_ROUTE_OFFSET 0x00000048
#define ROUTE_DIS_FMASK GENMASK(0, 0)
@@ -138,25 +145,17 @@ struct ipa;
#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are present for IPA v4.0 and above */
+/* The next two fields are not present for IPA v3.5.1 */
#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
-static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
{
if (version == IPA_VERSION_3_5_1)
- return 0x0000010c;
+ return 0x000008c;
- return 0x000000b4;
+ return 0x0000148;
}
-/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
-
-/* The next register is present for IPA v4.2 and above */
-#define IPA_REG_FILT_ROUT_HASH_EN_OFFSET 0x00000148
-#define IPV6_ROUTER_HASH_EN GENMASK(0, 0)
-#define IPV6_FILTER_HASH_EN GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_EN GENMASK(8, 8)
-#define IPV4_FILTER_HASH_EN GENMASK(12, 12)
static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
{
@@ -166,76 +165,108 @@ static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
return 0x000014c;
}
-#define IPV6_ROUTER_HASH_FLUSH GENMASK(0, 0)
-#define IPV6_FILTER_HASH_FLUSH GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_FLUSH GENMASK(8, 8)
-#define IPV4_FILTER_HASH_FLUSH GENMASK(12, 12)
+/* The next four fields are used for the hash enable and flush registers */
+#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0)
+#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_FMASK GENMASK(8, 8)
+#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12)
+
+/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
+static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000010c;
+
+ return 0x000000b4;
+}
+/* The next register is not present for IPA v4.5 */
#define IPA_REG_BCR_OFFSET 0x000001d0
-#define BCR_CMDQ_L_LACK_ONE_ENTRY BIT(0)
-#define BCR_TX_NOT_USING_BRESP BIT(1)
-#define BCR_SUSPEND_L2_IRQ BIT(3)
-#define BCR_HOLB_DROP_L2_IRQ BIT(4)
-#define BCR_DUAL_TX BIT(5)
+/* The next two fields are not present for IPA v4.2 */
+#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
+#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
+/* The next field is invalid for IPA v4.1 */
+#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
+/* The next two fields are not present for IPA v4.2 */
+#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
+#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
+#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
+#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
+#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
+#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
+#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
/* Backward compatibility register value to use for each version */
static inline u32 ipa_reg_bcr_val(enum ipa_version version)
{
if (version == IPA_VERSION_3_5_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_TX_NOT_USING_BRESP |
- BCR_SUSPEND_L2_IRQ | BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+ return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
+ BCR_TX_NOT_USING_BRESP_FMASK |
+ BCR_SUSPEND_L2_IRQ_FMASK |
+ BCR_HOLB_DROP_L2_IRQ_FMASK |
+ BCR_DUAL_TX_FMASK;
if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_SUSPEND_L2_IRQ |
- BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+ return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
+ BCR_SUSPEND_L2_IRQ_FMASK |
+ BCR_HOLB_DROP_L2_IRQ_FMASK |
+ BCR_DUAL_TX_FMASK;
+
+ /* assert(version != IPA_VERSION_4_5); */
return 0x00000000;
}
+/* The value of the next register must be a multiple of 8 */
#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
-#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
+
+/* The next register is not present for IPA v4.5 */
+#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
+#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
/* The internal inactivity timer clock is used for the aggregation timer */
-#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
+#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
-#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
-#define AGGR_GRANULARITY GENMASK(8, 4)
/* Compute the value to use in the AGGR_GRANULARITY field representing the
* given number of microseconds. The value is one less than the number of
- * timer ticks in the requested period. Zero not a valid granularity value.
+ * timer ticks in the requested period. 0 is not a valid granularity value.
*/
static inline u32 ipa_aggr_granularity_val(u32 usec)
{
return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
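For example, assuming the driver's IPA_AGGR_GRANULARITY of 500 microseconds:

/* DIV_ROUND_CLOSEST(500 * 32000, USEC_PER_SEC) - 1
 *	== 16000000 / 1000000 - 1 == 15
 *
 * which fits in the 5-bit AGGR_GRANULARITY_FMASK field (max 31).
 */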
+/* The next register is not present for IPA v4.5 */
#define IPA_REG_TX_CFG_OFFSET 0x000001fc
/* The first three fields are present for IPA v3.5.1 only */
-#define TX0_PREFETCH_DISABLE GENMASK(0, 0)
-#define TX1_PREFETCH_DISABLE GENMASK(1, 1)
-#define PREFETCH_ALMOST_EMPTY_SIZE GENMASK(4, 2)
-/* The next fields are present for IPA v4.0 and above */
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX0 GENMASK(5, 2)
-#define DMAW_SCND_OUTSD_PRED_THRESHOLD GENMASK(9, 6)
-#define DMAW_SCND_OUTSD_PRED_EN GENMASK(10, 10)
-#define DMAW_MAX_BEATS_256_DIS GENMASK(11, 11)
-#define PA_MASK_EN GENMASK(12, 12)
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX1 GENMASK(16, 13)
-/* The last two fields are present for IPA v4.2 and above */
-#define SSPND_PA_NO_START_STATE GENMASK(18, 18)
-#define SSPND_PA_NO_BQ_STATE GENMASK(19, 19)
+#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
+#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
+#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
+/* The next six fields are present for IPA v4.0 and above */
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
+#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
+#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
+#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
+#define PA_MASK_EN_FMASK GENMASK(12, 12)
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
+/* The next field is present for IPA v4.5 */
+#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17)
+/* The next two fields are present for IPA v4.2 only */
+#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
+#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
-#define BAM_MAX_PIPES_FMASK GENMASK(4, 0)
-#define BAM_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
-#define BAM_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
-#define BAM_PROD_LOWEST_FMASK GENMASK(27, 24)
+#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
+#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
+#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
+#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
{
- if (version == IPA_VERSION_4_2)
+ if (version >= IPA_VERSION_4_2)
return 0x00000240;
return 0x00000220;
@@ -244,25 +275,102 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
+/* The next register is present for IPA v4.5 */
+#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c
+#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0)
+#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7)
+#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)
+#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16)
+
+/* The next register is present for IPA v4.5 */
+#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250
+#define DIV_VALUE_FMASK GENMASK(8, 0)
+#define DIV_ENABLE_FMASK GENMASK(31, 31)
+
+/* The next register is present for IPA v4.5 */
+#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254
+#define GRAN_0_FMASK GENMASK(2, 0)
+#define GRAN_1_FMASK GENMASK(5, 3)
+#define GRAN_2_FMASK GENMASK(8, 6)
+/* Values for GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
+enum ipa_pulse_gran {
+ IPA_GRAN_10_US = 0x0,
+ IPA_GRAN_20_US = 0x1,
+ IPA_GRAN_50_US = 0x2,
+ IPA_GRAN_100_US = 0x3,
+ IPA_GRAN_1_MS = 0x4,
+ IPA_GRAN_10_MS = 0x5,
+ IPA_GRAN_100_MS = 0x6,
+ IPA_GRAN_655350_US = 0x7,
+};
+
+/* # IPA source resource groups available based on version */
+static inline u32 ipa_resource_group_src_count(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ case IPA_VERSION_4_5:
+ return 5;
+
+ default:
+ return 0;
+ }
+}
+
+/* # IPA destination resource groups available based on version */
+static inline u32 ipa_resource_group_dst_count(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ return 3;
+
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ case IPA_VERSION_4_5:
+ return 5;
+
+ default:
+ return 0;
+ }
+}
+
+/* Not all of the following are valid (depends on the count, above) */
#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
(0x00000400 + 0x0020 * (rt))
#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
(0x00000404 + 0x0020 * (rt))
+/* The next register is only present for IPA v4.5 */
#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
(0x00000408 + 0x0020 * (rt))
#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
(0x00000500 + 0x0020 * (rt))
#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
(0x00000504 + 0x0020 * (rt))
+/* The next register is only present for IPA v4.5 */
#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
(0x00000508 + 0x0020 * (rt))
+/* The next four fields are used for all resource group registers */
#define X_MIN_LIM_FMASK GENMASK(5, 0)
#define X_MAX_LIM_FMASK GENMASK(13, 8)
+/* The next two fields are not used when the resource group count is odd */
#define Y_MIN_LIM_FMASK GENMASK(21, 16)
#define Y_MAX_LIM_FMASK GENMASK(29, 24)
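The X fields configure the even-numbered group and the Y fields the odd-numbered group of each register pair. A hedged sketch (not part of the patch; the limit values are invented) of filling one source resource-type register:

	/* Hypothetical: program limits for source resource groups 0 and 1
	 * of resource type rt.  Values must fit the 6-bit limit fields.
	 */
	static void ipa_src_rsrc_grp_01_init(struct ipa *ipa, u32 rt)
	{
		u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt);
		u32 val;

		val = u32_encode_bits(1, X_MIN_LIM_FMASK);	/* group 0 */
		val |= u32_encode_bits(63, X_MAX_LIM_FMASK);
		val |= u32_encode_bits(1, Y_MIN_LIM_FMASK);	/* group 1 */
		val |= u32_encode_bits(63, Y_MAX_LIM_FMASK);

		iowrite32(val, ipa->reg_virt + offset);
	}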
#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
(0x00000800 + 0x0070 * (ep))
+/* The next field should only be used for IPA v3.5.1 */
#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
#define ENDP_DELAY_FMASK GENMASK(1, 1)
@@ -273,6 +381,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
+enum ipa_cs_offload_en {
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL = 0x1,
+ IPA_CS_OFFLOAD_DL = 0x2,
+};
+
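A hedged sketch of how these values might be written; CS_OFFLOAD_EN_FMASK and IPA_REG_ENDP_INIT_CFG_N_OFFSET are assumed from the unchanged parts of this file and do not appear in this hunk:

	/* Hypothetical: enable uplink checksum offload on an endpoint.
	 * Both macro names here are assumptions, not additions made by
	 * this patch.
	 */
	static void ipa_endpoint_csum_enable(struct ipa *ipa, u32 endpoint_id)
	{
		u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint_id);
		u32 val;

		val = u32_encode_bits(IPA_CS_OFFLOAD_UL, CS_OFFLOAD_EN_FMASK);
		iowrite32(val, ipa->reg_virt + offset);
	}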
#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
(0x00000810 + 0x0070 * (ep))
#define HDR_LEN_FMASK GENMASK(5, 0)
@@ -283,7 +398,45 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
#define HDR_A5_MUX_FMASK GENMASK(26, 26)
#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
+/* The next field is not present for IPA v4.5 */
#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
+/* The next two fields are present for IPA v4.5 */
+#define HDR_LEN_MSB_FMASK GENMASK(29, 28)
+#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30)
+
+/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
+static inline u32 ipa_header_size_encoded(enum ipa_version version,
+ u32 header_size)
+{
+ u32 val;
+
+ val = u32_encode_bits(header_size, HDR_LEN_FMASK);
+ if (version < IPA_VERSION_4_5)
+ return val;
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ header_size >>= hweight32(HDR_LEN_FMASK);
+ val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
+
+ return val;
+}
+
+/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
+static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
+ u32 offset)
+{
+ u32 val;
+
+ val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
+ if (version < IPA_VERSION_4_5)
+ return val;
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ offset >>= hweight32(HDR_OFST_METADATA_FMASK);
+ val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
+
+ return val;
+}
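Taken together, the two helpers let callers stay version-agnostic when composing an ENDP_INIT_HDR value. A usage sketch (not part of the patch; the header size and metadata offset are example values):

	static void ipa_endpoint_init_hdr_sketch(struct ipa *ipa, u32 endpoint_id)
	{
		u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint_id);
		u32 val;

		/* 8-byte header with metadata at offset 0 (placeholders) */
		val = ipa_header_size_encoded(ipa->version, 8);
		val |= ipa_metadata_offset_encoded(ipa->version, 0);

		iowrite32(val, ipa->reg_virt + offset);
	}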
#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
(0x00000814 + 0x0070 * (ep))
@@ -293,6 +446,10 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
+/* The next three fields are present for IPA v4.5 */
+#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16)
+#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18)
+#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20)
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
@@ -302,22 +459,77 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
(0x00000820 + 0x0070 * (txep))
#define MODE_FMASK GENMASK(2, 0)
+/* The next field is present for IPA v4.5 */
+#define DCPH_ENABLE_FMASK GENMASK(3, 3)
#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
#define PAD_EN_FMASK GENMASK(29, 29)
+/* The next field is not present for IPA v4.5 */
#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
+/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
+enum ipa_mode {
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
+};
+
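A sketch (not part of the patch) of how the mode and destination fields above combine, for example to put a TX endpoint into DMA mode:

	/* Hypothetical: pass packets arriving on TX endpoint txep
	 * directly to endpoint dest without packet processing.
	 */
	static void ipa_endpoint_dma_mode_sketch(struct ipa *ipa, u32 txep, u32 dest)
	{
		u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep);
		u32 val;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dest, DEST_PIPE_INDEX_FMASK);

		iowrite32(val, ipa->reg_virt + offset);
	}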
#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
(0x00000824 + 0x0070 * (ep))
#define AGGR_EN_FMASK GENMASK(1, 0)
#define AGGR_TYPE_FMASK GENMASK(4, 2)
-#define AGGR_BYTE_LIMIT_FMASK GENMASK(9, 5)
-#define AGGR_TIME_LIMIT_FMASK GENMASK(14, 10)
-#define AGGR_PKT_LIMIT_FMASK GENMASK(20, 15)
-#define AGGR_SW_EOF_ACTIVE_FMASK GENMASK(21, 21)
-#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
-#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
+static inline u32 aggr_byte_limit_fmask(bool legacy)
+{
+ return legacy ? GENMASK(9, 5) : GENMASK(10, 5);
+}
+
+static inline u32 aggr_time_limit_fmask(bool legacy)
+{
+ return legacy ? GENMASK(14, 10) : GENMASK(16, 12);
+}
+
+static inline u32 aggr_pkt_limit_fmask(bool legacy)
+{
+ return legacy ? GENMASK(20, 15) : GENMASK(22, 17);
+}
+
+static inline u32 aggr_sw_eof_active_fmask(bool legacy)
+{
+ return legacy ? GENMASK(21, 21) : GENMASK(23, 23);
+}
+
+static inline u32 aggr_force_close_fmask(bool legacy)
+{
+ return legacy ? GENMASK(22, 22) : GENMASK(24, 24);
+}
+
+static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy)
+{
+ return legacy ? GENMASK(24, 24) : GENMASK(26, 26);
+}
+
+/* The next field is present for IPA v4.5 */
+#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27)
+
+/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_en {
+ IPA_BYPASS_AGGR = 0x0,
+ IPA_ENABLE_AGGR = 0x1,
+ IPA_ENABLE_DEAGGR = 0x2,
+};
+
+/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
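A hedged sketch of how the legacy-dependent masks above might be used; deriving "legacy" from the IPA version is an assumption here, as is the byte limit value:

	static void ipa_endpoint_aggr_sketch(struct ipa *ipa, u32 endpoint_id)
	{
		bool legacy = ipa->version < IPA_VERSION_4_5;	/* assumed */
		u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint_id);
		u32 val;

		val = u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
		val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
		/* 8 KB aggregation byte limit (placeholder value) */
		val |= u32_encode_bits(8, aggr_byte_limit_fmask(legacy));

		iowrite32(val, ipa->reg_virt + offset);
	}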
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
@@ -327,21 +539,37 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
(0x00000830 + 0x0070 * (rxep))
-/* The next fields are present for IPA v4.2 only */
+/* The next two fields are present for IPA v4.2 only */
#define BASE_VALUE_FMASK GENMASK(4, 0)
#define SCALE_FMASK GENMASK(12, 8)
+/* The next two fields are present for IPA v4.5 */
+#define TIME_LIMIT_FMASK GENMASK(4, 0)
+#define GRAN_SEL_FMASK GENMASK(8, 8)
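The two encodings are mutually exclusive: IPA v4.2 programs a base/scale pair, while v4.5 programs a time limit in a selected granularity. A hedged sketch (not part of the patch; the granularity handling is simplified):

	static u32 hol_block_timer_val(enum ipa_version version, u32 ticks)
	{
		if (version == IPA_VERSION_4_2)
			return u32_encode_bits(ticks, BASE_VALUE_FMASK);

		/* IPA v4.5: GRAN_SEL left at zero for this sketch */
		return u32_encode_bits(ticks, TIME_LIMIT_FMASK);
	}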
/* Valid only for TX (IPA consumer) endpoints */
#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
(0x00000834 + 0x0070 * (txep))
#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
+#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6)
#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
+#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14)
#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
(0x00000838 + 0x0070 * (ep))
-#define RSRC_GRP_FMASK GENMASK(1, 0)
+/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */
+static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
+{
+ switch (version) {
+ case IPA_VERSION_4_2:
+ return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+ case IPA_VERSION_4_5:
+ return u32_encode_bits(rsrc_grp, GENMASK(2, 0));
+ default:
+ return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
+ }
+}
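A usage sketch for the helper above (not part of the patch); the group number passed in would be capped by the per-version counts defined earlier in this file:

	static void ipa_endpoint_rsrc_grp_sketch(struct ipa *ipa, u32 ep, u32 grp)
	{
		u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep);

		iowrite32(rsrc_grp_encoded(ipa->version, grp),
			  ipa->reg_virt + offset);
	}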
/* Valid only for TX (IPA consumer) endpoints */
#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
@@ -351,15 +579,35 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12)
+/**
+ * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N
+ * @IPA_SEQ_DMA_ONLY: only DMA is performed
+ * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
+ * second packet processing pass + no decipher + microcontroller
+ * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
+ * packet processing + no decipher + no uCP + HPS REP DMA parser
+ * @IPA_SEQ_INVALID: invalid sequencer type
+ *
+ * The values defined here are broken into 4-bit nibbles that are written
+ * into fields of the ENDP_INIT_SEQ registers.
+ */
+enum ipa_seq_type {
+ IPA_SEQ_DMA_ONLY = 0x0000,
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
+ IPA_SEQ_INVALID = 0xffff,
+};
+
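A sketch of how a sequencer value decomposes into its four nibbles; HPS_SEQ_TYPE_FMASK and DPS_SEQ_TYPE_FMASK (assumed to cover bits 3:0 and 7:4) come from the unchanged part of this file:

	static u32 ipa_endpoint_seq_val(enum ipa_seq_type seq_type)
	{
		u32 val;

		val = u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
		val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
		/* The upper two nibbles apply to replicated packets */
		val |= u32_encode_bits((seq_type >> 8) & 0xf,
				       HPS_REP_SEQ_TYPE_FMASK);
		val |= u32_encode_bits((seq_type >> 12) & 0xf,
				       DPS_REP_SEQ_TYPE_FMASK);

		return val;
	}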
#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
(0x00000840 + 0x0070 * (ep))
#define STATUS_EN_FMASK GENMASK(0, 0)
#define STATUS_ENDP_FMASK GENMASK(5, 1)
+/* The next field is not present for IPA v4.5 */
#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is present for IPA v4.0 and above */
+/* The next field is not present for IPA v3.5.1 */
#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */
+/* The next register is only present for IPA versions that support hashing */
#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
(0x0000085c + 0x0070 * (er))
#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
@@ -394,89 +642,69 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \
(0x00003010 + 0x1000 * (ee))
+/**
+ * enum ipa_irq_id - Bit positions representing type of IPA IRQ
+ * @IPA_IRQ_UC_0: Microcontroller event interrupt
+ * @IPA_IRQ_UC_1: Microcontroller response interrupt
+ * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ *
+ * IRQ types not described above are not currently used.
+ */
+enum ipa_irq_id {
+ IPA_IRQ_BAD_SNOC_ACCESS = 0x0,
+ /* Type (bit) 0x1 is not defined */
+ IPA_IRQ_UC_0 = 0x2,
+ IPA_IRQ_UC_1 = 0x3,
+ IPA_IRQ_UC_2 = 0x4,
+ IPA_IRQ_UC_3 = 0x5,
+ IPA_IRQ_UC_IN_Q_NOT_EMPTY = 0x6,
+ IPA_IRQ_UC_RX_CMD_Q_NOT_FULL = 0x7,
+ IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY = 0x8,
+ IPA_IRQ_RX_ERR = 0x9,
+ IPA_IRQ_DEAGGR_ERR = 0xa,
+ IPA_IRQ_TX_ERR = 0xb,
+ IPA_IRQ_STEP_MODE = 0xc,
+ IPA_IRQ_PROC_ERR = 0xd,
+ IPA_IRQ_TX_SUSPEND = 0xe,
+ IPA_IRQ_TX_HOLB_DROP = 0xf,
+ IPA_IRQ_BAM_GSI_IDLE = 0x10,
+ IPA_IRQ_PIPE_YELLOW_BELOW = 0x11,
+ IPA_IRQ_PIPE_RED_BELOW = 0x12,
+ IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13,
+ IPA_IRQ_PIPE_RED_ABOVE = 0x14,
+ IPA_IRQ_UCP = 0x15,
+ IPA_IRQ_DCMP = 0x16,
+ IPA_IRQ_GSI_EE = 0x17,
+ IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18,
+ IPA_IRQ_GSI_UC = 0x19,
+ /* The next bit is present for IPA v4.5 */
+ IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a,
+ IPA_IRQ_COUNT, /* Last; not an id */
+};
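Each enum value is a bit position in the IRQ registers, so an enable mask is built by OR-ing BIT() values. A hedged sketch; IPA_REG_IRQ_EN_OFFSET is assumed from the unchanged part of this file:

	static void ipa_interrupt_enable_sketch(struct ipa *ipa)
	{
		u32 val = BIT(IPA_IRQ_UC_0) | BIT(IPA_IRQ_UC_1) |
			  BIT(IPA_IRQ_TX_SUSPEND);

		iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
	}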
#define IPA_REG_IRQ_UC_OFFSET \
IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \
(0x0000301c + 0x1000 * (ee))
+#define UC_INTR_FMASK GENMASK(0, 0)
+/* ipa->available defines the valid bits in the SUSPEND_INFO register */
#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \
IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \
(0x00003030 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-#define IPA_REG_SUSPEND_IRQ_EN_OFFSET \
- IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(ee) \
+/* ipa->available defines the valid bits in the IRQ_SUSPEND_EN register */
+#define IPA_REG_IRQ_SUSPEND_EN_OFFSET \
+ IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(ee) \
(0x00003034 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_IRQ_EN register */
-#define IPA_REG_SUSPEND_IRQ_CLR_OFFSET \
- IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(ee) \
+/* ipa->available defines the valid bits in the IRQ_SUSPEND_CLR register */
+#define IPA_REG_IRQ_SUSPEND_CLR_OFFSET \
+ IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(ee) \
(0x00003038 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_IRQ_CLR register */
-
-/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
-enum ipa_cs_offload_en {
- IPA_CS_OFFLOAD_NONE = 0,
- IPA_CS_OFFLOAD_UL = 1,
- IPA_CS_OFFLOAD_DL = 2,
- IPA_CS_RSVD
-};
-
-/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
-enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0,
- IPA_ENABLE_AGGR = 1,
- IPA_ENABLE_DEAGGR = 2,
-};
-
-/** enum ipa_aggr_type - aggregation type field in in_ENDP_INIT_AGGR_N */
-enum ipa_aggr_type {
- IPA_MBIM_16 = 0,
- IPA_HDLC = 1,
- IPA_TLP = 2,
- IPA_RNDIS = 3,
- IPA_GENERIC = 4,
- IPA_COALESCE = 5,
- IPA_QCMAP = 6,
-};
-
-/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
-enum ipa_mode {
- IPA_BASIC = 0,
- IPA_ENABLE_FRAMING_HDLC = 1,
- IPA_ENABLE_DEFRAMING_HDLC = 2,
- IPA_DMA = 3,
-};
-
-/**
- * enum ipa_seq_type - HPS and DPS sequencer type fields in in ENDP_INIT_SEQ_N
- * @IPA_SEQ_DMA_ONLY: only DMA is performed
- * @IPA_SEQ_PKT_PROCESS_NO_DEC_UCP:
- * packet processing + no decipher + microcontroller (Ethernet Bridging)
- * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
- * second packet processing pass + no decipher + microcontroller
- * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher
- * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression
- * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
- * packet processing + no decipher + no uCP + HPS REP DMA parser
- * @IPA_SEQ_INVALID: invalid sequencer type
- *
- * The values defined here are broken into 4-bit nibbles that are written
- * into fields of the INIT_SEQ_N endpoint registers.
- */
-enum ipa_seq_type {
- IPA_SEQ_DMA_ONLY = 0x0000,
- IPA_SEQ_PKT_PROCESS_NO_DEC_UCP = 0x0002,
- IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
- IPA_SEQ_DMA_DEC = 0x0011,
- IPA_SEQ_DMA_COMP_DECOMP = 0x0020,
- IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
- IPA_SEQ_INVALID = 0xffff,
-};
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index b3790aa952a1..32e2d3e052d5 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -422,8 +422,8 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
- val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;
+ val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
+ val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
ipa_cmd_register_write_add(trans, offset, val, val, false);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index b382d47bc70d..dee58a6596d4 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -86,32 +86,32 @@ struct ipa_uc_mem_area {
/** enum ipa_uc_command - commands from the AP to the microcontroller */
enum ipa_uc_command {
- IPA_UC_COMMAND_NO_OP = 0,
- IPA_UC_COMMAND_UPDATE_FLAGS = 1,
- IPA_UC_COMMAND_DEBUG_RUN_TEST = 2,
- IPA_UC_COMMAND_DEBUG_GET_INFO = 3,
- IPA_UC_COMMAND_ERR_FATAL = 4,
- IPA_UC_COMMAND_CLK_GATE = 5,
- IPA_UC_COMMAND_CLK_UNGATE = 6,
- IPA_UC_COMMAND_MEMCPY = 7,
- IPA_UC_COMMAND_RESET_PIPE = 8,
- IPA_UC_COMMAND_REG_WRITE = 9,
- IPA_UC_COMMAND_GSI_CH_EMPTY = 10,
+ IPA_UC_COMMAND_NO_OP = 0x0,
+ IPA_UC_COMMAND_UPDATE_FLAGS = 0x1,
+ IPA_UC_COMMAND_DEBUG_RUN_TEST = 0x2,
+ IPA_UC_COMMAND_DEBUG_GET_INFO = 0x3,
+ IPA_UC_COMMAND_ERR_FATAL = 0x4,
+ IPA_UC_COMMAND_CLK_GATE = 0x5,
+ IPA_UC_COMMAND_CLK_UNGATE = 0x6,
+ IPA_UC_COMMAND_MEMCPY = 0x7,
+ IPA_UC_COMMAND_RESET_PIPE = 0x8,
+ IPA_UC_COMMAND_REG_WRITE = 0x9,
+ IPA_UC_COMMAND_GSI_CH_EMPTY = 0xa,
};
/** enum ipa_uc_response - microcontroller response codes */
enum ipa_uc_response {
- IPA_UC_RESPONSE_NO_OP = 0,
- IPA_UC_RESPONSE_INIT_COMPLETED = 1,
- IPA_UC_RESPONSE_CMD_COMPLETED = 2,
- IPA_UC_RESPONSE_DEBUG_GET_INFO = 3,
+ IPA_UC_RESPONSE_NO_OP = 0x0,
+ IPA_UC_RESPONSE_INIT_COMPLETED = 0x1,
+ IPA_UC_RESPONSE_CMD_COMPLETED = 0x2,
+ IPA_UC_RESPONSE_DEBUG_GET_INFO = 0x3,
};
/** enum ipa_uc_event - common cpu events reported by the microcontroller */
enum ipa_uc_event {
- IPA_UC_EVENT_NO_OP = 0,
- IPA_UC_EVENT_ERROR = 1,
- IPA_UC_EVENT_LOG_INFO = 2,
+ IPA_UC_EVENT_NO_OP = 0x0,
+ IPA_UC_EVENT_ERROR = 0x1,
+ IPA_UC_EVENT_LOG_INFO = 0x2,
};
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
@@ -129,9 +129,10 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
- else
+ else if (shared->event != IPA_UC_EVENT_LOG_INFO)
dev_err(dev, "unsupported microcontroller event %hhu\n",
shared->event);
+ /* The LOG_INFO event can be safely ignored */
}
/* Microcontroller response IPA interrupt handler */
@@ -191,14 +192,19 @@ void ipa_uc_teardown(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ u32 val;
+ /* Fill in the command data */
shared->command = command;
shared->command_param = cpu_to_le32(command_param);
shared->command_param_hi = 0;
shared->response = 0;
shared->response_param = 0;
- iowrite32(1, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
+ /* Use an interrupt to tell the microcontroller the command is ready */
+ val = u32_encode_bits(1, UC_INTR_FMASK);
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
}
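A usage sketch (the command choice here is arbitrary, not from the patch):

	/* Ring the microcontroller doorbell with a no-op command */
	send_uc_command(ipa, IPA_UC_COMMAND_NO_OP, 0);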
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 85449df0f512..2944e2a89023 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -18,6 +18,7 @@ enum ipa_version {
IPA_VERSION_4_0, /* GSI version 2.0 */
IPA_VERSION_4_1, /* GSI version 2.1 */
IPA_VERSION_4_2, /* GSI version 2.2 */
+ IPA_VERSION_4_5, /* GSI version 2.5 */
};
#endif /* _IPA_VERSION_H_ */