Diffstat (limited to 'drivers/net/ipa'):

 drivers/net/ipa/gsi.c           | 117
 drivers/net/ipa/gsi.h           |  12
 drivers/net/ipa/gsi_private.h   |   6
 drivers/net/ipa/gsi_trans.h     |  12
 drivers/net/ipa/ipa_clock.c     |   8
 drivers/net/ipa/ipa_clock.h     |  10
 drivers/net/ipa/ipa_cmd.h       |  10
 drivers/net/ipa/ipa_endpoint.c  | 304
 drivers/net/ipa/ipa_gsi.h       |  13
 drivers/net/ipa/ipa_interrupt.h |   2
 drivers/net/ipa/ipa_main.c      |  13
 drivers/net/ipa/ipa_mem.c       |   7
 drivers/net/ipa/ipa_reg.h       |  60
 drivers/net/ipa/ipa_smp2p.h     |   2
 drivers/net/ipa/ipa_table.c     |   3
 drivers/net/ipa/ipa_table.h     |   4
 drivers/net/ipa/ipa_uc.c        |  15
 17 files changed, 336 insertions(+), 262 deletions(-)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index ac7e5a04c8ac..0e63d35320aa 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -336,6 +336,7 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
+ struct device *dev = gsi->dev;
u32 val;
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
@@ -344,8 +345,8 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
return 0; /* Success! */
- dev_err(gsi->dev, "GSI command %u to event ring %u timed out "
- "(state is %u)\n", opcode, evt_ring_id, evt_ring->state);
+ dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
+ opcode, evt_ring_id, evt_ring->state);
return -ETIMEDOUT;
}
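
Both evt_ring_command() and gsi_channel_command() funnel through gsi_command(), which writes the command register and waits for the completion that the GSI interrupt handler signals. A minimal sketch of that pattern, with a placeholder timeout constant since the real value is not shown in this patch:

    /* Sketch: issue a GSI command and wait for its completion.
     * GSI_CMD_TIMEOUT_SECONDS is a placeholder; the completion is
     * signaled from the GSI interrupt handler when the command is done.
     */
    static bool gsi_command(struct gsi *gsi, u32 reg, u32 val,
                            struct completion *completion)
    {
            reinit_completion(completion);

            iowrite32(val, gsi->virt + reg);  /* Kick off the command */

            /* Nonzero means the completion fired before the timeout */
            return !!wait_for_completion_timeout(completion,
                                                 GSI_CMD_TIMEOUT_SECONDS * HZ);
    }

A false return from this helper is what produces the timeout messages reworded in this patch.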
@@ -358,13 +359,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
-
- if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state %u before alloc\n",
+ evt_ring->state);
return -EINVAL;
+ }
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
+ dev_err(gsi->dev, "bad event ring state %u after alloc\n",
evt_ring->state);
ret = -EIO;
}
@@ -381,14 +384,14 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
- dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
+ dev_err(gsi->dev, "bad event ring state %u before reset\n",
evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
+ dev_err(gsi->dev, "bad event ring state %u after reset\n",
evt_ring->state);
}
@@ -399,14 +402,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
+ dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
+ dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
evt_ring->state);
}
@@ -429,6 +432,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
+ struct device *dev = gsi->dev;
u32 val;
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
@@ -437,8 +441,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
return 0; /* Success! */
- dev_err(gsi->dev,
- "GSI command %u to channel %u timed out (state is %u)\n",
+ dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
return -ETIMEDOUT;
@@ -448,21 +451,23 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct device *dev = gsi->dev;
enum gsi_channel_state state;
int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
- if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
+ dev_err(dev, "bad channel state %u before alloc\n", state);
return -EINVAL;
+ }
ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
- state);
+ dev_err(dev, "bad channel state %u after alloc\n", state);
ret = -EIO;
}
@@ -472,21 +477,23 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
+ struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
- state != GSI_CHANNEL_STATE_STOPPED)
+ state != GSI_CHANNEL_STATE_STOPPED) {
+ dev_err(dev, "bad channel state %u before start\n", state);
return -EINVAL;
+ }
ret = gsi_channel_command(channel, GSI_CH_START);
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(channel->gsi->dev,
- "bad channel state (%u) after start\n", state);
+ dev_err(dev, "bad channel state %u after start\n", state);
ret = -EIO;
}
@@ -496,6 +503,7 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
+ struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
int ret;
@@ -508,8 +516,10 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
return 0;
if (state != GSI_CHANNEL_STATE_STARTED &&
- state != GSI_CHANNEL_STATE_STOP_IN_PROC)
+ state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
+ dev_err(dev, "bad channel state %u before stop\n", state);
return -EINVAL;
+ }
ret = gsi_channel_command(channel, GSI_CH_STOP);
@@ -522,8 +532,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EAGAIN;
- dev_err(channel->gsi->dev,
- "bad channel state (%u) after stop\n", state);
+ dev_err(dev, "bad channel state %u after stop\n", state);
return -EIO;
}
@@ -531,6 +540,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
+ struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
int ret;
@@ -539,8 +549,7 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
state != GSI_CHANNEL_STATE_ERROR) {
- dev_err(channel->gsi->dev,
- "bad channel state (%u) before reset\n", state);
+ dev_err(dev, "bad channel state %u before reset\n", state);
return;
}
@@ -549,21 +558,20 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
- dev_err(channel->gsi->dev,
- "bad channel state (%u) after reset\n", state);
+ dev_err(dev, "bad channel state %u after reset\n", state);
}
/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct device *dev = gsi->dev;
enum gsi_channel_state state;
int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(gsi->dev,
- "bad channel state (%u) before dealloc\n", state);
+ dev_err(dev, "bad channel state %u before dealloc\n", state);
return;
}
@@ -572,8 +580,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev,
- "bad channel state (%u) after dealloc\n", state);
+ dev_err(dev, "bad channel state %u after dealloc\n", state);
}
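
Taken together, the state checks added above make the GSI channel lifecycle explicit. The allowed transitions, as enforced by these commands, are:

    NOT_ALLOCATED --alloc--> ALLOCATED --start--> STARTED
    STARTED (or STOP_IN_PROC) --stop--> STOPPED   (STOP_IN_PROC yields -EAGAIN)
    STOPPED (or ERROR) --reset--> ALLOCATED --dealloc--> NOT_ALLOCATED

A command issued from any other state now logs a "bad channel state" error before returning.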
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -1146,8 +1153,8 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
break;
default:
dev_err(gsi->dev,
- "%s: unrecognized type 0x%08x\n",
- __func__, gsi_intr);
+ "unrecognized interrupt type 0x%08x\n",
+ gsi_intr);
break;
}
} while (intr_mask);
@@ -1251,7 +1258,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
if (ring->virt && addr % size) {
dma_free_coherent(dev, size, ring->virt, ring->addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
- size);
+ size);
return -EINVAL; /* Not a good error value, but distinct */
} else if (!ring->virt) {
return -ENOMEM;
@@ -1356,7 +1363,7 @@ static void gsi_channel_update(struct gsi_channel *channel)
* gsi_channel_poll_one() - Return a single completed transaction on a channel
* @channel: Channel to be polled
*
- * @Return: Transaction pointer, or null if none are available
+ * Return: Transaction pointer, or null if none are available
*
* This function returns the first entry on a channel's completed transaction
* list. If that list is empty, the hardware is consulted to determine
@@ -1386,8 +1393,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
* gsi_channel_poll() - NAPI poll function for a channel
* @napi: NAPI structure for the channel
* @budget: Budget supplied by NAPI core
-
- * @Return: Number of items polled (<= budget)
+ *
+ * Return: Number of items polled (<= budget)
*
* Single transactions completed by hardware are polled until either
* the budget is exhausted, or there are no more. Each transaction
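
The comment describes the standard NAPI contract; a hedged sketch of a poll function honoring it, with the interrupt re-enable step elided since it is not shown in this patch:

    /* Sketch of a NAPI poll loop per the kerneldoc above: consume
     * completed transactions until the budget is spent or none remain,
     * and leave polling mode only when under budget.
     */
    static int gsi_channel_poll(struct napi_struct *napi, int budget)
    {
            struct gsi_channel *channel;
            int count = 0;

            channel = container_of(napi, struct gsi_channel, napi);
            while (count < budget) {
                    struct gsi_trans *trans;

                    trans = gsi_channel_poll_one(channel);
                    if (!trans)
                            break;
                    gsi_trans_complete(trans);
                    count++;
            }

            if (count < budget) {
                    napi_complete(&channel->napi);
                    /* The channel's completion interrupt is re-enabled here */
            }

            return count;
    }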
@@ -1642,12 +1649,13 @@ static void gsi_channel_teardown(struct gsi *gsi)
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi, bool legacy)
{
+ struct device *dev = gsi->dev;
u32 val;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
if (!(val & ENABLED_FMASK)) {
- dev_err(gsi->dev, "GSI has not been enabled\n");
+ dev_err(dev, "GSI has not been enabled\n");
return -EIO;
}
@@ -1655,24 +1663,24 @@ int gsi_setup(struct gsi *gsi, bool legacy)
gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
if (!gsi->channel_count) {
- dev_err(gsi->dev, "GSI reports zero channels supported\n");
+ dev_err(dev, "GSI reports zero channels supported\n");
return -EINVAL;
}
if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
- dev_warn(gsi->dev,
- "limiting to %u channels (hardware supports %u)\n",
+ dev_warn(dev,
+ "limiting to %u channels; hardware supports %u\n",
GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
}
gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
if (!gsi->evt_ring_count) {
- dev_err(gsi->dev, "GSI reports zero event rings supported\n");
+ dev_err(dev, "GSI reports zero event rings supported\n");
return -EINVAL;
}
if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
- dev_warn(gsi->dev,
- "limiting to %u event rings (hardware supports %u)\n",
+ dev_warn(dev,
+ "limiting to %u event rings; hardware supports %u\n",
GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
}
@@ -1758,19 +1766,19 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
/* Make sure channel ids are in the range driver supports */
if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
- dev_err(dev, "bad channel id %u (must be less than %u)\n",
+ dev_err(dev, "bad channel id %u; must be less than %u\n",
channel_id, GSI_CHANNEL_COUNT_MAX);
return false;
}
if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
- dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id);
+ dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
return false;
}
if (!data->channel.tlv_count ||
data->channel.tlv_count > GSI_TLV_MAX) {
- dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
+ dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
channel_id, data->channel.tlv_count, GSI_TLV_MAX);
return false;
}
@@ -1788,13 +1796,13 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
}
if (!is_power_of_2(data->channel.tre_count)) {
- dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
+ dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
channel_id, data->channel.tre_count);
return false;
}
if (!is_power_of_2(data->channel.event_count)) {
- dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
+ dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
channel_id, data->channel.event_count);
return false;
}
@@ -1948,6 +1956,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
u32 count, const struct ipa_gsi_endpoint_data *data,
bool modem_alloc)
{
+ struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
unsigned int irq;
@@ -1955,7 +1964,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
gsi_validate_build();
- gsi->dev = &pdev->dev;
+ gsi->dev = dev;
/* The GSI layer performs NAPI on all endpoints. NAPI requires a
* network device structure, but the GSI layer does not have one,
@@ -1966,43 +1975,41 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
/* Get the GSI IRQ and request for it to wake the system */
ret = platform_get_irq_byname(pdev, "gsi");
if (ret <= 0) {
- dev_err(gsi->dev,
- "DT error %d getting \"gsi\" IRQ property\n", ret);
+ dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
return ret ? : -EINVAL;
}
irq = ret;
ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
if (ret) {
- dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
return ret;
}
gsi->irq = irq;
ret = enable_irq_wake(gsi->irq);
if (ret)
- dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
+ dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
gsi->irq_wake_enabled = !ret;
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
- dev_err(gsi->dev,
- "DT error getting \"gsi\" memory property\n");
+ dev_err(dev, "DT error getting \"gsi\" memory property\n");
ret = -ENODEV;
goto err_disable_irq_wake;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
- dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
+ dev_err(dev, "DT memory resource \"gsi\" out of range\n");
ret = -EINVAL;
goto err_disable_irq_wake;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
- dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
+ dev_err(dev, "unable to remap \"gsi\" memory\n");
ret = -ENOMEM;
goto err_disable_irq_wake;
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 90a02194e7ad..061312773df0 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -167,7 +167,7 @@ struct gsi {
* @gsi: Address of GSI structure embedded in an IPA structure
* @legacy: Set up for legacy hardware
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*
* Performs initialization that must wait until the GSI hardware is
* ready (including firmware loaded).
@@ -185,7 +185,7 @@ void gsi_teardown(struct gsi *gsi);
* @gsi: GSI pointer
* @channel_id: Channel whose limit is to be returned
*
- * @Return: The maximum number of TREs oustanding on the channel
+ * Return: The maximum number of TREs outstanding on the channel
*/
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
@@ -194,7 +194,7 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
* @gsi: GSI pointer
* @channel_id: Channel whose limit is to be returned
*
- * @Return: The maximum TRE count per transaction on the channel
+ * Return: The maximum TRE count per transaction on the channel
*/
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
@@ -203,7 +203,7 @@ u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
* @gsi: GSI pointer
* @channel_id: Channel to start
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*/
int gsi_channel_start(struct gsi *gsi, u32 channel_id);
@@ -212,7 +212,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id);
* @gsi: GSI pointer returned by gsi_setup()
* @channel_id: Channel to stop
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*/
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
@@ -238,7 +238,7 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
* @gsi: Address of GSI structure embedded in an IPA structure
* @pdev: IPA platform device
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*
* Early stage initialization of the GSI subsystem, performing tasks
* that can be done before the GSI hardware is ready to use.
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index b57d0198ebc1..1785c9d3344d 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -44,7 +44,7 @@ void gsi_trans_complete(struct gsi_trans *trans);
* @channel: Channel associated with the transaction
* @index: Index of the TRE having a transaction
*
- * @Return: The GSI transaction pointer associated with the TRE index
+ * Return: The GSI transaction pointer associated with the TRE index
*/
struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
u32 index);
@@ -53,7 +53,7 @@ struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
* gsi_channel_trans_complete() - Return a channel's next completed transaction
* @channel: Channel whose next transaction is to be returned
*
- * @Return: The next completed transaction, or NULL if nothing new
+ * Return: The next completed transaction, or NULL if nothing new
*/
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel);
@@ -76,7 +76,7 @@ void gsi_channel_trans_cancel_pending(struct gsi_channel *channel);
* @gsi: GSI pointer
* @channel_id: Channel number
*
- * @Return: 0 if successful, or -ENOMEM on allocation failure
+ * Return: 0 if successful, or -ENOMEM on allocation failure
*
* Creates and sets up information for managing transactions on a channel
*/
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 1477fc15b30a..4d4606b5fa95 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -75,7 +75,7 @@ struct gsi_trans {
* @count: Minimum number of elements in the pool
* @max_alloc: Maximum number of elements allocated at a time from pool
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*/
int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
u32 max_alloc);
@@ -85,7 +85,7 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
* @pool: Pool pointer
* @count: Number of elements to allocate from the pool
*
- * @Return: Virtual address of element(s) allocated from the pool
+ * Return: Virtual address of element(s) allocated from the pool
*/
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count);
@@ -103,7 +103,7 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool);
* @count: Minimum number of elements in the pool
* @max_alloc: Maximum number of elements allocated at a time from pool
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*
* Structures in this pool reside in DMA-coherent memory.
*/
@@ -115,7 +115,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
* @pool: DMA pool pointer
* @addr: DMA address "handle" associated with the allocation
*
- * @Return: Virtual address of element allocated from the pool
+ * Return: Virtual address of element allocated from the pool
*
* Only one element at a time may be allocated from a DMA pool.
*/
@@ -134,7 +134,7 @@ void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
* @tre_count: Number of elements in the transaction
* @direction: DMA direction for entire SGL (or DMA_NONE)
*
- * @Return: A GSI transaction structure, or a null pointer if all
+ * Return: A GSI transaction structure, or a null pointer if all
* available transactions are in use
*/
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
@@ -175,7 +175,7 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
* @trans: Transaction
* @skb: Socket buffer for transfer (outbound)
*
- * @Return: 0, or -EMSGSIZE if socket data won't fit in transaction.
+ * Return: 0, or -EMSGSIZE if socket data won't fit in transaction.
*/
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb);
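
The pool interfaces documented in this header follow a fixed init/alloc/exit pattern. A short sketch, using an illustrative element type and counts that are not taken from this patch:

    /* Sketch: typical gsi_trans_pool usage per the kerneldoc above */
    struct example_elem {
            u32 value;
    };

    static int example_pool_usage(void)
    {
            struct gsi_trans_pool pool;
            struct example_elem *elem;
            int ret;

            /* At least 64 elements; at most 8 allocated in one call */
            ret = gsi_trans_pool_init(&pool, sizeof(*elem), 64, 8);
            if (ret)
                    return ret;

            elem = gsi_trans_pool_alloc(&pool, 2); /* two contiguous elements */
            elem[0].value = 1;
            elem[1].value = 2;
            /* Pool entries are recycled rather than individually freed */

            gsi_trans_pool_exit(&pool);

            return 0;
    }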
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index c5204fd58ac4..398f2e47043d 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -44,7 +44,7 @@
/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
- * @mutex; Protects clock enable/disable
+ * @mutex: Protects clock enable/disable
* @core: IPA core clock
* @memory_path: Memory interconnect
* @imem_path: Internal memory interconnect
@@ -256,6 +256,12 @@ void ipa_clock_put(struct ipa *ipa)
mutex_unlock(&clock->mutex);
}
+/* Return the current IPA core clock rate */
+u32 ipa_clock_rate(struct ipa *ipa)
+{
+ return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
+}
+
/* Initialize IPA clocking */
struct ipa_clock *ipa_clock_init(struct device *dev)
{
diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h
index bc52b35e6bb2..1d70f1de3875 100644
--- a/drivers/net/ipa/ipa_clock.h
+++ b/drivers/net/ipa/ipa_clock.h
@@ -11,10 +11,18 @@ struct device;
struct ipa;
/**
+ * ipa_clock_rate() - Return the current IPA core clock rate
+ * @ipa: IPA structure
+ *
+ * Return: The current clock rate (in Hz), or 0.
+ */
+u32 ipa_clock_rate(struct ipa *ipa);
+
+/**
* ipa_clock_init() - Initialize IPA clocking
* @dev: IPA device
*
- * @Return: A pointer to an ipa_clock structure, or a pointer-coded error
+ * Return: A pointer to an ipa_clock structure, or a pointer-coded error
*/
struct ipa_clock *ipa_clock_init(struct device *dev);
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 1a646e0264a0..f7e6f87facf7 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -61,7 +61,7 @@ struct ipa_cmd_info {
* @ipv6: - Whether the table is for IPv6 or IPv4
* @hashed: - Whether the table is hashed or non-hashed
*
- * @Return: true if region is valid, false otherwise
+ * Return: true if region is valid, false otherwise
*/
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route, bool ipv6, bool hashed);
@@ -70,7 +70,7 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
* ipa_cmd_data_valid() - Validate command-related configuration is valid
* @ipa: - IPA pointer
*
- * @Return: true if assumptions required for command are valid
+ * Return: true if assumptions required for command are valid
*/
bool ipa_cmd_data_valid(struct ipa *ipa);
@@ -95,7 +95,7 @@ static inline bool ipa_cmd_data_valid(struct ipa *ipa)
* @channel: AP->IPA command TX GSI channel pointer
* @tre_count: Number of pool elements to allocate
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*/
int ipa_cmd_pool_init(struct gsi_channel *gsi_channel, u32 tre_count);
@@ -166,7 +166,7 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans);
/**
* ipa_cmd_tag_process_count() - Number of commands in a tag process
*
- * @Return: The number of elements to allocate in a transaction
+ * Return: The number of elements to allocate in a transaction
* to hold tag process commands
*/
u32 ipa_cmd_tag_process_count(void);
@@ -184,7 +184,7 @@ void ipa_cmd_tag_process(struct ipa *ipa);
* @ipa: IPA pointer
* @tre_count: Number of elements in the transaction
*
- * @Return: A GSI transaction structure, or a null pointer if all
+ * Return: A GSI transaction structure, or a null pointer if all
* available transactions are in use
*/
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9e58e495d373..b7efd7c95e9c 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -21,6 +21,7 @@
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
+#include "ipa_clock.h"
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
@@ -36,7 +37,7 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
+#define IPA_AGGR_TIME_LIMIT_DEFAULT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
@@ -318,41 +319,102 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
/* assert(endpoint->toward_ipa); */
- (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ /* Delay mode doesn't work properly for IPA v4.2 */
+ if (endpoint->ipa->version != IPA_VERSION_4_2)
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
}
-/* Returns previous suspend state (true means it was enabled) */
+static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
+ u32 val;
+
+ /* assert(mask & ipa->available); */
+ offset = ipa_reg_state_aggr_active_offset(ipa->version);
+ val = ioread32(ipa->reg_virt + offset);
+
+ return !!(val & mask);
+}
+
+static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+
+ /* assert(mask & ipa->available); */
+ iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+}
+
+/**
+ * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
+ * @endpoint: Endpoint on which to emulate a suspend
+ *
+ * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
+ * with an open aggregation frame. This is to work around a hardware
+ * issue in IPA version 3.5.1 where the suspend interrupt will not be
+ * generated when it should be.
+ */
+static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+
+ if (!endpoint->data->aggregation)
+ return;
+
+ /* Nothing to do if the endpoint doesn't have aggregation open */
+ if (!ipa_endpoint_aggr_active(endpoint))
+ return;
+
+ /* Force close aggregation */
+ ipa_endpoint_force_close(endpoint);
+
+ ipa_interrupt_simulate_suspend(ipa->interrupt);
+}
+
+/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
+ bool suspended;
+
+ if (endpoint->ipa->version != IPA_VERSION_3_5_1)
+ return enable; /* For IPA v4.0+, no change made */
+
/* assert(!endpoint->toward_ipa); */
- return ipa_endpoint_init_ctrl(endpoint, enable);
+ suspended = ipa_endpoint_init_ctrl(endpoint, enable);
+
+ /* A client suspended with an open aggregation frame will not
+ * generate a SUSPEND IPA interrupt. If enabling suspend, have
+ * ipa_endpoint_suspend_aggr() handle this.
+ */
+ if (enable && !suspended)
+ ipa_endpoint_suspend_aggr(endpoint);
+
+ return suspended;
}
/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
- bool support_suspend;
u32 endpoint_id;
/* DELAY mode doesn't work correctly on IPA v4.2 */
if (ipa->version == IPA_VERSION_4_2)
return;
- /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
- support_suspend = ipa->version == IPA_VERSION_3_5_1;
-
for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
- /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
+ /* Set TX delay mode or RX suspend mode */
if (endpoint->toward_ipa)
ipa_endpoint_program_delay(endpoint, enable);
- else if (support_suspend)
+ else
(void)ipa_endpoint_program_suspend(endpoint, enable);
}
}
@@ -437,6 +499,9 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
}
/**
+ * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
+ * @endpoint: Endpoint pointer
+ *
* We program QMAP endpoints so each packet received is preceded by a QMAP
* header structure. The QMAP header contains a 1-byte mux_id and 2-byte
* packet size field, and we have the IPA hardware populate both for each
@@ -527,10 +592,13 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
u32 val = 0;
u32 offset;
+ if (endpoint->toward_ipa)
+ return; /* Register not valid for TX endpoints */
+
offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
- if (!endpoint->toward_ipa && endpoint->data->qmap)
+ if (endpoint->data->qmap)
val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -541,7 +609,10 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
u32 val;
- if (endpoint->toward_ipa && endpoint->data->dma_mode) {
+ if (!endpoint->toward_ipa)
+ return; /* Register not valid for RX endpoints */
+
+ if (endpoint->data->dma_mode) {
enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
u32 dma_endpoint_id;
@@ -552,7 +623,7 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
} else {
val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
}
- /* Other bitfields unspecified (and 0) */
+ /* All other bits unspecified (and 0) */
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -576,17 +647,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
- u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
u32 limit;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- val |= u32_encode_bits(aggr_size,
- AGGR_BYTE_LIMIT_FMASK);
+
+ limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
+
limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
- val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
- AGGR_TIME_LIMIT_FMASK);
- val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
+ limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+ val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
+
+ /* AGGR_PKT_LIMIT is 0 (unlimited) */
+
if (endpoint->data->rx.aggr_close_eof)
val |= AGGR_SW_EOF_ACTIVE_FMASK;
/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -605,63 +679,70 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
-/* A return value of 0 indicates an error */
+/* The head-of-line blocking timer is defined as a tick count, where each
+ * tick represents 128 cycles of the IPA core clock. Return the value
+ * that should be written to that register that represents the timeout
+ * period provided.
+ */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
+ u32 width;
u32 scale;
- u32 base;
+ u64 ticks;
+ u64 rate;
+ u32 high;
u32 val;
if (!microseconds)
- return 0; /* invalid delay */
-
- /* Timer is represented in units of clock ticks. */
- if (ipa->version < IPA_VERSION_4_2)
- return microseconds; /* XXX Needs to be computed */
-
- /* IPA v4.2 represents the tick count as base * scale */
- scale = 1; /* XXX Needs to be computed */
- if (scale > field_max(SCALE_FMASK))
- return 0; /* scale too big */
-
- base = DIV_ROUND_CLOSEST(microseconds, scale);
- if (base > field_max(BASE_VALUE_FMASK))
- return 0; /* microseconds too big */
+ return 0; /* Nothing to compute if timer period is 0 */
+
+ /* Use 64 bit arithmetic to avoid overflow... */
+ rate = ipa_clock_rate(ipa);
+ ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
+ /* ...but we still need to fit into a 32-bit register */
+ WARN_ON(ticks > U32_MAX);
+
+ /* IPA v3.5.1 just records the tick count */
+ if (ipa->version == IPA_VERSION_3_5_1)
+ return (u32)ticks;
+
+ /* For IPA v4.2, the tick count is represented by base and
+ * scale fields within the 32-bit timer register, where:
+ * ticks = base << scale;
+ * The best precision is achieved when the base value is as
+ * large as possible. Find the highest set bit in the tick
+ * count, and extract the number of bits in the base field
+ * such that the high bit is included.
+ */
+ high = fls(ticks); /* 1..32 */
+ width = HWEIGHT32(BASE_VALUE_FMASK);
+ scale = high > width ? high - width : 0;
+ if (scale) {
+ /* If we're scaling, round up to get a closer result */
+ ticks += 1 << (scale - 1);
+ /* High bit was set, so rounding might have affected it */
+ if (fls(ticks) != high)
+ scale++;
+ }
val = u32_encode_bits(scale, SCALE_FMASK);
- val |= u32_encode_bits(base, BASE_VALUE_FMASK);
+ val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
return val;
}
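
To make the base/scale encoding concrete, here is a worked example assuming a hypothetical 100 MHz core clock (the real rate comes from ipa_clock_rate()) and a 10 millisecond timeout:

    /* ticks = DIV_ROUND_CLOSEST(10000 * 100000000ULL, 128 * USEC_PER_SEC)
     *       = 7813                     (13 significant bits; fls() = 13)
     * width = HWEIGHT32(BASE_VALUE_FMASK) = 5   (5-bit base field)
     * scale = 13 - 5 = 8
     * ticks += 1 << 7                  (round up; ticks = 7941, fls() still 13)
     * base  = 7941 >> 8 = 31
     *
     * The register value encodes base 31 with scale 8, representing
     * 31 << 8 = 7936 ticks, a close approximation of the 7813 requested.
     */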
-static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
- u32 microseconds)
+/* If microseconds is 0, timeout is immediate */
+static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
+ u32 microseconds)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
u32 offset;
u32 val;
- /* XXX We'll fix this when the register definition is clear */
- if (microseconds) {
- struct device *dev = &ipa->pdev->dev;
-
- dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
- endpoint_id);
- microseconds = 0;
- }
-
- if (microseconds) {
- val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
- if (!val)
- return -EINVAL;
- } else {
- val = 0; /* timeout is immediate */
- }
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
+ val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
iowrite32(val, ipa->reg_virt + offset);
-
- return 0;
}
static void
@@ -671,7 +752,7 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
u32 offset;
u32 val;
- val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
+ val = enable ? HOL_BLOCK_EN_FMASK : 0;
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -683,10 +764,10 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[i];
- if (endpoint->ee_id != GSI_EE_MODEM)
+ if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
- (void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
+ ipa_endpoint_init_hol_block_timer(endpoint, 0);
ipa_endpoint_init_hol_block_enable(endpoint, true);
}
}
@@ -696,6 +777,9 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
u32 val = 0;
+ if (!endpoint->toward_ipa)
+ return; /* Register not valid for RX endpoints */
+
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
@@ -710,6 +794,9 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
u32 seq_type = endpoint->seq_type;
u32 val = 0;
+ if (!endpoint->toward_ipa)
+ return; /* Register not valid for RX endpoints */
+
/* Sequencer type is made up of four nibbles */
val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
@@ -837,6 +924,8 @@ err_free_pages:
/**
* ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * @endpoint: Endpoint to be replenished
+ * @count: Number of buffers to send to hardware
*
* Allocate RX packet wrapper structures with maximal socket buffers
* for an endpoint. These are supplied to the hardware, which fills
@@ -1139,29 +1228,6 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa)
ipa_endpoint_default_route_set(ipa, 0);
}
-static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
-{
- u32 mask = BIT(endpoint->endpoint_id);
- struct ipa *ipa = endpoint->ipa;
- u32 offset;
- u32 val;
-
- /* assert(mask & ipa->available); */
- offset = ipa_reg_state_aggr_active_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
-
- return !!(val & mask);
-}
-
-static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
-{
- u32 mask = BIT(endpoint->endpoint_id);
- struct ipa *ipa = endpoint->ipa;
-
- /* assert(mask & ipa->available); */
- iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
-}
-
/**
* ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
* @endpoint: Endpoint to be reset
@@ -1170,7 +1236,7 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
* on its underlying GSI channel, a special sequence of actions must be
* taken to ensure the IPA pipeline is properly cleared.
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*/
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
@@ -1206,8 +1272,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
gsi_channel_reset(gsi, endpoint->channel_id, false);
/* Make sure the channel isn't suspended */
- if (endpoint->ipa->version == IPA_VERSION_3_5_1)
- suspended = ipa_endpoint_program_suspend(endpoint, false);
+ suspended = ipa_endpoint_program_suspend(endpoint, false);
/* Start channel and do a 1 byte read */
ret = gsi_channel_start(gsi, endpoint->channel_id);
@@ -1290,23 +1355,18 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
- if (endpoint->toward_ipa) {
- if (endpoint->ipa->version != IPA_VERSION_4_2)
- ipa_endpoint_program_delay(endpoint, false);
- ipa_endpoint_init_hdr_ext(endpoint);
- ipa_endpoint_init_aggr(endpoint);
- ipa_endpoint_init_deaggr(endpoint);
- ipa_endpoint_init_seq(endpoint);
- } else {
- if (endpoint->ipa->version == IPA_VERSION_3_5_1)
- (void)ipa_endpoint_program_suspend(endpoint, false);
- ipa_endpoint_init_hdr_ext(endpoint);
- ipa_endpoint_init_aggr(endpoint);
- }
+ if (endpoint->toward_ipa)
+ ipa_endpoint_program_delay(endpoint, false);
+ else
+ (void)ipa_endpoint_program_suspend(endpoint, false);
ipa_endpoint_init_cfg(endpoint);
ipa_endpoint_init_hdr(endpoint);
+ ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_seq(endpoint);
ipa_endpoint_status(endpoint);
}
@@ -1362,34 +1422,6 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
endpoint->endpoint_id);
}
-/**
- * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
- * @endpoint_id: Endpoint on which to emulate a suspend
- *
- * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
- * with an open aggregation frame. This is to work around a hardware
- * issue in IPA version 3.5.1 where the suspend interrupt will not be
- * generated when it should be.
- */
-static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
-{
- struct ipa *ipa = endpoint->ipa;
-
- /* assert(ipa->version == IPA_VERSION_3_5_1); */
-
- if (!endpoint->data->aggregation)
- return;
-
- /* Nothing to do if the endpoint doesn't have aggregation open */
- if (!ipa_endpoint_aggr_active(endpoint))
- return;
-
- /* Force close aggregation */
- ipa_endpoint_force_close(endpoint);
-
- ipa_interrupt_simulate_suspend(ipa->interrupt);
-}
-
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
@@ -1403,19 +1435,11 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
ipa_endpoint_replenish_disable(endpoint);
- /* IPA v3.5.1 doesn't use channel stop for suspend */
- stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
- if (!endpoint->toward_ipa && !stop_channel) {
- /* Due to a hardware bug, a client suspended with an open
- * aggregation frame will not generate a SUSPEND IPA
- * interrupt. We work around this by force-closing the
- * aggregation frame, then simulating the arrival of such
- * an interrupt.
- */
+ if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, true);
- ipa_endpoint_suspend_aggr(endpoint);
- }
+ /* IPA v3.5.1 doesn't use channel stop for suspend */
+ stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
if (ret)
dev_err(dev, "error %d suspending channel %u\n", ret,
@@ -1432,11 +1456,11 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
return;
- /* IPA v3.5.1 doesn't use channel start for resume */
- start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
- if (!endpoint->toward_ipa && !start_channel)
+ if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, false);
+ /* IPA v3.5.1 doesn't use channel start for resume */
+ start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
if (ret)
dev_err(dev, "error %d resuming channel %u\n", ret,
diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h
index 0a40f3dc55fc..c02cb6f3a2e1 100644
--- a/drivers/net/ipa/ipa_gsi.h
+++ b/drivers/net/ipa/ipa_gsi.h
@@ -43,9 +43,9 @@ void ipa_gsi_trans_release(struct gsi_trans *trans);
*/
void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
u32 byte_count);
+
/**
- * ipa_gsi_trans_complete() - GSI transaction completion callback
-ipa_gsi_channel_tx_completed()
+ * ipa_gsi_channel_tx_completed() - GSI transaction completion callback
* @gsi: GSI pointer
* @channel_id: Channel number
* @count: Number of transactions completed since last report
@@ -57,6 +57,15 @@ ipa_gsi_channel_tx_completed()
void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
u32 byte_count);
+/**
+ * ipa_gsi_endpoint_data_empty() - Empty endpoint config data test
+ * @data: endpoint configuration data
+ *
+ * Determines whether an endpoint configuration data entry is empty,
+ * meaning it contains no valid configuration information and should
+ * be ignored.
+ *
+ * Return: true if empty; false otherwise
+ */
bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data);
#endif /* _IPA_GSI_TRANS_H_ */
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index d4f4c1c9f0b1..727e9c5044d1 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -104,7 +104,7 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
* ipa_interrupt_setup() - Set up the IPA interrupt framework
* @ipa: IPA pointer
*
- * @Return: Pointer to IPA SMP2P info, or a pointer-coded error
+ * Return: Pointer to IPA interrupt info, or a pointer-coded error
*/
struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 76d5108b8403..1fdfec41e442 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -277,6 +277,7 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
/**
* ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
+ * @ipa: IPA pointer
*
* Configures when the IPA signals it is idle to the global clock
* controller, which can respond by scaling down the clock to
@@ -495,6 +496,7 @@ static void ipa_resource_deconfig(struct ipa *ipa)
/**
* ipa_config() - Configure IPA hardware
* @ipa: IPA pointer
+ * @data: IPA configuration data
*
* Perform initialization requiring IPA clock to be enabled.
*/
@@ -674,6 +676,11 @@ static void ipa_validate_build(void)
/* This is used as a divisor */
BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
+
+ /* Aggregation granularity value can't be 0, and must fit */
+ BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
+ BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
+ field_max(AGGR_GRANULARITY));
#endif /* IPA_VALIDATE */
}
@@ -681,7 +688,7 @@ static void ipa_validate_build(void)
* ipa_probe() - IPA platform driver probe function
* @pdev: Platform device pointer
*
- * @Return: 0 if successful, or a negative error code (possibly
+ * Return: 0 if successful, or a negative error code (possibly
* EPROBE_DEFER)
*
* This is the main entry point for the IPA driver. Initialization proceeds
@@ -897,7 +904,7 @@ static int ipa_remove(struct platform_device *pdev)
* ipa_suspend() - Power management system suspend callback
* @dev: IPA device structure
*
- * @Return: Zero
+ * Return: Always returns zero
*
* Called by the PM framework when a system suspend operation is invoked.
*/
@@ -915,7 +922,7 @@ static int ipa_suspend(struct device *dev)
* ipa_resume() - Power management system resume callback
* @dev: IPA device structure
*
- * @Return: Always returns 0
+ * Return: Always returns 0
*
* Called by the PM framework when a system resume operation is invoked.
*/
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 3ef814119aab..2d45c444a67f 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -41,6 +41,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
/**
* ipa_mem_setup() - Set up IPA AP and modem shared memory areas
+ * @ipa: IPA pointer
*
* Set up the shared memory regions in IPA local memory. This involves
* zero-filling memory regions, and in the case of header memory, telling
@@ -52,7 +53,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
* The AP informs the modem where its portions of memory are located
* in a QMI exchange that occurs at modem startup.
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*/
int ipa_mem_setup(struct ipa *ipa)
{
@@ -137,8 +138,9 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
/**
* ipa_mem_config() - Configure IPA shared memory
+ * @ipa: IPA pointer
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*/
int ipa_mem_config(struct ipa *ipa)
{
@@ -238,6 +240,7 @@ void ipa_mem_deconfig(struct ipa *ipa)
/**
* ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
+ * @ipa: IPA pointer
*
* Zero regions of IPA-local memory used by the modem. These are configured
* (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 0a688d8c1d7c..eb4e39fa7d4b 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -32,10 +32,12 @@ struct ipa;
* parameter is supplied to the offset macro. The "ee" value is a member of
* the gsi_ee enumerated type.
*
- * The offset of a register dependent on endpoint id is computed by a macro
- * that is supplied a parameter "ep". The "ep" value is assumed to be less
- * than the maximum endpoint value for the current hardware, and that will
- * not exceed IPA_ENDPOINT_MAX.
+ * The offset of a register dependent on endpoint ID is computed by a macro
+ * that is supplied a parameter "ep", "txep", or "rxep". A register with an
+ * "ep" parameter is valid for any endpoint; a register with a "txep" or
+ * "rxep" parameter is valid only for TX or RX endpoints, respectively. The
+ * "*ep" value is assumed to be less than the maximum valid endpoint ID
+ * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX.
*
* The offset of registers related to filter and route tables is computed
* by a macro that is supplied a parameter "er". The "er" represents an
@@ -190,24 +192,23 @@ static inline u32 ipa_reg_bcr_val(enum ipa_version version)
return 0x00000000;
}
-
#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+/* The internal inactivity timer clock is used for the aggregation timer */
+#define TIMER_FREQUENCY 32000 /* 32 kHz inactivity timer clock */
+
#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
#define AGGR_GRANULARITY GENMASK(8, 4)
-/* Compute the value to use in the AGGR_GRANULARITY field representing
- * the given number of microseconds (up to 1 millisecond).
- * x = (32 * usec) / 1000 - 1
+/* Compute the value to use in the AGGR_GRANULARITY field representing the
+ * given number of microseconds. The value is one less than the number of
+ * timer ticks in the requested period. Zero is not a valid granularity value.
*/
-static inline u32 ipa_aggr_granularity_val(u32 microseconds)
+static inline u32 ipa_aggr_granularity_val(u32 usec)
{
- /* assert(microseconds >= 16); (?) */
- /* assert(microseconds <= 1015); */
-
- return DIV_ROUND_CLOSEST(32 * microseconds, 1000) - 1;
+ return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
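
A quick arithmetic check of the new formula, using a 500 microsecond granularity as an example value:

    /* ipa_aggr_granularity_val(500)
     *     = DIV_ROUND_CLOSEST(500 * 32000, USEC_PER_SEC) - 1
     *     = 16 - 1 = 15
     *
     * This is nonzero and fits in the 5-bit AGGR_GRANULARITY field
     * (GENMASK(8, 4), maximum 31), which is exactly what the
     * BUILD_BUG_ON() checks added to ipa_validate_build() verify.
     */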
#define IPA_REG_TX_CFG_OFFSET 0x000001fc
@@ -293,11 +294,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
-#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(ep) \
- (0x00000818 + 0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
+ (0x00000818 + 0x0070 * (rxep))
-#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(ep) \
- (0x00000820 + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
+ (0x00000820 + 0x0070 * (txep))
#define MODE_FMASK GENMASK(2, 0)
#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
@@ -316,19 +319,21 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(ep) \
- (0x0000082c + 0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
+ (0x0000082c + 0x0070 * (rxep))
#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
-/* The next register is valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(ep) \
- (0x00000830 + 0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
+ (0x00000830 + 0x0070 * (rxep))
/* The next fields are present for IPA v4.2 only */
#define BASE_VALUE_FMASK GENMASK(4, 0)
#define SCALE_FMASK GENMASK(12, 8)
-#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(ep) \
- (0x00000834 + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
+ (0x00000834 + 0x0070 * (txep))
#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
@@ -338,8 +343,9 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
(0x00000838 + 0x0070 * (ep))
#define RSRC_GRP_FMASK GENMASK(1, 0)
-#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(ep) \
- (0x0000083c + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
+ (0x0000083c + 0x0070 * (txep))
#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0)
#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4)
#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
@@ -353,7 +359,7 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
/* The next field is present for IPA v4.0 and above */
#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-/* "er" is either an endpoint id (for filters) or a route id (for routes) */
+/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */
#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
(0x0000085c + 0x0070 * (er))
#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 1f65cdc9d406..bf0e4063cfd9 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -15,7 +15,7 @@ struct ipa;
* @ipa: IPA pointer
* @modem_init: Whether the modem is responsible for GSI initialization
*
- * @Return: 0 if successful, or a negative error code
+ * Return: 0 if successful, or a negative error code
*
*/
int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 9df2a3e78c98..2098ca2f2c90 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -505,7 +505,7 @@ void ipa_table_teardown(struct ipa *ipa)
/**
* ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
- * @endpoint_id: Endpoint whose filter hash tuple should be zeroed
+ * @endpoint: Endpoint whose filter hash tuple should be zeroed
*
* Endpoint must be for the AP (not modem) and support filtering. Updates
* the filter hash values without changing route ones.
@@ -560,6 +560,7 @@ static bool ipa_route_id_modem(u32 route_id)
/**
* ipa_route_tuple_zero() - Zero a hashed route table entry tuple
+ * @ipa: IPA pointer
* @route_id: Route table entry whose hash tuple should be zeroed
*
* Updates the route hash values without changing filter ones.
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 64ea0221441a..78038d14fcea 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -25,7 +25,7 @@ struct ipa;
* ipa_table_valid() - Validate route and filter table memory regions
* @ipa: IPA pointer
- * @Return: true if all regions are valid, false otherwise
+ * Return: true if all regions are valid, false otherwise
*/
bool ipa_table_valid(struct ipa *ipa);
@@ -33,7 +33,7 @@ bool ipa_table_valid(struct ipa *ipa);
* ipa_filter_map_valid() - Validate a filter table endpoint bitmap
* @ipa: IPA pointer
*
- * @Return: true if all regions are valid, false otherwise
+ * Return: true if the filter map is valid, false otherwise
*/
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index a1f8db00d55a..1a0b04e0ab74 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -35,31 +35,34 @@
*/
/* Supports hardware interface version 0x2000 */
-/* Offset relative to the base of the IPA shared address space of the
- * shared region used for communication with the microcontroller. The
- * region is 128 bytes in size, but only the first 40 bytes are used.
- */
-#define IPA_MEM_UC_OFFSET 0x0000
-
/* Delay to allow the microcontroller to save state when crashing */
#define IPA_SEND_DELAY 100 /* microseconds */
/**
* struct ipa_uc_mem_area - AP/microcontroller shared memory area
* @command: command code (AP->microcontroller)
+ * @reserved0: reserved bytes; avoid reading or writing
* @command_param: low 32 bits of command parameter (AP->microcontroller)
* @command_param_hi: high 32 bits of command parameter (AP->microcontroller)
*
* @response: response code (microcontroller->AP)
+ * @reserved1: reserved bytes; avoid reading or writing
* @response_param: response parameter (microcontroller->AP)
*
* @event: event code (microcontroller->AP)
+ * @reserved2: reserved bytes; avoid reading or writing
* @event_param: event parameter (microcontroller->AP)
*
* @first_error_address: address of first error-source on SNOC
* @hw_state: state of hardware (including error type information)
* @warning_counter: counter of non-fatal hardware errors
+ * @reserved3: reserved bytes; avoid reading or writing
* @interface_version: hardware-reported interface version
+ * @reserved4: reserved bytes; avoid reading or writing
+ *
+ * A shared memory area at the base of IPA resident memory is used for
+ * communication with the microcontroller. The region is 128 bytes in
+ * size, but only the first 40 bytes (structured this way) are used.
*/
struct ipa_uc_mem_area {
u8 command; /* enum ipa_uc_command */
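
The structure definition is truncated here. A sketch of a layout consistent with the kerneldoc above, with field widths inferred so the documented fields total the stated 40 bytes (the widths are assumptions, not taken from this patch):

    /* Sketch only: field sizes inferred from the kerneldoc above */
    struct ipa_uc_mem_area_sketch {
            u8 command;                     /* enum ipa_uc_command */
            u8 reserved0[3];
            __le32 command_param;
            __le32 command_param_hi;
            u8 response;
            u8 reserved1[3];
            __le32 response_param;
            u8 event;
            u8 reserved2[3];
            __le32 event_param;
            __le32 first_error_address;
            u8 hw_state;
            u8 warning_counter;
            __le16 reserved3;
            __le16 interface_version;
            __le16 reserved4;
    };                                      /* 40 bytes total */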