Diffstat (limited to 'drivers/net/ipa')
 drivers/net/ipa/gsi.c             | 125
 drivers/net/ipa/gsi.h             |  15
 drivers/net/ipa/ipa.h             |  10
 drivers/net/ipa/ipa_cmd.c         |  59
 drivers/net/ipa/ipa_cmd.h         |  11
 drivers/net/ipa/ipa_data-sc7180.c |  14
 drivers/net/ipa/ipa_data-sdm845.c |  15
 drivers/net/ipa/ipa_data.h        |  29
 drivers/net/ipa/ipa_endpoint.c    | 174
 drivers/net/ipa/ipa_endpoint.h    |   2
 drivers/net/ipa/ipa_main.c        |   8
 drivers/net/ipa/ipa_mem.c         | 210
 drivers/net/ipa/ipa_mem.h         |   3
 13 files changed, 394 insertions(+), 281 deletions(-)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 8d9ca1c335e8..012304ddaed2 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -238,11 +238,6 @@ static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
 }
 
-static void gsi_isr_ieob_clear(struct gsi *gsi, u32 mask)
-{
-	iowrite32(mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
-}
-
 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
 {
 	u32 val;
@@ -415,13 +410,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 			evt_ring->state);
 }
 
-/* Return the hardware's notion of the current state of a channel */
-static enum gsi_channel_state
-gsi_channel_state(struct gsi *gsi, u32 channel_id)
+/* Fetch the current state of a channel from hardware */
+static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
 {
+	u32 channel_id = gsi_channel_id(channel);
+	void *virt = channel->gsi->virt;
 	u32 val;
 
-	val = ioread32(gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
 
 	return u32_get_bits(val, CHSTATE_FMASK);
 }
@@ -432,16 +428,18 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 {
 	struct completion *completion = &channel->completion;
 	u32 channel_id = gsi_channel_id(channel);
+	struct gsi *gsi = channel->gsi;
 	u32 val;
 
 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
 	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
 
-	if (gsi_command(channel->gsi, GSI_CH_CMD_OFFSET, val, completion))
+	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
 		return 0;	/* Success! */
 
-	dev_err(channel->gsi->dev, "GSI command %u to channel %u timed out "
-		"(state is %u)\n", opcode, channel_id, channel->state);
+	dev_err(gsi->dev,
+		"GSI command %u to channel %u timed out (state is %u)\n",
+		opcode, channel_id, gsi_channel_state(channel));
 
 	return -ETIMEDOUT;
 }
@@ -450,18 +448,21 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
+	enum gsi_channel_state state;
 	int ret;
 
 	/* Get initial channel state */
-	channel->state = gsi_channel_state(gsi, channel_id);
-
-	if (channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+	state = gsi_channel_state(channel);
+	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
 		return -EINVAL;
 
 	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
-	if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+
+	/* Channel state will normally have been updated */
+	state = gsi_channel_state(channel);
+	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
 		dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
-			channel->state);
+			state);
 		ret = -EIO;
 	}
 
@@ -471,18 +472,21 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
 /* Start an ALLOCATED channel */
 static int gsi_channel_start_command(struct gsi_channel *channel)
 {
-	enum gsi_channel_state state = channel->state;
+	enum gsi_channel_state state;
 	int ret;
 
+	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
 	    state != GSI_CHANNEL_STATE_STOPPED)
 		return -EINVAL;
 
 	ret = gsi_channel_command(channel, GSI_CH_START);
+
+	/* Channel state will normally have been updated */
+	state = gsi_channel_state(channel);
+	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
 		dev_err(channel->gsi->dev,
"bad channel state (%u) after start\n", - channel->state); + "bad channel state (%u) after start\n", state); ret = -EIO; } @@ -492,23 +496,27 @@ static int gsi_channel_start_command(struct gsi_channel *channel) /* Stop a GSI channel in STARTED state */ static int gsi_channel_stop_command(struct gsi_channel *channel) { - enum gsi_channel_state state = channel->state; + enum gsi_channel_state state; int ret; + state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_STARTED && state != GSI_CHANNEL_STATE_STOP_IN_PROC) return -EINVAL; ret = gsi_channel_command(channel, GSI_CH_STOP); - if (ret || channel->state == GSI_CHANNEL_STATE_STOPPED) + + /* Channel state will normally have been updated */ + state = gsi_channel_state(channel); + if (ret || state == GSI_CHANNEL_STATE_STOPPED) return ret; /* We may have to try again if stop is in progress */ - if (channel->state == GSI_CHANNEL_STATE_STOP_IN_PROC) + if (state == GSI_CHANNEL_STATE_STOP_IN_PROC) return -EAGAIN; - dev_err(channel->gsi->dev, "bad channel state (%u) after stop\n", - channel->state); + dev_err(channel->gsi->dev, + "bad channel state (%u) after stop\n", state); return -EIO; } @@ -516,41 +524,49 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) /* Reset a GSI channel in ALLOCATED or ERROR state. */ static void gsi_channel_reset_command(struct gsi_channel *channel) { + enum gsi_channel_state state; int ret; msleep(1); /* A short delay is required before a RESET command */ - if (channel->state != GSI_CHANNEL_STATE_STOPPED && - channel->state != GSI_CHANNEL_STATE_ERROR) { + state = gsi_channel_state(channel); + if (state != GSI_CHANNEL_STATE_STOPPED && + state != GSI_CHANNEL_STATE_ERROR) { dev_err(channel->gsi->dev, - "bad channel state (%u) before reset\n", - channel->state); + "bad channel state (%u) before reset\n", state); return; } ret = gsi_channel_command(channel, GSI_CH_RESET); - if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) + + /* Channel state will normally have been updated */ + state = gsi_channel_state(channel); + if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) dev_err(channel->gsi->dev, - "bad channel state (%u) after reset\n", - channel->state); + "bad channel state (%u) after reset\n", state); } /* Deallocate an ALLOCATED GSI channel */ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; + enum gsi_channel_state state; int ret; - if (channel->state != GSI_CHANNEL_STATE_ALLOCATED) { - dev_err(gsi->dev, "bad channel state (%u) before dealloc\n", - channel->state); + state = gsi_channel_state(channel); + if (state != GSI_CHANNEL_STATE_ALLOCATED) { + dev_err(gsi->dev, + "bad channel state (%u) before dealloc\n", state); return; } ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC); - if (!ret && channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED) - dev_err(gsi->dev, "bad channel state (%u) after dealloc\n", - channel->state); + + /* Channel state will normally have been updated */ + state = gsi_channel_state(channel); + if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED) + dev_err(gsi->dev, + "bad channel state (%u) after dealloc\n", state); } /* Ring an event ring doorbell, reporting the last entry processed by the AP. 
@@ -756,7 +772,6 @@ static void gsi_channel_deprogram(struct gsi_channel *channel)
 int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
-	u32 evt_ring_id = channel->evt_ring_id;
 	int ret;
 
 	mutex_lock(&gsi->mutex);
@@ -765,9 +780,6 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 
 	mutex_unlock(&gsi->mutex);
 
-	/* Clear the channel's event ring interrupt in case it's pending */
-	gsi_isr_ieob_clear(gsi, BIT(evt_ring_id));
-
 	gsi_channel_thaw(channel);
 
 	return ret;
@@ -777,6 +789,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
+	enum gsi_channel_state state;
 	u32 retries;
 	int ret;
 
@@ -786,7 +799,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 	 * STOP command timed out.  We won't stop a channel if stopping it
 	 * was successful previously (so we still want the freeze above).
 	 */
-	if (channel->state == GSI_CHANNEL_STATE_STOPPED)
+	state = gsi_channel_state(channel);
+	if (state == GSI_CHANNEL_STATE_STOPPED)
 		return 0;
 
 	/* RX channels might require a little time to enter STOPPED state */
@@ -811,18 +825,18 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 }
 
 /* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
 
 	mutex_lock(&gsi->mutex);
 
-	/* Due to a hardware quirk we need to reset RX channels twice. */
 	gsi_channel_reset_command(channel);
-	if (!channel->toward_ipa)
+	/* Due to a hardware quirk we may need to reset RX channels twice. */
+	if (legacy && !channel->toward_ipa)
 		gsi_channel_reset_command(channel);
 
-	gsi_channel_program(channel, db_enable);
+	gsi_channel_program(channel, legacy);
 	gsi_channel_trans_cancel_pending(channel);
 
 	mutex_unlock(&gsi->mutex);
@@ -940,7 +954,6 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
 		channel_mask ^= BIT(channel_id);
 
 		channel = &gsi->channel[channel_id];
-		channel->state = gsi_channel_state(gsi, channel_id);
 
 		complete(&channel->completion);
 	}
@@ -1071,7 +1084,7 @@ static void gsi_isr_ieob(struct gsi *gsi)
 	u32 event_mask;
 
 	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
-	gsi_isr_ieob_clear(gsi, event_mask);
+	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
 
 	while (event_mask) {
 		u32 evt_ring_id = __ffs(event_mask);
@@ -1435,7 +1448,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
 
 /* Setup function for a single channel */
 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
-				 bool db_enable)
+				 bool legacy)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
 	u32 evt_ring_id = channel->evt_ring_id;
@@ -1454,7 +1467,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
 	if (ret)
 		goto err_evt_ring_de_alloc;
 
-	gsi_channel_program(channel, db_enable);
+	gsi_channel_program(channel, legacy);
 
 	if (channel->toward_ipa)
 		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1531,7 +1544,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
 }
 
 /* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
+static int gsi_channel_setup(struct gsi *gsi, bool legacy)
 {
 	u32 channel_id = 0;
 	u32 mask;
@@ -1543,7 +1556,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
 	mutex_lock(&gsi->mutex);
 
 	do {
-		ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
+		ret = gsi_channel_setup_one(gsi, channel_id, legacy);
 		if (ret)
 			goto err_unwind;
 	} while (++channel_id < gsi->channel_count);
@@ -1629,7 +1642,7 @@ static void gsi_channel_teardown(struct gsi *gsi)
 }
 
 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool db_enable)
+int gsi_setup(struct gsi *gsi, bool legacy)
 {
 	u32 val;
 
@@ -1672,7 +1685,7 @@ int gsi_setup(struct gsi *gsi, bool db_enable)
 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
 
-	return gsi_channel_setup(gsi, db_enable);
+	return gsi_channel_setup(gsi, legacy);
 }
 
 /* Inverse of gsi_setup() */
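Since gsi_channel_stop_command() now returns -EAGAIN while the hardware reports STOP_IN_PROC, gsi_channel_stop() polls a bounded number of times. A sketch of that retry loop; the constant name is an assumption, not taken from the patch:

	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries = GSI_CHANNEL_STOP_RX_RETRIES;	/* assumed bound */
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);	/* give the hardware time to reach STOPPED */
	} while (retries--);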
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 0698ff1ae7a6..90a02194e7ad 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -113,8 +113,7 @@ struct gsi_channel {
 	u16 tre_count;
 	u16 event_count;
 
-	struct completion completion;	/* signals channel state changes */
-	enum gsi_channel_state state;
+	struct completion completion;	/* signals channel command completion */
 
 	struct gsi_ring tre_ring;
 	u32 evt_ring_id;
@@ -166,14 +165,14 @@ struct gsi {
 /**
  * gsi_setup() - Set up the GSI subsystem
  * @gsi:	Address of GSI structure embedded in an IPA structure
- * @db_enable:	Whether to use the GSI doorbell engine
+ * @legacy:	Set up for legacy hardware
  *
  * @Return:	0 if successful, or a negative error code
 *
 * Performs initialization that must wait until the GSI hardware is
 * ready (including firmware loaded).
 */
-int gsi_setup(struct gsi *gsi, bool db_enable);
+int gsi_setup(struct gsi *gsi, bool legacy);
 
 /**
  * gsi_teardown() - Tear down GSI subsystem
@@ -221,15 +220,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
  * gsi_channel_reset() - Reset an allocated GSI channel
  * @gsi:	GSI pointer
  * @channel_id:	Channel to be reset
- * @db_enable:	Whether doorbell engine should be enabled
+ * @legacy:	Legacy behavior
 *
- * Reset a channel and reconfigure it.  The @db_enable flag indicates
- * whether the doorbell engine will be enabled following reconfiguration.
+ * Reset a channel and reconfigure it.  The @legacy flag indicates
+ * that some steps should be done differently for legacy hardware.
 *
 * GSI hardware relinquishes ownership of all pending receive buffer
 * transactions and they will complete with their cancelled flag set.
 */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable);
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);
 
 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
 int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 23fb29889e5a..b10a85392952 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -47,6 +47,10 @@ struct ipa_interrupt;
  * @mem_offset:	Offset from @mem_virt used for access to IPA memory
  * @mem_size:	Total size (bytes) of memory at @mem_virt
  * @mem:	Array of IPA-local memory region descriptors
+ * @imem_iova:	I/O virtual address of IPA region in IMEM
+ * @imem_size:	Size of IMEM region
+ * @smem_iova:	I/O virtual address of IPA region in SMEM
+ * @smem_size:	Size of SMEM region
  * @zero_addr:	DMA address of preallocated zero-filled memory
  * @zero_virt:	Virtual address of preallocated zero-filled memory
  * @zero_size:	Size (bytes) of preallocated zero-filled memory
@@ -88,6 +92,12 @@ struct ipa {
 	u32 mem_size;
 	const struct ipa_mem *mem;
 
+	unsigned long imem_iova;
+	size_t imem_size;
+
+	unsigned long smem_iova;
+	size_t smem_size;
+
 	dma_addr_t zero_addr;
 	void *zero_virt;
 	size_t zero_size;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index cee417181f98..c9ab865e7290 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -103,28 +103,6 @@ struct ipa_cmd_ip_packet_init {
 /* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
 #define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK	GENMASK(4, 0)
 
-/* IPA_CMD_DMA_TASK_32B_ADDR */
-
-/* This opcode gets modified with a DMA operation count */
-
-#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK	GENMASK(15, 8)
-
-struct ipa_cmd_hw_dma_task_32b_addr {
-	__le16 flags;
-	__le16 size;
-	__le32 addr;
-	__le16 packet_size;
-	u8 reserved[6];
-};
-
-/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
-#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK	GENMASK(10, 0)
-#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK	GENMASK(11, 11)
-#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK	GENMASK(12, 12)
-#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK	GENMASK(13, 13)
-#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK	GENMASK(14, 14)
-#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK	GENMASK(15, 15)
-
 /* IPA_CMD_DMA_SHARED_MEM */
 
 /* For IPA v4.0+, this opcode gets modified with pipeline clear options */
@@ -163,7 +141,6 @@ union ipa_cmd_payload {
 	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
 	struct ipa_cmd_register_write register_write;
 	struct ipa_cmd_ip_packet_init ip_packet_init;
-	struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
 	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
 	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
 };
@@ -508,42 +485,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
 			  direction, opcode);
 }
 
-/* Use a 32-bit DMA command to zero a block of memory */
-void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
-				   dma_addr_t addr, bool toward_ipa)
-{
-	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
-	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
-	struct ipa_cmd_hw_dma_task_32b_addr *payload;
-	union ipa_cmd_payload *cmd_payload;
-	enum dma_data_direction direction;
-	dma_addr_t payload_addr;
-	u16 flags;
-
-	/* assert(addr <= U32_MAX); */
-	addr &= GENMASK_ULL(31, 0);
-
-	/* The opcode encodes the number of DMA operations in the high byte */
-	opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);
-
-	direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
-	/* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
-	flags = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;
-	/* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */
-
-	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
-	payload = &cmd_payload->dma_task_32b_addr;
-
-	payload->flags = cpu_to_le16(flags);
-	payload->size = cpu_to_le16(size);
-	payload->addr = cpu_to_le32((u32)addr);
-	payload->packet_size = cpu_to_le16(size);
-
-	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
-			  direction, opcode);
-}
-
 /* Use a DMA command to read or write a block of IPA-resident memory */
 void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
 				u16 size, dma_addr_t addr, bool toward_ipa)
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 4917525b3a47..e440aa69c8b5 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -35,7 +35,6 @@ enum ipa_cmd_opcode {
 	IPA_CMD_HDR_INIT_LOCAL		= 9,
 	IPA_CMD_REGISTER_WRITE		= 12,
 	IPA_CMD_IP_PACKET_INIT		= 16,
-	IPA_CMD_DMA_TASK_32B_ADDR	= 17,
 	IPA_CMD_DMA_SHARED_MEM		= 19,
 	IPA_CMD_IP_PACKET_TAG_STATUS	= 20,
 };
@@ -148,16 +147,6 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset,
 				u32 value, u32 mask, bool clear_full);
 
 /**
- * ipa_cmd_dma_task_32b_addr_add() - Add a 32-bit DMA command to a transaction
- * @trans:	GSi transaction
- * @size:	Number of bytes to be memory to be transferred
- * @addr:	DMA address of buffer to be read into or written from
- * @toward_ipa:	true means write to IPA memory; false means read
- */
-void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
-				   dma_addr_t addr, bool toward_ipa);
-
-/**
  * ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
  * @trans:	GSI transaction
  * @offset:	Offset of IPA memory to be read or written
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 042b5fc3c135..43faa35ae726 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -193,7 +193,7 @@ static const struct ipa_resource_data ipa_resource_data = {
 };
 
 /* IPA-resident memory region configuration for the SC7180 SoC. */
-static const struct ipa_mem ipa_mem_data[] = {
+static const struct ipa_mem ipa_mem_local_data[] = {
 	[IPA_MEM_UC_SHARED] = {
 		.offset		= 0x0000,
 		.size		= 0x0080,
@@ -296,12 +296,20 @@ static const struct ipa_mem ipa_mem_data[] = {
 	},
 };
 
+static struct ipa_mem_data ipa_mem_data = {
+	.local_count	= ARRAY_SIZE(ipa_mem_local_data),
+	.local		= ipa_mem_local_data,
+	.imem_addr	= 0x146a8000,
+	.imem_size	= 0x00002000,
+	.smem_id	= 497,
+	.smem_size	= 0x00002000,
+};
+
 /* Configuration data for the SC7180 SoC. */
 const struct ipa_data ipa_data_sc7180 = {
 	.version	= IPA_VERSION_4_2,
 	.endpoint_count	= ARRAY_SIZE(ipa_gsi_endpoint_data),
 	.endpoint_data	= ipa_gsi_endpoint_data,
 	.resource_data	= &ipa_resource_data,
-	.mem_count	= ARRAY_SIZE(ipa_mem_data),
-	.mem_data	= ipa_mem_data,
+	.mem_data	= &ipa_mem_data,
 };
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index 0d9c36e1e806..52d4b84e0dac 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -74,7 +74,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.tx = {
 				.status_endpoint =
 					IPA_ENDPOINT_MODEM_AP_RX,
-				.delay	= true,
 			},
 		},
 	},
@@ -235,7 +234,7 @@ static const struct ipa_resource_data ipa_resource_data = {
 };
 
 /* IPA-resident memory region configuration for the SDM845 SoC. */
-static const struct ipa_mem ipa_mem_data[] = {
+static const struct ipa_mem ipa_mem_local_data[] = {
 	[IPA_MEM_UC_SHARED] = {
 		.offset		= 0x0000,
 		.size		= 0x0080,
@@ -318,12 +317,20 @@ static const struct ipa_mem ipa_mem_data[] = {
 	},
 };
 
+static struct ipa_mem_data ipa_mem_data = {
+	.local_count	= ARRAY_SIZE(ipa_mem_local_data),
+	.local		= ipa_mem_local_data,
+	.imem_addr	= 0x146bd000,
+	.imem_size	= 0x00002000,
+	.smem_id	= 497,
+	.smem_size	= 0x00002000,
+};
+
 /* Configuration data for the SDM845 SoC. */
 const struct ipa_data ipa_data_sdm845 = {
 	.version	= IPA_VERSION_3_5_1,
 	.endpoint_count	= ARRAY_SIZE(ipa_gsi_endpoint_data),
 	.endpoint_data	= ipa_gsi_endpoint_data,
 	.resource_data	= &ipa_resource_data,
-	.mem_count	= ARRAY_SIZE(ipa_mem_data),
-	.mem_data	= ipa_mem_data,
+	.mem_data	= &ipa_mem_data,
 };
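Each SoC's memory description is now gathered into a single ipa_mem_data structure. A hypothetical template for a new SoC entry, following the two real entries above (the IMEM address here is a placeholder, not a real value; SMEM item 497 is the one both existing SoCs reserve for the IPA):

	static struct ipa_mem_data ipa_mem_data = {
		.local_count	= ARRAY_SIZE(ipa_mem_local_data),
		.local		= ipa_mem_local_data,
		.imem_addr	= 0x14680000,	/* placeholder address */
		.imem_size	= 0x00002000,
		.smem_id	= 497,		/* SMEM item used by the IPA */
		.smem_size	= 0x00002000,
	};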
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 7110de2de817..7fc1058a5ca9 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -80,18 +80,12 @@ struct gsi_channel_data {
 /**
  * struct ipa_endpoint_tx_data - configuration data for TX endpoints
  * @status_endpoint: endpoint to which status elements are sent
- * @delay:	whether endpoint starts in delay mode
- *
- * Delay mode prevents a TX endpoint from transmitting anything, even if
- * commands have been presented to the hardware.  Once the endpoint exits
- * delay mode, queued transfer commands are sent.
 *
 * The @status_endpoint is only valid if the endpoint's @status_enable
 * flag is set.
 */
 struct ipa_endpoint_tx_data {
 	enum ipa_endpoint_name status_endpoint;
-	bool delay;
 };
 
 /**
@@ -245,15 +239,21 @@ struct ipa_resource_data {
 };
 
 /**
- * struct ipa_mem - IPA-local memory region description
- * @offset:		offset in IPA memory space to base of the region
- * @size:		size in bytes base of the region
- * @canary_count:	number of 32-bit "canary" values that precede region
+ * struct ipa_mem_data - description of IPA memory regions
+ * @local_count:	number of regions defined in the local[] array
+ * @local:		array of IPA-local memory region descriptors
+ * @imem_addr:		physical address of IPA region within IMEM
+ * @imem_size:		size in bytes of IPA IMEM region
+ * @smem_id:		item identifier for IPA region within SMEM memory
+ * @smem_size:		size in bytes of the IPA SMEM region
 */
 struct ipa_mem_data {
-	u32 offset;
-	u16 size;
-	u16 canary_count;
+	u32 local_count;
+	const struct ipa_mem *local;
+	u32 imem_addr;
+	u32 imem_size;
+	u32 smem_id;
+	u32 smem_size;
 };
 
 /**
@@ -270,8 +270,7 @@ struct ipa_data {
 	u32 endpoint_count;	/* # entries in endpoint_data[] */
 	const struct ipa_gsi_endpoint_data *endpoint_data;
 	const struct ipa_resource_data *resource_data;
-	u32 mem_count;		/* # entries in mem_data[] */
-	const struct ipa_mem *mem_data;
+	const struct ipa_mem_data *mem_data;
 };
 
 extern const struct ipa_data ipa_data_sdm845;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index a21534f1462f..82066a223a67 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -32,14 +32,9 @@
 /* The amount of RX buffer space consumed by standard skb overhead */
 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
 
-#define IPA_ENDPOINT_STOP_RX_RETRIES		10
-#define IPA_ENDPOINT_STOP_RX_SIZE		1	/* bytes */
-
 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
 #define IPA_AGGR_TIME_LIMIT_DEFAULT		1000	/* microseconds */
 
-#define ENDPOINT_STOP_DMA_TIMEOUT		15	/* milliseconds */
-
 /** enum ipa_status_opcode - status element opcode hardware values */
 enum ipa_status_opcode {
 	IPA_STATUS_OPCODE_PACKET		= 0x01,
@@ -284,25 +279,52 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
 /* suspend_delay represents suspend for RX, delay for TX endpoints.
  * Note that suspend is not supported starting with IPA v4.0.
  */
-static int
+static bool
 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
 {
 	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
 	struct ipa *ipa = endpoint->ipa;
+	bool state;
 	u32 mask;
 	u32 val;
 
-	/* assert(ipa->version == IPA_VERSION_3_5_1 */
+	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
+	 * correctly on IPA v4.2.
+	 *
+	 * if (endpoint->toward_ipa)
+	 *	assert(ipa->version != IPA_VERSION_4_2);
+	 * else
+	 *	assert(ipa->version == IPA_VERSION_3_5_1);
+	 */
 	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
 
 	val = ioread32(ipa->reg_virt + offset);
-	if (suspend_delay == !!(val & mask))
-		return -EALREADY;	/* Already set to desired state */
+	/* Don't bother if it's already in the requested state */
+	state = !!(val & mask);
+	if (suspend_delay != state) {
+		val ^= mask;
+		iowrite32(val, ipa->reg_virt + offset);
+	}
 
-	val ^= mask;
-	iowrite32(val, ipa->reg_virt + offset);
+	return state;
+}
 
-	return 0;
+/* We currently don't care what the previous state was for delay mode */
+static void
+ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
+{
+	/* assert(endpoint->toward_ipa); */
+
+	(void)ipa_endpoint_init_ctrl(endpoint, enable);
+}
+
+/* Returns previous suspend state (true means it was enabled) */
+static bool
+ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
+{
+	/* assert(!endpoint->toward_ipa); */
+
+	return ipa_endpoint_init_ctrl(endpoint, enable);
 }
 
 /* Enable or disable delay or suspend mode on all modem endpoints */
@@ -311,7 +333,7 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 	bool support_suspend;
 	u32 endpoint_id;
 
-	/* DELAY mode doesn't work right on IPA v4.2 */
+	/* DELAY mode doesn't work correctly on IPA v4.2 */
 	if (ipa->version == IPA_VERSION_4_2)
 		return;
 
@@ -325,8 +347,10 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 			continue;
 
 		/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
-		if (endpoint->toward_ipa || support_suspend)
-			(void)ipa_endpoint_init_ctrl(endpoint, enable);
+		if (endpoint->toward_ipa)
+			ipa_endpoint_program_delay(endpoint, enable);
+		else if (support_suspend)
+			(void)ipa_endpoint_program_suspend(endpoint, enable);
 	}
 }
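Unlike the old ipa_endpoint_init_ctrl() return value (-EALREADY on a no-op), ipa_endpoint_program_suspend() returns the previous suspend state, which lets callers save and restore it. A sketch of the pattern, as the ipa_endpoint_reset_rx_aggr() hunks below use it:

	bool suspended;

	/* Force the endpoint out of suspend, remembering where it was */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* ... work that requires an unsuspended endpoint ... */

	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);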
@@ -1133,10 +1157,10 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
 {
 	struct device *dev = &endpoint->ipa->pdev->dev;
 	struct ipa *ipa = endpoint->ipa;
-	bool endpoint_suspended = false;
 	struct gsi *gsi = &ipa->gsi;
+	bool suspended = false;
 	dma_addr_t addr;
-	bool db_enable;
+	bool legacy;
 	u32 retries;
 	u32 len = 1;
 	void *virt;
@@ -1164,8 +1188,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
 
 	/* Make sure the channel isn't suspended */
 	if (endpoint->ipa->version == IPA_VERSION_3_5_1)
-		if (!ipa_endpoint_init_ctrl(endpoint, false))
-			endpoint_suspended = true;
+		suspended = ipa_endpoint_program_suspend(endpoint, false);
 
 	/* Start channel and do a 1 byte read */
 	ret = gsi_channel_start(gsi, endpoint->channel_id);
@@ -1191,7 +1214,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
 
 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
 
-	ret = ipa_endpoint_stop(endpoint);
+	ret = gsi_channel_stop(gsi, endpoint->channel_id);
 	if (ret)
 		goto out_suspend_again;
 
@@ -1200,18 +1223,18 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
 	 * complete the channel reset sequence.  Finish by suspending the
 	 * channel again (if necessary).
 	 */
-	db_enable = ipa->version == IPA_VERSION_3_5_1;
-	gsi_channel_reset(gsi, endpoint->channel_id, db_enable);
+	legacy = ipa->version == IPA_VERSION_3_5_1;
+	gsi_channel_reset(gsi, endpoint->channel_id, legacy);
 
 	msleep(1);
 
 	goto out_suspend_again;
 
 err_endpoint_stop:
-	ipa_endpoint_stop(endpoint);
+	(void)gsi_channel_stop(gsi, endpoint->channel_id);
 out_suspend_again:
-	if (endpoint_suspended)
-		(void)ipa_endpoint_init_ctrl(endpoint, true);
+	if (suspended)
+		(void)ipa_endpoint_program_suspend(endpoint, true);
 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
 out_kfree:
 	kfree(virt);
@@ -1223,8 +1246,8 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
 {
 	u32 channel_id = endpoint->channel_id;
 	struct ipa *ipa = endpoint->ipa;
-	bool db_enable;
 	bool special;
+	bool legacy;
 	int ret = 0;
 
 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
@@ -1233,12 +1256,12 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
 	 *
 	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
 	 */
-	db_enable = ipa->version == IPA_VERSION_3_5_1;
+	legacy = ipa->version == IPA_VERSION_3_5_1;
 	special = !endpoint->toward_ipa && endpoint->data->aggregation;
 	if (special && ipa_endpoint_aggr_active(endpoint))
 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
 	else
-		gsi_channel_reset(&ipa->gsi, channel_id, db_enable);
+		gsi_channel_reset(&ipa->gsi, channel_id, legacy);
 
 	if (ret)
 		dev_err(&ipa->pdev->dev,
@@ -1246,94 +1269,18 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
 			ret, endpoint->channel_id, endpoint->endpoint_id);
 }
 
-static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
-{
-	u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
-	struct gsi_trans *trans;
-	dma_addr_t addr;
-	int ret;
-
-	trans = ipa_cmd_trans_alloc(ipa, 1);
-	if (!trans) {
-		dev_err(&ipa->pdev->dev,
-			"no transaction for RX endpoint STOP workaround\n");
-		return -EBUSY;
-	}
-
-	/* Read into the highest part of the zero memory area */
-	addr = ipa->zero_addr + ipa->zero_size - size;
-
-	ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);
-
-	ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
-	if (ret)
-		gsi_trans_free(trans);
-
-	return ret;
-}
-
-/**
- * ipa_endpoint_stop() - Stops a GSI channel in IPA
- * @client:	Client whose endpoint should be stopped
- *
- * This function implements the sequence to stop a GSI channel
- * in IPA. This function returns when the channel is is STOP state.
- *
- * Return value: 0 on success, negative otherwise
- */
-int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
-{
-	u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
-	int ret;
-
-	do {
-		struct ipa *ipa = endpoint->ipa;
-		struct gsi *gsi = &ipa->gsi;
-
-		ret = gsi_channel_stop(gsi, endpoint->channel_id);
-		if (ret != -EAGAIN || endpoint->toward_ipa)
-			break;
-
-		/* For IPA v3.5.1, send a DMA read task and check again */
-		if (ipa->version == IPA_VERSION_3_5_1) {
-			ret = ipa_endpoint_stop_rx_dma(ipa);
-			if (ret)
-				break;
-		}
-
-		msleep(1);
-	} while (retries--);
-
-	return retries ? ret : -EIO;
-}
-
 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
 {
-	struct device *dev = &endpoint->ipa->pdev->dev;
-	int ret;
-
 	if (endpoint->toward_ipa) {
-		bool delay_mode = endpoint->data->tx.delay;
-
-		ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
-		/* Endpoint is expected to not be in delay mode */
-		if (!ret != delay_mode) {
-			dev_warn(dev,
-				 "TX endpoint %u was %sin delay mode\n",
-				 endpoint->endpoint_id,
"already " : ""); - } + if (endpoint->ipa->version != IPA_VERSION_4_2) + ipa_endpoint_program_delay(endpoint, false); ipa_endpoint_init_hdr_ext(endpoint); ipa_endpoint_init_aggr(endpoint); ipa_endpoint_init_deaggr(endpoint); ipa_endpoint_init_seq(endpoint); } else { - if (endpoint->ipa->version == IPA_VERSION_3_5_1) { - if (!ipa_endpoint_init_ctrl(endpoint, false)) - dev_warn(dev, - "RX endpoint %u was suspended\n", - endpoint->endpoint_id); - } + if (endpoint->ipa->version == IPA_VERSION_3_5_1) + (void)ipa_endpoint_program_suspend(endpoint, false); ipa_endpoint_init_hdr_ext(endpoint); ipa_endpoint_init_aggr(endpoint); } @@ -1374,12 +1321,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) { u32 mask = BIT(endpoint->endpoint_id); struct ipa *ipa = endpoint->ipa; + struct gsi *gsi = &ipa->gsi; int ret; - if (!(endpoint->ipa->enabled & mask)) + if (!(ipa->enabled & mask)) return; - endpoint->ipa->enabled ^= mask; + ipa->enabled ^= mask; if (!endpoint->toward_ipa) { ipa_endpoint_replenish_disable(endpoint); @@ -1388,7 +1336,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) } /* Note that if stop fails, the channel's state is not well-defined */ - ret = ipa_endpoint_stop(endpoint); + ret = gsi_channel_stop(gsi, endpoint->channel_id); if (ret) dev_err(&ipa->pdev->dev, "error %d attempting to stop endpoint %u\n", ret, @@ -1445,7 +1393,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) * aggregation frame, then simulating the arrival of such * an interrupt. */ - WARN_ON(ipa_endpoint_init_ctrl(endpoint, true)); + (void)ipa_endpoint_program_suspend(endpoint, true); ipa_endpoint_suspend_aggr(endpoint); } @@ -1468,7 +1416,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) /* IPA v3.5.1 doesn't use channel start for resume */ start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; if (!endpoint->toward_ipa && !start_channel) - WARN_ON(ipa_endpoint_init_ctrl(endpoint, false)); + (void)ipa_endpoint_program_suspend(endpoint, false); ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); if (ret) diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 4b336a1f759d..3b297d65828e 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -76,8 +76,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa); int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb); -int ipa_endpoint_stop(struct ipa_endpoint *endpoint); - void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint); int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint); diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 28998dcce3d2..76d5108b8403 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -108,7 +108,7 @@ int ipa_setup(struct ipa *ipa) struct ipa_endpoint *command_endpoint; int ret; - /* IPA v4.0 and above don't use the doorbell engine. 
-	/* IPA v4.0 and above don't use the doorbell engine. */
+	/* Setup for IPA v3.5.1 has some slight differences */
 	ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
 	if (ret)
 		return ret;
@@ -778,7 +778,7 @@ static int ipa_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_kfree_ipa;
 
-	ret = ipa_mem_init(ipa, data->mem_count, data->mem_data);
+	ret = ipa_mem_init(ipa, data->mem_data);
 	if (ret)
 		goto err_reg_exit;
 
@@ -933,8 +933,8 @@ static int ipa_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops ipa_pm_ops = {
-	.suspend_noirq	= ipa_suspend,
-	.resume_noirq	= ipa_resume,
+	.suspend	= ipa_suspend,
+	.resume		= ipa_resume,
 };
 
 static struct platform_driver ipa_driver = {
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 42d2c29d9f0c..3ef814119aab 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -8,19 +8,24 @@
 #include <linux/bitfield.h>
 #include <linux/bug.h>
 #include <linux/dma-mapping.h>
+#include <linux/iommu.h>
 #include <linux/io.h>
+#include <linux/soc/qcom/smem.h>
 
 #include "ipa.h"
 #include "ipa_reg.h"
+#include "ipa_data.h"
 #include "ipa_cmd.h"
 #include "ipa_mem.h"
-#include "ipa_data.h"
 #include "ipa_table.h"
 #include "gsi_trans.h"
 
 /* "Canary" value placed between memory regions to detect overflow */
 #define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)
 
+/* SMEM host id representing the modem. */
+#define QCOM_SMEM_HOST_MODEM	1
+
 /* Add an immediate command to a transaction that zeroes a memory region */
 static void
 ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
@@ -265,16 +270,194 @@ int ipa_mem_zero_modem(struct ipa *ipa)
 	return 0;
 }
 
+/**
+ * ipa_imem_init() - Initialize IMEM memory used by the IPA
+ * @ipa:	IPA pointer
+ * @addr:	Physical address of the IPA region in IMEM
+ * @size:	Size (bytes) of the IPA region in IMEM
+ *
+ * IMEM is a block of shared memory separate from system DRAM, and
+ * a portion of this memory is available for the IPA to use.  The
+ * modem accesses this memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If this region exists (size > 0) we map it for read/write access
+ * through the IOMMU using the IPA device.
+ *
+ * Note: @addr and @size are not guaranteed to be page-aligned.
+ */
+static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
+{
+	struct device *dev = &ipa->pdev->dev;
+	struct iommu_domain *domain;
+	unsigned long iova;
+	phys_addr_t phys;
+	int ret;
+
+	if (!size)
+		return 0;	/* IMEM memory not used */
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain) {
+		dev_err(dev, "no IOMMU domain found for IMEM\n");
+		return -EINVAL;
+	}
+
+	/* Align the address down and the size up to page boundaries */
+	phys = addr & PAGE_MASK;
+	size = PAGE_ALIGN(size + addr - phys);
+	iova = phys;	/* We just want a direct mapping */
+
+	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		return ret;
+
+	ipa->imem_iova = iova;
+	ipa->imem_size = size;
+
+	return 0;
+}
+
+static void ipa_imem_exit(struct ipa *ipa)
+{
+	struct iommu_domain *domain;
+	struct device *dev;
+
+	if (!ipa->imem_size)
+		return;
+
+	dev = &ipa->pdev->dev;
+	domain = iommu_get_domain_for_dev(dev);
+	if (domain) {
+		size_t size;
+
+		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
+		if (size != ipa->imem_size)
+			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
+				 size, ipa->imem_size);
+	} else {
+		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
+	}
+
+	ipa->imem_size = 0;
+	ipa->imem_iova = 0;
+}
+
+/**
+ * ipa_smem_init() - Initialize SMEM memory used by the IPA
+ * @ipa:	IPA pointer
+ * @item:	Item ID of SMEM memory
+ * @size:	Size (bytes) of SMEM memory region
+ *
+ * SMEM is a managed block of shared DRAM, from which numbered "items"
+ * can be allocated.  One item is designated for use by the IPA.
+ *
+ * The modem accesses SMEM memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If size provided is non-zero, we allocate it and map it for
+ * access through the IOMMU.
+ *
+ * Note: @size and the item address are not guaranteed to be page-aligned.
+ */
+static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+{
+	struct device *dev = &ipa->pdev->dev;
+	struct iommu_domain *domain;
+	unsigned long iova;
+	phys_addr_t phys;
+	phys_addr_t addr;
+	size_t actual;
+	void *virt;
+	int ret;
+
+	if (!size)
+		return 0;	/* SMEM memory not used */
+
+	/* SMEM is memory shared between the AP and another system entity
+	 * (in this case, the modem).  An allocation from SMEM is persistent
+	 * until the AP reboots; there is no way to free an allocated SMEM
+	 * region.  Allocation only reserves the space; to use it you need
+	 * to "get" a pointer to it (this implies no reference counting).
+	 * The item might have already been allocated, in which case we
+	 * use it unless the size isn't what we expect.
+	 */
+	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
+	if (ret && ret != -EEXIST) {
+		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
+			ret, size, item);
+		return ret;
+	}
+
+	/* Now get the address of the SMEM memory region */
+	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
+	if (IS_ERR(virt)) {
+		ret = PTR_ERR(virt);
+		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
+		return ret;
+	}
+
+	/* In case the region was already allocated, verify the size */
+	if (ret && actual != size) {
+		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
+			item, actual, size);
+		return -EINVAL;
+	}
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain) {
+		dev_err(dev, "no IOMMU domain found for SMEM\n");
+		return -EINVAL;
+	}
+
+	/* Align the address down and the size up to a page boundary */
+	addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+	phys = addr & PAGE_MASK;
+	size = PAGE_ALIGN(size + addr - phys);
+	iova = phys;	/* We just want a direct mapping */
+
+	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+	if (ret)
+		return ret;
+
+	ipa->smem_iova = iova;
+	ipa->smem_size = size;
+
+	return 0;
+}
+
+static void ipa_smem_exit(struct ipa *ipa)
+{
+	struct device *dev = &ipa->pdev->dev;
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (domain) {
+		size_t size;
+
+		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
+		if (size != ipa->smem_size)
+			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
+				 size, ipa->smem_size);
+
+	} else {
+		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
+	}
+
+	ipa->smem_size = 0;
+	ipa->smem_iova = 0;
+}
+
 /* Perform memory region-related initialization */
-int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
 {
 	struct device *dev = &ipa->pdev->dev;
 	struct resource *res;
 	int ret;
 
-	if (count > IPA_MEM_COUNT) {
-		dev_err(dev, "too many memory regions (%u > %u)\n",
-			count, IPA_MEM_COUNT);
+	if (mem_data->local_count > IPA_MEM_COUNT) {
+		dev_err(dev, "too many memory regions (%u > %u)\n",
+			mem_data->local_count, IPA_MEM_COUNT);
 		return -EINVAL;
 	}
 
@@ -302,13 +485,30 @@ int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
 	ipa->mem_size = resource_size(res);
 
 	/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
-	ipa->mem = mem;
+	ipa->mem = mem_data->local;
+
+	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
+	if (ret)
+		goto err_unmap;
+
+	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
+	if (ret)
+		goto err_imem_exit;
 
 	return 0;
+
+err_imem_exit:
+	ipa_imem_exit(ipa);
+err_unmap:
+	memunmap(ipa->mem_virt);
+
+	return ret;
 }
 
 /* Inverse of ipa_mem_init() */
 void ipa_mem_exit(struct ipa *ipa)
 {
+	ipa_smem_exit(ipa);
+	ipa_imem_exit(ipa);
 	memunmap(ipa->mem_virt);
 }
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 065cb499ebe5..f99180f84f0d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -7,6 +7,7 @@
 #define _IPA_MEM_H_
 
 struct ipa;
+struct ipa_mem_data;
 
 /**
  * DOC: IPA Local Memory
@@ -84,7 +85,7 @@ void ipa_mem_teardown(struct ipa *ipa);
 
 int ipa_mem_zero_modem(struct ipa *ipa);
 
-int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem);
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
 
 void ipa_mem_exit(struct ipa *ipa);
 
 #endif /* _IPA_MEM_H_ */
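A worked example of the alignment arithmetic that ipa_imem_init() and ipa_smem_init() share, using illustrative numbers only (assuming 4 KiB pages):

	unsigned long addr = 0x146a8100;	/* hypothetical, unaligned */
	size_t size = 0x2000;
	phys_addr_t phys = addr & PAGE_MASK;	/* 0x146a8000 */

	/* 0x2000 + 0x100 of slack rounds up to 0x3000 (three pages) */
	size = PAGE_ALIGN(size + addr - phys);

	/* iommu_map() then covers [0x146a8000, 0x146ab000), a superset of
	 * the requested [0x146a8100, 0x146aa100) region, mapped 1:1 so the
	 * I/O virtual address equals the physical address.
	 */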