Diffstat (limited to 'drivers/remoteproc/remoteproc_core.c')
-rw-r--r--   drivers/remoteproc/remoteproc_core.c   595
1 file changed, 481 insertions(+), 114 deletions(-)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index aa6206706fe3..54ec38fc5dca 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -53,6 +53,11 @@ typedef int (*rproc_handle_resources_t)(struct rproc *rproc, typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int offset, int avail); +static int rproc_alloc_carveout(struct rproc *rproc, + struct rproc_mem_entry *mem); +static int rproc_release_carveout(struct rproc *rproc, + struct rproc_mem_entry *mem); + /* Unique indices for remoteproc devices */ static DEFINE_IDA(rproc_dev_index); @@ -140,6 +145,22 @@ static void rproc_disable_iommu(struct rproc *rproc) iommu_domain_free(domain); } +static phys_addr_t rproc_va_to_pa(void *cpu_addr) +{ + /* + * Return physical address according to virtual address location + * - in vmalloc: if region ioremapped or defined as dma_alloc_coherent + * - in kernel: if region allocated in generic dma memory pool + */ + if (is_vmalloc_addr(cpu_addr)) { + return page_to_phys(vmalloc_to_page(cpu_addr)) + + offset_in_page(cpu_addr); + } + + WARN_ON(!virt_addr_valid(cpu_addr)); + return virt_to_phys(cpu_addr); +} + /** * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address * @rproc: handle of a remote processor @@ -201,27 +222,128 @@ out: } EXPORT_SYMBOL(rproc_da_to_va); +/** + * rproc_find_carveout_by_name() - lookup the carveout region by a name + * @rproc: handle of a remote processor + * @name,..: carveout name to find (standard printf format) + * + * Platform driver has the capability to register some pre-allacoted carveout + * (physically contiguous memory regions) before rproc firmware loading and + * associated resource table analysis. These regions may be dedicated memory + * regions internal to the coprocessor or specified DDR region with specific + * attributes + * + * This function is a helper function with which we can go over the + * allocated carveouts and return associated region characteristics like + * coprocessor address, length or processor virtual address. + * + * Return: a valid pointer on carveout entry on success or NULL on failure. + */ +struct rproc_mem_entry * +rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...) +{ + va_list args; + char _name[32]; + struct rproc_mem_entry *carveout, *mem = NULL; + + if (!name) + return NULL; + + va_start(args, name); + vsnprintf(_name, sizeof(_name), name, args); + va_end(args); + + list_for_each_entry(carveout, &rproc->carveouts, node) { + /* Compare carveout and requested names */ + if (!strcmp(carveout->name, _name)) { + mem = carveout; + break; + } + } + + return mem; +} + +/** + * rproc_check_carveout_da() - Check specified carveout da configuration + * @rproc: handle of a remote processor + * @mem: pointer on carveout to check + * @da: area device address + * @len: associated area size + * + * This function is a helper function to verify requested device area (couple + * da, len) is part of specified carevout. 
+ * + * Return: 0 if carveout match request else -ENOMEM + */ +int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem, + u32 da, u32 len) +{ + struct device *dev = &rproc->dev; + int delta = 0; + + /* Check requested resource length */ + if (len > mem->len) { + dev_err(dev, "Registered carveout doesn't fit len request\n"); + return -ENOMEM; + } + + if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) { + /* Update existing carveout da */ + mem->da = da; + } else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) { + delta = da - mem->da; + + /* Check requested resource belongs to registered carveout */ + if (delta < 0) { + dev_err(dev, + "Registered carveout doesn't fit da request\n"); + return -ENOMEM; + } + + if (delta + len > mem->len) { + dev_err(dev, + "Registered carveout doesn't fit len request\n"); + return -ENOMEM; + } + } + + return 0; +} + int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) { struct rproc *rproc = rvdev->rproc; struct device *dev = &rproc->dev; struct rproc_vring *rvring = &rvdev->vring[i]; struct fw_rsc_vdev *rsc; - dma_addr_t dma; - void *va; int ret, size, notifyid; + struct rproc_mem_entry *mem; /* actual size of vring (in bytes) */ size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); - /* - * Allocate non-cacheable memory for the vring. In the future - * this call will also configure the IOMMU for us - */ - va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL); - if (!va) { - dev_err(dev->parent, "dma_alloc_coherent failed\n"); - return -EINVAL; + rsc = (void *)rproc->table_ptr + rvdev->rsc_offset; + + /* Search for pre-registered carveout */ + mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index, + i); + if (mem) { + if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size)) + return -ENOMEM; + } else { + /* Register carveout in in list */ + mem = rproc_mem_entry_init(dev, 0, 0, size, rsc->vring[i].da, + rproc_alloc_carveout, + rproc_release_carveout, + "vdev%dvring%d", + rvdev->index, i); + if (!mem) { + dev_err(dev, "Can't allocate memory entry structure\n"); + return -ENOMEM; + } + + rproc_add_carveout(rproc, mem); } /* @@ -232,7 +354,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); if (ret < 0) { dev_err(dev, "idr_alloc failed: %d\n", ret); - dma_free_coherent(dev->parent, size, va, dma); return ret; } notifyid = ret; @@ -241,21 +362,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) if (notifyid > rproc->max_notifyid) rproc->max_notifyid = notifyid; - dev_dbg(dev, "vring%d: va %pK dma %pad size 0x%x idr %d\n", - i, va, &dma, size, notifyid); - - rvring->va = va; - rvring->dma = dma; rvring->notifyid = notifyid; - /* - * Let the rproc know the notifyid and da of this vring. - * Not all platforms use dma_alloc_coherent to automatically - * set up the iommu. In this case the device address (da) will - * hold the physical address and not the device address. 
- */ - rsc = (void *)rproc->table_ptr + rvdev->rsc_offset; - rsc->vring[i].da = dma; + /* Let the rproc know the notifyid of this vring.*/ rsc->vring[i].notifyid = notifyid; return 0; } @@ -287,12 +396,10 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) void rproc_free_vring(struct rproc_vring *rvring) { - int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); struct rproc *rproc = rvring->rvdev->rproc; int idx = rvring->rvdev->vring - rvring; struct fw_rsc_vdev *rsc; - dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma); idr_remove(&rproc->notifyids, rvring->notifyid); /* reset resource entry info */ @@ -379,6 +486,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, rvdev->id = rsc->id; rvdev->rproc = rproc; + rvdev->index = rproc->nb_vdev++; /* parse the vrings */ for (i = 0; i < rsc->num_of_vrings; i++) { @@ -423,9 +531,6 @@ void rproc_vdev_release(struct kref *ref) for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) { rvring = &rvdev->vring[id]; - if (!rvring->va) - continue; - rproc_free_vring(rvring); } @@ -584,61 +689,31 @@ out: } /** - * rproc_handle_carveout() - handle phys contig memory allocation requests + * rproc_alloc_carveout() - allocated specified carveout * @rproc: rproc handle - * @rsc: the resource entry - * @avail: size of available data (for image validation) - * - * This function will handle firmware requests for allocation of physically - * contiguous memory regions. - * - * These request entries should come first in the firmware's resource table, - * as other firmware entries might request placing other data objects inside - * these memory regions (e.g. data/code segments, trace resource entries, ...). + * @mem: the memory entry to allocate * - * Allocating memory this way helps utilizing the reserved physical memory - * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries - * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB - * pressure is important; it may have a substantial impact on performance. + * This function allocate specified memory entry @mem using + * dma_alloc_coherent() as default allocator */ -static int rproc_handle_carveout(struct rproc *rproc, - struct fw_rsc_carveout *rsc, - int offset, int avail) +static int rproc_alloc_carveout(struct rproc *rproc, + struct rproc_mem_entry *mem) { - struct rproc_mem_entry *carveout, *mapping; + struct rproc_mem_entry *mapping = NULL; struct device *dev = &rproc->dev; dma_addr_t dma; void *va; int ret; - if (sizeof(*rsc) > avail) { - dev_err(dev, "carveout rsc is truncated\n"); - return -EINVAL; - } - - /* make sure reserved bytes are zeroes */ - if (rsc->reserved) { - dev_err(dev, "carveout rsc has non zero reserved bytes\n"); - return -EINVAL; - } - - dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n", - rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags); - - carveout = kzalloc(sizeof(*carveout), GFP_KERNEL); - if (!carveout) - return -ENOMEM; - - va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL); + va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL); if (!va) { dev_err(dev->parent, - "failed to allocate dma memory: len 0x%x\n", rsc->len); - ret = -ENOMEM; - goto free_carv; + "failed to allocate dma memory: len 0x%x\n", mem->len); + return -ENOMEM; } dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n", - va, &dma, rsc->len); + va, &dma, mem->len); /* * Ok, this is non-standard. 
@@ -657,15 +732,23 @@ static int rproc_handle_carveout(struct rproc *rproc, * to use the iommu-based DMA API: we expect 'dma' to contain the * physical address in this case. */ - if (rproc->domain) { + + if (mem->da != FW_RSC_ADDR_ANY) { + if (!rproc->domain) { + dev_err(dev->parent, + "Bad carveout rsc configuration\n"); + ret = -ENOMEM; + goto dma_free; + } + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) { ret = -ENOMEM; goto dma_free; } - ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len, - rsc->flags); + ret = iommu_map(rproc->domain, mem->da, dma, mem->len, + mem->flags); if (ret) { dev_err(dev, "iommu_map failed: %d\n", ret); goto free_mapping; @@ -678,52 +761,219 @@ static int rproc_handle_carveout(struct rproc *rproc, * We can't trust the remote processor not to change the * resource table, so we must maintain this info independently. */ - mapping->da = rsc->da; - mapping->len = rsc->len; + mapping->da = mem->da; + mapping->len = mem->len; list_add_tail(&mapping->node, &rproc->mappings); dev_dbg(dev, "carveout mapped 0x%x to %pad\n", - rsc->da, &dma); + mem->da, &dma); + } else { + mem->da = (u32)dma; } - /* - * Some remote processors might need to know the pa - * even though they are behind an IOMMU. E.g., OMAP4's - * remote M3 processor needs this so it can control - * on-chip hardware accelerators that are not behind - * the IOMMU, and therefor must know the pa. - * - * Generally we don't want to expose physical addresses - * if we don't have to (remote processors are generally - * _not_ trusted), so we might want to do this only for - * remote processor that _must_ have this (e.g. OMAP4's - * dual M3 subsystem). - * - * Non-IOMMU processors might also want to have this info. - * In this case, the device address and the physical address - * are the same. - */ - rsc->pa = dma; - - carveout->va = va; - carveout->len = rsc->len; - carveout->dma = dma; - carveout->da = rsc->da; - - list_add_tail(&carveout->node, &rproc->carveouts); + mem->dma = (u32)dma; + mem->va = va; return 0; free_mapping: kfree(mapping); dma_free: - dma_free_coherent(dev->parent, rsc->len, va, dma); -free_carv: - kfree(carveout); + dma_free_coherent(dev->parent, mem->len, va, dma); return ret; } -/* +/** + * rproc_release_carveout() - release acquired carveout + * @rproc: rproc handle + * @mem: the memory entry to release + * + * This function releases specified memory entry @mem allocated via + * rproc_alloc_carveout() function by @rproc. + */ +static int rproc_release_carveout(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + struct device *dev = &rproc->dev; + + /* clean up carveout allocations */ + dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma); + return 0; +} + +/** + * rproc_handle_carveout() - handle phys contig memory allocation requests + * @rproc: rproc handle + * @rsc: the resource entry + * @avail: size of available data (for image validation) + * + * This function will handle firmware requests for allocation of physically + * contiguous memory regions. + * + * These request entries should come first in the firmware's resource table, + * as other firmware entries might request placing other data objects inside + * these memory regions (e.g. data/code segments, trace resource entries, ...). + * + * Allocating memory this way helps utilizing the reserved physical memory + * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries + * needed to map it (in case @rproc is using an IOMMU). 
Reducing the TLB + * pressure is important; it may have a substantial impact on performance. + */ +static int rproc_handle_carveout(struct rproc *rproc, + struct fw_rsc_carveout *rsc, + int offset, int avail) +{ + struct rproc_mem_entry *carveout; + struct device *dev = &rproc->dev; + + if (sizeof(*rsc) > avail) { + dev_err(dev, "carveout rsc is truncated\n"); + return -EINVAL; + } + + /* make sure reserved bytes are zeroes */ + if (rsc->reserved) { + dev_err(dev, "carveout rsc has non zero reserved bytes\n"); + return -EINVAL; + } + + dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n", + rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags); + + /* + * Check carveout rsc already part of a registered carveout, + * Search by name, then check the da and length + */ + carveout = rproc_find_carveout_by_name(rproc, rsc->name); + + if (carveout) { + if (carveout->rsc_offset != FW_RSC_ADDR_ANY) { + dev_err(dev, + "Carveout already associated to resource table\n"); + return -ENOMEM; + } + + if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len)) + return -ENOMEM; + + /* Update memory carveout with resource table info */ + carveout->rsc_offset = offset; + carveout->flags = rsc->flags; + + return 0; + } + + /* Register carveout in in list */ + carveout = rproc_mem_entry_init(dev, 0, 0, rsc->len, rsc->da, + rproc_alloc_carveout, + rproc_release_carveout, rsc->name); + if (!carveout) { + dev_err(dev, "Can't allocate memory entry structure\n"); + return -ENOMEM; + } + + carveout->flags = rsc->flags; + carveout->rsc_offset = offset; + rproc_add_carveout(rproc, carveout); + + return 0; +} + +/** + * rproc_add_carveout() - register an allocated carveout region + * @rproc: rproc handle + * @mem: memory entry to register + * + * This function registers specified memory entry in @rproc carveouts list. + * Specified carveout should have been allocated before registering. + */ +void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem) +{ + list_add_tail(&mem->node, &rproc->carveouts); +} +EXPORT_SYMBOL(rproc_add_carveout); + +/** + * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct + * @dev: pointer on device struct + * @va: virtual address + * @dma: dma address + * @len: memory carveout length + * @da: device address + * @release: memory carveout function + * @name: carveout name + * + * This function allocates a rproc_mem_entry struct and fill it with parameters + * provided by client. + */ +struct rproc_mem_entry * +rproc_mem_entry_init(struct device *dev, + void *va, dma_addr_t dma, int len, u32 da, + int (*alloc)(struct rproc *, struct rproc_mem_entry *), + int (*release)(struct rproc *, struct rproc_mem_entry *), + const char *name, ...) 
+{ + struct rproc_mem_entry *mem; + va_list args; + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return mem; + + mem->va = va; + mem->dma = dma; + mem->da = da; + mem->len = len; + mem->alloc = alloc; + mem->release = release; + mem->rsc_offset = FW_RSC_ADDR_ANY; + mem->of_resm_idx = -1; + + va_start(args, name); + vsnprintf(mem->name, sizeof(mem->name), name, args); + va_end(args); + + return mem; +} +EXPORT_SYMBOL(rproc_mem_entry_init); + +/** + * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct + * from a reserved memory phandle + * @dev: pointer on device struct + * @of_resm_idx: reserved memory phandle index in "memory-region" + * @len: memory carveout length + * @da: device address + * @name: carveout name + * + * This function allocates a rproc_mem_entry struct and fill it with parameters + * provided by client. + */ +struct rproc_mem_entry * +rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len, + u32 da, const char *name, ...) +{ + struct rproc_mem_entry *mem; + va_list args; + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return mem; + + mem->da = da; + mem->len = len; + mem->rsc_offset = FW_RSC_ADDR_ANY; + mem->of_resm_idx = of_resm_idx; + + va_start(args, name); + vsnprintf(mem->name, sizeof(mem->name), name, args); + va_end(args); + + return mem; +} +EXPORT_SYMBOL(rproc_of_resm_mem_entry_init); + +/** * A lookup table for resource handlers. The indices are defined in * enum fw_resource_type. */ @@ -845,6 +1095,70 @@ static void rproc_unprepare_subdevices(struct rproc *rproc) } /** + * rproc_alloc_registered_carveouts() - allocate all carveouts registered + * in the list + * @rproc: the remote processor handle + * + * This function parses registered carveout list, performs allocation + * if alloc() ops registered and updates resource table information + * if rsc_offset set. + * + * Return: 0 on success + */ +static int rproc_alloc_registered_carveouts(struct rproc *rproc) +{ + struct rproc_mem_entry *entry, *tmp; + struct fw_rsc_carveout *rsc; + struct device *dev = &rproc->dev; + int ret; + + list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { + if (entry->alloc) { + ret = entry->alloc(rproc, entry); + if (ret) { + dev_err(dev, "Unable to allocate carveout %s: %d\n", + entry->name, ret); + return -ENOMEM; + } + } + + if (entry->rsc_offset != FW_RSC_ADDR_ANY) { + /* update resource table */ + rsc = (void *)rproc->table_ptr + entry->rsc_offset; + + /* + * Some remote processors might need to know the pa + * even though they are behind an IOMMU. E.g., OMAP4's + * remote M3 processor needs this so it can control + * on-chip hardware accelerators that are not behind + * the IOMMU, and therefor must know the pa. + * + * Generally we don't want to expose physical addresses + * if we don't have to (remote processors are generally + * _not_ trusted), so we might want to do this only for + * remote processor that _must_ have this (e.g. OMAP4's + * dual M3 subsystem). + * + * Non-IOMMU processors might also want to have this info. + * In this case, the device address and the physical address + * are the same. 
+ */ + + /* Use va if defined else dma to generate pa */ + if (entry->va) + rsc->pa = (u32)rproc_va_to_pa(entry->va); + else + rsc->pa = (u32)entry->dma; + + rsc->da = entry->da; + rsc->len = entry->len; + } + } + + return 0; +} + +/** * rproc_coredump_cleanup() - clean up dump_segments list * @rproc: the remote processor handle */ @@ -896,8 +1210,8 @@ static void rproc_resource_cleanup(struct rproc *rproc) /* clean up carveout allocations */ list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { - dma_free_coherent(dev->parent, entry->len, entry->va, - entry->dma); + if (entry->release) + entry->release(rproc, entry); list_del(&entry->node); kfree(entry); } @@ -1009,6 +1323,9 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) /* reset max_notifyid */ rproc->max_notifyid = -1; + /* reset handled vdev */ + rproc->nb_vdev = 0; + /* handle fw resources which are required to boot rproc */ ret = rproc_handle_resources(rproc, rproc_loading_handlers); if (ret) { @@ -1016,6 +1333,14 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) goto clean_up_resources; } + /* Allocate carveout resources associated to rproc */ + ret = rproc_alloc_registered_carveouts(rproc); + if (ret) { + dev_err(dev, "Failed to allocate associated carveouts: %d\n", + ret); + goto clean_up_resources; + } + ret = rproc_start(rproc, fw); if (ret) goto clean_up_resources; @@ -1122,6 +1447,44 @@ int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size) EXPORT_SYMBOL(rproc_coredump_add_segment); /** + * rproc_coredump_add_custom_segment() - add custom coredump segment + * @rproc: handle of a remote processor + * @da: device address + * @size: size of segment + * @dumpfn: custom dump function called for each segment during coredump + * @priv: private data + * + * Add device memory to the list of segments to be included in the coredump + * and associate the segment with the given custom dump function and private + * data. + * + * Return: 0 on success, negative errno on error. + */ +int rproc_coredump_add_custom_segment(struct rproc *rproc, + dma_addr_t da, size_t size, + void (*dumpfn)(struct rproc *rproc, + struct rproc_dump_segment *segment, + void *dest), + void *priv) +{ + struct rproc_dump_segment *segment; + + segment = kzalloc(sizeof(*segment), GFP_KERNEL); + if (!segment) + return -ENOMEM; + + segment->da = da; + segment->size = size; + segment->priv = priv; + segment->dump = dumpfn; + + list_add_tail(&segment->node, &rproc->dump_segments); + + return 0; +} +EXPORT_SYMBOL(rproc_coredump_add_custom_segment); + +/** * rproc_coredump() - perform coredump * @rproc: rproc handle * @@ -1183,14 +1546,18 @@ static void rproc_coredump(struct rproc *rproc) phdr->p_flags = PF_R | PF_W | PF_X; phdr->p_align = 0; - ptr = rproc_da_to_va(rproc, segment->da, segment->size); - if (!ptr) { - dev_err(&rproc->dev, - "invalid coredump segment (%pad, %zu)\n", - &segment->da, segment->size); - memset(data + offset, 0xff, segment->size); + if (segment->dump) { + segment->dump(rproc, segment, data + offset); } else { - memcpy(data + offset, ptr, segment->size); + ptr = rproc_da_to_va(rproc, segment->da, segment->size); + if (!ptr) { + dev_err(&rproc->dev, + "invalid coredump segment (%pad, %zu)\n", + &segment->da, segment->size); + memset(data + offset, 0xff, segment->size); + } else { + memcpy(data + offset, ptr, segment->size); + } } offset += phdr->p_filesz; |
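The rproc_mem_entry_init() and rproc_add_carveout() exports added above let a platform driver hand a pre-mapped region to the core before firmware loading, so that rproc_alloc_vring() and rproc_handle_carveout() reuse it instead of falling back to rproc_alloc_carveout()'s dma_alloc_coherent() path. A minimal sketch under assumed values: MY_SRAM_PA/MY_SRAM_DA/MY_SRAM_SZ and my_rproc_register_sram() are hypothetical, while the two remoteproc calls follow the signatures introduced by this patch.

    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/remoteproc.h>
    #include <linux/sizes.h>

    /* Hypothetical internal RAM region used by the coprocessor for vring0. */
    #define MY_SRAM_PA	0x10000000	/* host physical address (assumed) */
    #define MY_SRAM_DA	0x00000000	/* device address seen by the rproc */
    #define MY_SRAM_SZ	SZ_64K

    static int my_rproc_register_sram(struct platform_device *pdev,
    				  struct rproc *rproc)
    {
    	struct rproc_mem_entry *mem;
    	void __iomem *va;

    	va = devm_ioremap_wc(&pdev->dev, MY_SRAM_PA, MY_SRAM_SZ);
    	if (!va)
    		return -ENOMEM;

    	/*
    	 * The region is already mapped, so no alloc()/release() callbacks
    	 * are passed. rproc_alloc_vring() will find this entry by its
    	 * "vdev0vring0" name and only validate the requested da/len with
    	 * rproc_check_carveout_da() instead of registering a new
    	 * dma_alloc_coherent()-backed carveout.
    	 */
    	mem = rproc_mem_entry_init(&pdev->dev, (__force void *)va,
    				   (dma_addr_t)MY_SRAM_PA, MY_SRAM_SZ,
    				   MY_SRAM_DA, NULL, NULL, "vdev0vring0");
    	if (!mem)
    		return -ENOMEM;

    	rproc_add_carveout(rproc, mem);

    	return 0;
    }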
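rproc_of_resm_mem_entry_init() covers the case where the carveout is described by a reserved-memory node rather than mapped by the driver itself. A sketch assuming a standard "memory-region" phandle pointing at a statically placed reserved-memory node; the carveout name must match the name used in the firmware resource table, and "rproc-carveout" here is illustrative.

    #include <linux/ioport.h>
    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/platform_device.h>
    #include <linux/remoteproc.h>

    static int my_rproc_parse_memory_region(struct platform_device *pdev,
    					struct rproc *rproc)
    {
    	struct device_node *node;
    	struct resource res;
    	struct rproc_mem_entry *mem;
    	int ret;

    	node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
    	if (!node)
    		return -EINVAL;

    	ret = of_address_to_resource(node, 0, &res);
    	of_node_put(node);
    	if (ret)
    		return ret;

    	/*
    	 * Record index 0 of "memory-region" together with the region size
    	 * and device address; the device address is assumed here to equal
    	 * the host physical address. Backing/mapping of the region is left
    	 * to the platform driver.
    	 */
    	mem = rproc_of_resm_mem_entry_init(&pdev->dev, 0,
    					   resource_size(&res),
    					   (u32)res.start, "rproc-carveout");
    	if (!mem)
    		return -ENOMEM;

    	rproc_add_carveout(rproc, mem);

    	return 0;
    }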
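Similarly, rproc_coredump_add_custom_segment() lets a driver supply its own dump routine for segments that rproc_da_to_va() cannot reach. A sketch assuming a hypothetical driver context (struct my_rproc) whose SRAM is only reachable through an ioremapped window; the da and size values are illustrative.

    #include <linux/io.h>
    #include <linux/remoteproc.h>

    struct my_rproc {
    	void __iomem *sram_base;	/* ioremapped window onto the SRAM */
    };

    /* Fill the ELF segment from the I/O window instead of rproc_da_to_va(). */
    static void my_rproc_dump_sram(struct rproc *rproc,
    			       struct rproc_dump_segment *segment,
    			       void *dest)
    {
    	struct my_rproc *priv = segment->priv;

    	memcpy_fromio(dest, priv->sram_base, segment->size);
    }

    static int my_rproc_setup_coredump(struct rproc *rproc,
    				   struct my_rproc *priv)
    {
    	/* da 0x0, 64 KiB: illustrative values for the coprocessor SRAM. */
    	return rproc_coredump_add_custom_segment(rproc, 0x0, 0x10000,
    						 my_rproc_dump_sram, priv);
    }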