| author | Todd Poynor <toddpoynor@google.com> | 2018-07-31 13:24:37 -0700 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-08-01 20:07:02 +0200 |
| commit | 00b60c8d9ed7e9505b70143af04497625d5ef019 (patch) | |
| tree | 771942ff9d869cfcb781a70b3ab01f656864f771 /drivers | |
| parent | d821f8eb92ef500cee280e9467e8d89c8de5da0b (diff) | |
| download | linux-00b60c8d9ed7e9505b70143af04497625d5ef019.tar.bz2 | |
staging: gasket: pg tbl: remove static function forward declarations
Remove forward declarations of static functions and move code to avoid
forward references, per kernel style.
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
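[Editor's note: for readers unfamiliar with the style rule this commit applies, here is a minimal, hypothetical sketch — `helper()` and `driver_init()` are invented names, not code from the gasket driver. Defining a static function above its first caller makes a forward declaration redundant, which is why the patch can delete the whole declaration block and reorder the definitions instead. The two variants below are alternative versions of the same file, not one compilation unit.]

```c
/* Before: the definition follows its caller, so a forward declaration
 * is required for the file to compile.
 */
static int helper(int x);

int driver_init(void)
{
        return helper(42);
}

static int helper(int x)
{
        return x * 2;
}

/* After (kernel style): the definition is moved above its first caller
 * and the forward declaration is dropped. Behavior is unchanged; only
 * the order of definitions differs.
 */
static int helper(int x)
{
        return x * 2;
}

int driver_init(void)
{
        return helper(42);
}
```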
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/staging/gasket/gasket_page_table.c | 1211 |
1 file changed, 573 insertions, 638 deletions
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
index b42f6637b909..aa036b2e8193 100644
--- a/drivers/staging/gasket/gasket_page_table.c
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -214,71 +214,6 @@ struct gasket_page_table {
         struct gasket_coherent_page_entry *coherent_pages;
 };
 
-/* Mapping declarations */
-static int gasket_map_simple_pages(
-        struct gasket_page_table *pg_tbl, ulong host_addr,
-        ulong dev_addr, uint num_pages);
-static int gasket_map_extended_pages(
-        struct gasket_page_table *pg_tbl, ulong host_addr,
-        ulong dev_addr, uint num_pages);
-static int gasket_perform_mapping(
-        struct gasket_page_table *pg_tbl,
-        struct gasket_page_table_entry *pte_base, u64 __iomem *att_base,
-        ulong host_addr, uint num_pages, int is_simple_mapping);
-
-static int gasket_alloc_simple_entries(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages);
-static int gasket_alloc_extended_entries(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_entries);
-static int gasket_alloc_extended_subtable(
-        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
-        u64 __iomem *att_reg);
-
-/* Unmapping declarations */
-static void gasket_page_table_unmap_nolock(
-        struct gasket_page_table *pg_tbl, ulong start_addr, uint num_pages);
-static void gasket_page_table_unmap_all_nolock(
-        struct gasket_page_table *pg_tbl);
-static void gasket_unmap_simple_pages(
-        struct gasket_page_table *pg_tbl, ulong start_addr, uint num_pages);
-static void gasket_unmap_extended_pages(
-        struct gasket_page_table *pg_tbl, ulong start_addr, uint num_pages);
-static void gasket_perform_unmapping(
-        struct gasket_page_table *pg_tbl,
-        struct gasket_page_table_entry *pte_base, u64 __iomem *att_base,
-        uint num_pages, int is_simple_mapping);
-
-static void gasket_free_extended_subtable(
-        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
-        u64 __iomem *att_reg);
-static bool gasket_release_page(struct page *page);
-
-/* Other/utility declarations */
-static inline bool gasket_addr_is_simple(
-        struct gasket_page_table *pg_tbl, ulong addr);
-static bool gasket_is_simple_dev_addr_bad(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages);
-static bool gasket_is_extended_dev_addr_bad(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages);
-static bool gasket_is_pte_range_free(
-        struct gasket_page_table_entry *pte, uint num_entries);
-static void gasket_page_table_garbage_collect_nolock(
-        struct gasket_page_table *pg_tbl);
-
-/* Address format declarations */
-static ulong gasket_components_to_dev_address(
-        struct gasket_page_table *pg_tbl, int is_simple, uint page_index,
-        uint offset);
-static int gasket_simple_page_idx(
-        struct gasket_page_table *pg_tbl, ulong dev_addr);
-static ulong gasket_extended_lvl0_page_idx(
-        struct gasket_page_table *pg_tbl, ulong dev_addr);
-static ulong gasket_extended_lvl1_page_idx(
-        struct gasket_page_table *pg_tbl, ulong dev_addr);
-
-static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr);
-
-/* Public/exported functions */
 /* See gasket_page_table.h for description. */
 int gasket_page_table_init(
         struct gasket_page_table **ppg_tbl,
@@ -353,6 +288,85 @@ int gasket_page_table_init(
         return 0;
 }
 
+/*
+ * Check if a range of PTEs is free.
+ * The page table mutex must be held by the caller.
+ */
+static bool gasket_is_pte_range_free(
+        struct gasket_page_table_entry *ptes, uint num_entries)
+{
+        int i;
+
+        for (i = 0; i < num_entries; i++) {
+                if (ptes[i].status != PTE_FREE)
+                        return false;
+        }
+
+        return true;
+}
+
+/*
+ * Free a second level page [sub]table.
+ * The page table mutex must be held before this call.
+ */
+static void gasket_free_extended_subtable(
+        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
+        u64 __iomem *slot)
+{
+        /* Release the page table from the driver */
+        pte->status = PTE_FREE;
+
+        /* Release the page table from the device */
+        writeq(0, slot);
+        /* Force sync around the address release. */
+        mb();
+
+        if (pte->dma_addr)
+                dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
+
+        vfree(pte->sublevel);
+
+        if (pte->page)
+                free_page((ulong)page_address(pte->page));
+
+        memset(pte, 0, sizeof(struct gasket_page_table_entry));
+}
+
+/*
+ * Actually perform collection.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_page_table_garbage_collect_nolock(
+        struct gasket_page_table *pg_tbl)
+{
+        struct gasket_page_table_entry *pte;
+        u64 __iomem *slot;
+
+        /* XXX FIX ME XXX -- more efficient to keep a usage count */
+        /* rather than scanning the second level page tables */
+
+        for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
+             slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
+             pte < pg_tbl->entries + pg_tbl->config.total_entries;
+             pte++, slot++) {
+                if (pte->status == PTE_INUSE) {
+                        if (gasket_is_pte_range_free(
+                                pte->sublevel, GASKET_PAGES_PER_SUBTABLE))
+                                gasket_free_extended_subtable(
+                                        pg_tbl, pte, slot);
+                }
+        }
+}
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
+{
+        mutex_lock(&pg_tbl->mutex);
+        gasket_page_table_garbage_collect_nolock(pg_tbl);
+        mutex_unlock(&pg_tbl->mutex);
+}
+
 /* See gasket_page_table.h for description. */
 void gasket_page_table_cleanup(struct gasket_page_table *pg_tbl)
 {
@@ -404,321 +418,23 @@ int gasket_page_table_partition(
 EXPORT_SYMBOL(gasket_page_table_partition);
 
 /*
- * See gasket_page_table.h for general description.
- *
- * gasket_page_table_map calls either gasket_map_simple_pages() or
- * gasket_map_extended_pages() to actually perform the mapping.
- *
- * The page table mutex is held for the entire operation.
- */
-int gasket_page_table_map(
-        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
-        uint num_pages)
-{
-        int ret;
-
-        if (!num_pages)
-                return 0;
-
-        mutex_lock(&pg_tbl->mutex);
-
-        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
-                ret = gasket_map_simple_pages(
-                        pg_tbl, host_addr, dev_addr, num_pages);
-        } else {
-                ret = gasket_map_extended_pages(
-                        pg_tbl, host_addr, dev_addr, num_pages);
-        }
-
-        mutex_unlock(&pg_tbl->mutex);
-
-        dev_dbg(pg_tbl->device,
-                "%s done: ha %llx daddr %llx num %d, ret %d\n",
-                __func__, (unsigned long long)host_addr,
-                (unsigned long long)dev_addr, num_pages, ret);
-        return ret;
-}
-EXPORT_SYMBOL(gasket_page_table_map);
-
-/*
- * See gasket_page_table.h for general description.
- *
- * gasket_page_table_unmap takes the page table lock and calls either
- * gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
- * actually unmap the pages from device space.
+ * Return whether a host buffer was mapped as coherent memory.
  *
- * The page table mutex is held for the entire operation.
+ * A Gasket page_table currently support one contiguous dma range, mapped to one
+ * contiguous virtual memory range. Check if the host_addr is within that range.
  */
-void gasket_page_table_unmap(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
-{
-        if (!num_pages)
-                return;
-
-        mutex_lock(&pg_tbl->mutex);
-        gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
-        mutex_unlock(&pg_tbl->mutex);
-}
-EXPORT_SYMBOL(gasket_page_table_unmap);
-
-static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
-{
-        gasket_unmap_simple_pages(
-                pg_tbl, gasket_components_to_dev_address(pg_tbl, 1, 0, 0),
-                pg_tbl->num_simple_entries);
-        gasket_unmap_extended_pages(
-                pg_tbl, gasket_components_to_dev_address(pg_tbl, 0, 0, 0),
-                pg_tbl->num_extended_entries * GASKET_PAGES_PER_SUBTABLE);
-}
-
-/* See gasket_page_table.h for description. */
-void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
-{
-        mutex_lock(&pg_tbl->mutex);
-        gasket_page_table_unmap_all_nolock(pg_tbl);
-        mutex_unlock(&pg_tbl->mutex);
-}
-EXPORT_SYMBOL(gasket_page_table_unmap_all);
-
-/* See gasket_page_table.h for description. */
-void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
-{
-        mutex_lock(&pg_tbl->mutex);
-        gasket_page_table_unmap_all_nolock(pg_tbl);
-        writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
-        mutex_unlock(&pg_tbl->mutex);
-}
-
-/* See gasket_page_table.h for description. */
-void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
-{
-        mutex_lock(&pg_tbl->mutex);
-        gasket_page_table_garbage_collect_nolock(pg_tbl);
-        mutex_unlock(&pg_tbl->mutex);
-}
-
-/* See gasket_page_table.h for description. */
-int gasket_page_table_lookup_page(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, struct page **ppage,
-        ulong *poffset)
-{
-        uint page_num;
-        struct gasket_page_table_entry *pte;
-
-        mutex_lock(&pg_tbl->mutex);
-        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
-                page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
-                if (page_num >= pg_tbl->num_simple_entries)
-                        goto fail;
-
-                pte = pg_tbl->entries + page_num;
-                if (pte->status != PTE_INUSE)
-                        goto fail;
-        } else {
-                /* Find the level 0 entry, */
-                page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
-                if (page_num >= pg_tbl->num_extended_entries)
-                        goto fail;
-
-                pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
-                if (pte->status != PTE_INUSE)
-                        goto fail;
-
-                /* and its contained level 1 entry. */
-                page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
-                pte = pte->sublevel + page_num;
-                if (pte->status != PTE_INUSE)
-                        goto fail;
-        }
-
-        *ppage = pte->page;
-        *poffset = pte->offset;
-        mutex_unlock(&pg_tbl->mutex);
-        return 0;
-
-fail:
-        *ppage = NULL;
-        *poffset = 0;
-        mutex_unlock(&pg_tbl->mutex);
-        return -1;
-}
-
-/* See gasket_page_table.h for description. */
-bool gasket_page_table_are_addrs_bad(
-        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
-        ulong bytes)
-{
-        if (host_addr & (PAGE_SIZE - 1)) {
-                dev_err(pg_tbl->device,
-                        "host mapping address 0x%lx must be page aligned\n",
-                        host_addr);
-                return true;
-        }
-
-        return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
-}
-EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);
-
-/* See gasket_page_table.h for description. */
-bool gasket_page_table_is_dev_addr_bad(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, ulong bytes)
-{
-        uint num_pages = bytes / PAGE_SIZE;
-
-        if (bytes & (PAGE_SIZE - 1)) {
-                dev_err(pg_tbl->device,
-                        "mapping size 0x%lX must be page aligned\n", bytes);
-                return true;
-        }
-
-        if (num_pages == 0) {
-                dev_err(pg_tbl->device,
-                        "requested mapping is less than one page: %lu / %lu\n",
-                        bytes, PAGE_SIZE);
-                return true;
-        }
-
-        if (gasket_addr_is_simple(pg_tbl, dev_addr))
-                return gasket_is_simple_dev_addr_bad(
-                        pg_tbl, dev_addr, num_pages);
-        return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
-}
-EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);
-
-/* See gasket_page_table.h for description. */
-uint gasket_page_table_max_size(struct gasket_page_table *page_table)
-{
-        if (!page_table)
-                return 0;
-        return page_table->config.total_entries;
-}
-EXPORT_SYMBOL(gasket_page_table_max_size);
-
-/* See gasket_page_table.h for description. */
-uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
-{
-        if (!pg_tbl)
-                return 0;
-        return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
-}
-EXPORT_SYMBOL(gasket_page_table_num_entries);
-
-/* See gasket_page_table.h for description. */
-uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
+static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
 {
-        if (!pg_tbl)
-                return 0;
-        return pg_tbl->num_simple_entries;
-}
-EXPORT_SYMBOL(gasket_page_table_num_simple_entries);
+        u64 min, max;
 
-/* See gasket_page_table.h for description. */
-uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
-{
-        if (!pg_tbl)
+        /* whether the host address is within user virt range */
+        if (!pg_tbl->coherent_pages)
                 return 0;
-        return pg_tbl->num_active_pages;
-}
-EXPORT_SYMBOL(gasket_page_table_num_active_pages);
-
-/* See gasket_page_table.h */
-int gasket_page_table_system_status(struct gasket_page_table *page_table)
-{
-        if (!page_table)
-                return GASKET_STATUS_LAMED;
-
-        if (gasket_page_table_num_entries(page_table) == 0) {
-                dev_dbg(page_table->device, "Page table size is 0\n");
-                return GASKET_STATUS_LAMED;
-        }
-
-        return GASKET_STATUS_ALIVE;
-}
 
-/*
- * Allocate and map pages to simple addresses.
- * If there is an error, no pages are mapped.
- */
-static int gasket_map_simple_pages(
-        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
-        uint num_pages)
-{
-        int ret;
-        uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);
-
-        ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
-        if (ret) {
-                dev_err(pg_tbl->device,
-                        "page table slots %u (@ 0x%lx) to %u are not available\n",
-                        slot_idx, dev_addr, slot_idx + num_pages - 1);
-                return ret;
-        }
-
-        ret = gasket_perform_mapping(
-                pg_tbl, pg_tbl->entries + slot_idx,
-                pg_tbl->base_slot + slot_idx, host_addr, num_pages, 1);
-
-        if (ret) {
-                gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
-                dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
-        }
-        return ret;
-}
-
-/*
- * gasket_map_extended_pages - Get and map buffers to extended addresses.
- * If there is an error, no pages are mapped.
- */
-static int gasket_map_extended_pages(
-        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
-        uint num_pages)
-{
-        int ret;
-        ulong dev_addr_end;
-        uint slot_idx, remain, len;
-        struct gasket_page_table_entry *pte;
-        u64 __iomem *slot_base;
-
-        ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
-        if (ret) {
-                dev_addr_end = dev_addr + (num_pages / PAGE_SIZE) - 1;
-                dev_err(pg_tbl->device,
-                        "page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are "
-                        "not available\n",
-                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
-                        dev_addr,
-                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
-                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
-                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
-                return ret;
-        }
-
-        remain = num_pages;
-        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
-        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
-              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
-
-        while (remain > 0) {
-                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
-
-                slot_base =
-                        (u64 __iomem *)(page_address(pte->page) + pte->offset);
-                ret = gasket_perform_mapping(
-                        pg_tbl, pte->sublevel + slot_idx, slot_base + slot_idx,
-                        host_addr, len, 0);
-                if (ret) {
-                        gasket_page_table_unmap_nolock(
-                                pg_tbl, dev_addr, num_pages);
-                        return ret;
-                }
-
-                remain -= len;
-                slot_idx = 0;
-                pte++;
-                host_addr += len * PAGE_SIZE;
-        }
+        min = (u64)pg_tbl->coherent_pages[0].user_virt;
+        max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;
 
-        return 0;
+        return min <= host_addr && host_addr < max;
 }
 
 /*
@@ -813,6 +529,39 @@ static int gasket_perform_mapping(
 }
 
 /*
+ * Return the index of the page for the address in the simple table.
+ * Does not perform validity checking.
+ */
+static int gasket_simple_page_idx(
+        struct gasket_page_table *pg_tbl, ulong dev_addr)
+{
+        return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
+                (pg_tbl->config.total_entries - 1);
+}
+
+/*
+ * Return the level 0 page index for the given address.
+ * Does not perform validity checking.
+ */
+static ulong gasket_extended_lvl0_page_idx(
+        struct gasket_page_table *pg_tbl, ulong dev_addr)
+{
+        return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
+                ((1 << GASKET_EXTENDED_LVL0_WIDTH) - 1);
+}
+
+/*
+ * Return the level 1 page index for the given address.
+ * Does not perform validity checking.
+ */
+static ulong gasket_extended_lvl1_page_idx(
+        struct gasket_page_table *pg_tbl, ulong dev_addr)
+{
+        return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
+                (GASKET_PAGES_PER_SUBTABLE - 1);
+}
+
+/*
  * Allocate page table entries in a simple table.
  * The page table mutex must be held by the caller.
  */
@@ -827,122 +576,55 @@ static int gasket_alloc_simple_entries(
         return 0;
 }
 
-/*
- * Allocate slots in an extended page table. Check to see if a range of page
- * table slots are available. If necessary, memory is allocated for second level
- * page tables.
- *
- * Note that memory for second level page tables is allocated as needed, but
- * that memory is only freed on the final close of the device file, when the
- * page tables are repartitioned, or the the device is removed. If there is an
- * error or if the full range of slots is not available, any memory
- * allocated for second level page tables remains allocated until final close,
- * repartition, or device removal.
- *
- * The page table mutex must be held by the caller.
- */
-static int gasket_alloc_extended_entries(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_entries)
+/* Safely return a page to the OS. */
+static bool gasket_release_page(struct page *page)
 {
-        int ret = 0;
-        uint remain, subtable_slot_idx, len;
-        struct gasket_page_table_entry *pte;
-        u64 __iomem *slot;
-
-        remain = num_entries;
-        subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
-        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
-              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
-        slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
-               gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
-
-        while (remain > 0) {
-                len = min(remain,
-                          GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);
-
-                if (pte->status == PTE_FREE) {
-                        ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
-                        if (ret) {
-                                dev_err(pg_tbl->device,
-                                        "no memory for extended addr subtable\n");
-                                return ret;
-                        }
-                } else {
-                        if (!gasket_is_pte_range_free(
-                                pte->sublevel + subtable_slot_idx, len))
-                                return -EBUSY;
-                }
+        if (!page)
+                return false;
 
-                remain -= len;
-                subtable_slot_idx = 0;
-                pte++;
-                slot++;
-        }
+        if (!PageReserved(page))
+                SetPageDirty(page);
+        put_page(page);
 
-        return 0;
+        return true;
 }
 
 /*
- * Allocate a second level page table.
+ * Unmap and release mapped pages.
  * The page table mutex must be held by the caller.
  */
-static int gasket_alloc_extended_subtable(
-        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
-        u64 __iomem *slot)
+static void gasket_perform_unmapping(
        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *ptes,
        u64 __iomem *slots, uint num_pages, int is_simple_mapping)
 {
-        ulong page_addr, subtable_bytes;
-        dma_addr_t dma_addr;
-
-        /* XXX FIX ME XXX this is inefficient for non-4K page sizes */
-
-        /* GFP_DMA flag must be passed to architectures for which
-         * part of the memory range is not considered DMA'able.
-         * This seems to be the case for Juno board with 4.5.0 Linaro kernel
+        int i;
+        /*
+         * For each page table entry and corresponding entry in the device's
+         * address translation table:
         */
-        page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
-        if (!page_addr)
-                return -ENOMEM;
-        pte->page = virt_to_page((void *)page_addr);
-        pte->offset = 0;
-
-        subtable_bytes = sizeof(struct gasket_page_table_entry) *
-                GASKET_PAGES_PER_SUBTABLE;
-        pte->sublevel = vzalloc(subtable_bytes);
-        if (!pte->sublevel) {
-                free_page(page_addr);
-                memset(pte, 0, sizeof(struct gasket_page_table_entry));
-                return -ENOMEM;
-        }
-
-        /* Map the page into DMA space. */
-        pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
-                                     DMA_BIDIRECTIONAL);
-        /* Wait until the page is mapped. */
-        mb();
-
-        /* make the addresses available to the device */
-        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
-        writeq(dma_addr, slot);
-
-        pte->status = PTE_INUSE;
-
-        return 0;
-}
+        for (i = 0; i < num_pages; i++) {
+                /* release the address from the device, */
+                if (is_simple_mapping || ptes[i].status == PTE_INUSE)
+                        writeq(0, &slots[i]);
+                else
+                        ((u64 __force *)slots)[i] = 0;
+                /* Force sync around the address release. */
+                mb();
 
-/*
- * Non-locking entry to unmapping routines.
- * The page table mutex must be held by the caller.
- */
-static void gasket_page_table_unmap_nolock(
-        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
-{
-        if (!num_pages)
-                return;
+                /* release the address from the driver, */
+                if (ptes[i].status == PTE_INUSE) {
+                        if (ptes[i].dma_addr) {
+                                dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
+                                               PAGE_SIZE, DMA_FROM_DEVICE);
+                        }
+                        if (gasket_release_page(ptes[i].page))
+                                --pg_tbl->num_active_pages;
+                }
+                ptes[i].status = PTE_FREE;
 
-        if (gasket_addr_is_simple(pg_tbl, dev_addr))
-                gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
-        else
-                gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
+                /* and clear the PTE. */
+                memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
+        }
 }
 
 /*
@@ -992,90 +674,49 @@ static void gasket_unmap_extended_pages(
         }
 }
 
-/*
- * Unmap and release mapped pages.
- * The page table mutex must be held by the caller.
- */
-static void gasket_perform_unmapping(
-        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *ptes,
-        u64 __iomem *slots, uint num_pages, int is_simple_mapping)
+/* Evaluates to nonzero if the specified virtual address is simple. */
+static inline bool gasket_addr_is_simple(
+        struct gasket_page_table *pg_tbl, ulong addr)
 {
-        int i;
-        /*
-         * For each page table entry and corresponding entry in the device's
-         * address translation table:
-         */
-        for (i = 0; i < num_pages; i++) {
-                /* release the address from the device, */
-                if (is_simple_mapping || ptes[i].status == PTE_INUSE)
-                        writeq(0, &slots[i]);
-                else
-                        ((u64 __force *)slots)[i] = 0;
-                /* Force sync around the address release. */
-                mb();
-
-                /* release the address from the driver, */
-                if (ptes[i].status == PTE_INUSE) {
-                        if (ptes[i].dma_addr) {
-                                dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
-                                               PAGE_SIZE, DMA_FROM_DEVICE);
-                        }
-                        if (gasket_release_page(ptes[i].page))
-                                --pg_tbl->num_active_pages;
-                }
-                ptes[i].status = PTE_FREE;
-
-                /* and clear the PTE. */
-                memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
-        }
+        return !((addr) & (pg_tbl)->extended_flag);
 }
 
 /*
- * Free a second level page [sub]table.
- * The page table mutex must be held before this call.
+ * Convert (simple, page, offset) into a device address.
+ * Examples:
+ * Simple page 0, offset 32:
+ *  Input (0, 0, 32), Output 0x20
+ * Simple page 1000, offset 511:
+ *  Input (0, 1000, 512), Output 0x3E81FF
+ * Extended page 0, offset 32:
+ *  Input (0, 0, 32), Output 0x8000000020
+ * Extended page 1000, offset 511:
+ *  Input (1, 1000, 512), Output 0x8003E81FF
  */
-static void gasket_free_extended_subtable(
-        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
-        u64 __iomem *slot)
-{
-        /* Release the page table from the driver */
-        pte->status = PTE_FREE;
-
-        /* Release the page table from the device */
-        writeq(0, slot);
-        /* Force sync around the address release. */
-        mb();
-
-        if (pte->dma_addr)
-                dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
-                               DMA_BIDIRECTIONAL);
-
-        vfree(pte->sublevel);
-
-        if (pte->page)
-                free_page((ulong)page_address(pte->page));
-
-        memset(pte, 0, sizeof(struct gasket_page_table_entry));
-}
-
-/* Safely return a page to the OS. */
-static bool gasket_release_page(struct page *page)
+static ulong gasket_components_to_dev_address(
+        struct gasket_page_table *pg_tbl, int is_simple, uint page_index,
+        uint offset)
 {
-        if (!page)
-                return false;
-
-        if (!PageReserved(page))
-                SetPageDirty(page);
-        put_page(page);
+        ulong lvl0_index, lvl1_index;
 
-        return true;
-}
+        if (is_simple) {
+                /* Return simple addresses directly. */
+                lvl0_index = page_index & (pg_tbl->config.total_entries - 1);
+                return (lvl0_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;
+        }
 
-/* Evaluates to nonzero if the specified virtual address is simple. */
-static inline bool gasket_addr_is_simple(
-        struct gasket_page_table *pg_tbl, ulong addr)
-{
-        return !((addr) & (pg_tbl)->extended_flag);
+        /*
+         * This could be compressed into fewer statements, but
+         * A) the compiler should optimize it
+         * B) this is not slow
+         * C) this is an uncommon operation
+         * D) this is actually readable this way.
+         */
+        lvl0_index = page_index / GASKET_PAGES_PER_SUBTABLE;
+        lvl1_index = page_index & (GASKET_PAGES_PER_SUBTABLE - 1);
+        return (pg_tbl)->extended_flag |
+               (lvl0_index << GASKET_EXTENDED_LVL0_SHIFT) |
+               (lvl1_index << GASKET_EXTENDED_LVL1_SHIFT) | offset;
 }
 
 /*
@@ -1177,137 +818,431 @@ static bool gasket_is_extended_dev_addr_bad(
 }
 
 /*
- * Check if a range of PTEs is free.
+ * Non-locking entry to unmapping routines.
  * The page table mutex must be held by the caller.
 */
-static bool gasket_is_pte_range_free(
-        struct gasket_page_table_entry *ptes, uint num_entries)
+static void gasket_page_table_unmap_nolock(
+        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
 {
-        int i;
+        if (!num_pages)
+                return;
 
-        for (i = 0; i < num_entries; i++) {
-                if (ptes[i].status != PTE_FREE)
-                        return false;
+        if (gasket_addr_is_simple(pg_tbl, dev_addr))
+                gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
+        else
+                gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
+}
+
+/*
+ * Allocate and map pages to simple addresses.
+ * If there is an error, no pages are mapped.
+ */
+static int gasket_map_simple_pages(
+        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
+        uint num_pages)
+{
+        int ret;
+        uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);
+
+        ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
+        if (ret) {
+                dev_err(pg_tbl->device,
+                        "page table slots %u (@ 0x%lx) to %u are not available\n",
+                        slot_idx, dev_addr, slot_idx + num_pages - 1);
+                return ret;
         }
 
-        return true;
+        ret = gasket_perform_mapping(
+                pg_tbl, pg_tbl->entries + slot_idx,
+                pg_tbl->base_slot + slot_idx, host_addr, num_pages, 1);
+
+        if (ret) {
+                gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
+                dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
+        }
+        return ret;
 }
 
 /*
- * Actually perform collection.
+ * Allocate a second level page table.
  * The page table mutex must be held by the caller.
 */
-static void gasket_page_table_garbage_collect_nolock(
-        struct gasket_page_table *pg_tbl)
+static int gasket_alloc_extended_subtable(
+        struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
+        u64 __iomem *slot)
 {
+        ulong page_addr, subtable_bytes;
+        dma_addr_t dma_addr;
+
+        /* XXX FIX ME XXX this is inefficient for non-4K page sizes */
+
+        /* GFP_DMA flag must be passed to architectures for which
+         * part of the memory range is not considered DMA'able.
+         * This seems to be the case for Juno board with 4.5.0 Linaro kernel
+         */
+        page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+        if (!page_addr)
+                return -ENOMEM;
+        pte->page = virt_to_page((void *)page_addr);
+        pte->offset = 0;
+
+        subtable_bytes = sizeof(struct gasket_page_table_entry) *
+                GASKET_PAGES_PER_SUBTABLE;
+        pte->sublevel = vzalloc(subtable_bytes);
+        if (!pte->sublevel) {
+                free_page(page_addr);
+                memset(pte, 0, sizeof(struct gasket_page_table_entry));
+                return -ENOMEM;
+        }
+
+        /* Map the page into DMA space. */
+        pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
+                                     DMA_BIDIRECTIONAL);
+        /* Wait until the page is mapped. */
+        mb();
+
+        /* make the addresses available to the device */
+        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
+        writeq(dma_addr, slot);
+
+        pte->status = PTE_INUSE;
+
+        return 0;
+}
+
+/*
+ * Allocate slots in an extended page table. Check to see if a range of page
+ * table slots are available. If necessary, memory is allocated for second level
+ * page tables.
+ *
+ * Note that memory for second level page tables is allocated as needed, but
+ * that memory is only freed on the final close of the device file, when the
+ * page tables are repartitioned, or the the device is removed. If there is an
+ * error or if the full range of slots is not available, any memory
+ * allocated for second level page tables remains allocated until final close,
+ * repartition, or device removal.
+ *
+ * The page table mutex must be held by the caller.
+ */
+static int gasket_alloc_extended_entries(
+        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_entries)
+{
+        int ret = 0;
+        uint remain, subtable_slot_idx, len;
         struct gasket_page_table_entry *pte;
         u64 __iomem *slot;
 
-        /* XXX FIX ME XXX -- more efficient to keep a usage count */
-        /* rather than scanning the second level page tables */
+        remain = num_entries;
+        subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+        slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
+               gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
 
-        for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
-             slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
-             pte < pg_tbl->entries + pg_tbl->config.total_entries;
-             pte++, slot++) {
-                if (pte->status == PTE_INUSE) {
-                        if (gasket_is_pte_range_free(
-                                pte->sublevel, GASKET_PAGES_PER_SUBTABLE))
-                                gasket_free_extended_subtable(
-                                        pg_tbl, pte, slot);
+        while (remain > 0) {
+                len = min(remain,
+                          GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);
+
+                if (pte->status == PTE_FREE) {
+                        ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
+                        if (ret) {
+                                dev_err(pg_tbl->device,
+                                        "no memory for extended addr subtable\n");
+                                return ret;
+                        }
+                } else {
+                        if (!gasket_is_pte_range_free(
+                                pte->sublevel + subtable_slot_idx, len))
+                                return -EBUSY;
                 }
+
+                remain -= len;
+                subtable_slot_idx = 0;
+                pte++;
+                slot++;
         }
+
+        return 0;
 }
 
 /*
- * Convert (simple, page, offset) into a device address.
- * Examples:
- * Simple page 0, offset 32:
- *  Input (0, 0, 32), Output 0x20
- * Simple page 1000, offset 511:
- *  Input (0, 1000, 512), Output 0x3E81FF
- * Extended page 0, offset 32:
- *  Input (0, 0, 32), Output 0x8000000020
- * Extended page 1000, offset 511:
- *  Input (1, 1000, 512), Output 0x8003E81FF
+ * gasket_map_extended_pages - Get and map buffers to extended addresses.
+ * If there is an error, no pages are mapped.
 */
-static ulong gasket_components_to_dev_address(
-        struct gasket_page_table *pg_tbl, int is_simple, uint page_index,
-        uint offset)
+static int gasket_map_extended_pages(
+        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
+        uint num_pages)
 {
-        ulong lvl0_index, lvl1_index;
+        int ret;
+        ulong dev_addr_end;
+        uint slot_idx, remain, len;
+        struct gasket_page_table_entry *pte;
+        u64 __iomem *slot_base;
 
-        if (is_simple) {
-                /* Return simple addresses directly. */
-                lvl0_index = page_index & (pg_tbl->config.total_entries - 1);
-                return (lvl0_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;
+        ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
+        if (ret) {
+                dev_addr_end = dev_addr + (num_pages / PAGE_SIZE) - 1;
+                dev_err(pg_tbl->device,
+                        "page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are "
+                        "not available\n",
+                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
+                        dev_addr,
+                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
+                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
+                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
+                return ret;
         }
 
-        /*
-         * This could be compressed into fewer statements, but
-         * A) the compiler should optimize it
-         * B) this is not slow
-         * C) this is an uncommon operation
-         * D) this is actually readable this way.
-         */
-        lvl0_index = page_index / GASKET_PAGES_PER_SUBTABLE;
-        lvl1_index = page_index & (GASKET_PAGES_PER_SUBTABLE - 1);
-        return (pg_tbl)->extended_flag |
-               (lvl0_index << GASKET_EXTENDED_LVL0_SHIFT) |
-               (lvl1_index << GASKET_EXTENDED_LVL1_SHIFT) | offset;
+        remain = num_pages;
+        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+        while (remain > 0) {
+                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
+
+                slot_base =
+                        (u64 __iomem *)(page_address(pte->page) + pte->offset);
+                ret = gasket_perform_mapping(
+                        pg_tbl, pte->sublevel + slot_idx, slot_base + slot_idx,
+                        host_addr, len, 0);
+                if (ret) {
+                        gasket_page_table_unmap_nolock(
+                                pg_tbl, dev_addr, num_pages);
+                        return ret;
+                }
+
+                remain -= len;
+                slot_idx = 0;
+                pte++;
+                host_addr += len * PAGE_SIZE;
+        }
+
+        return 0;
 }
 
 /*
- * Return the index of the page for the address in the simple table.
- * Does not perform validity checking.
+ * See gasket_page_table.h for general description.
+ *
+ * gasket_page_table_map calls either gasket_map_simple_pages() or
+ * gasket_map_extended_pages() to actually perform the mapping.
+ *
+ * The page table mutex is held for the entire operation.
 */
-static int gasket_simple_page_idx(
-        struct gasket_page_table *pg_tbl, ulong dev_addr)
+int gasket_page_table_map(
+        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
+        uint num_pages)
 {
-        return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
-                (pg_tbl->config.total_entries - 1);
+        int ret;
+
+        if (!num_pages)
+                return 0;
+
+        mutex_lock(&pg_tbl->mutex);
+
+        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
+                ret = gasket_map_simple_pages(
+                        pg_tbl, host_addr, dev_addr, num_pages);
+        } else {
+                ret = gasket_map_extended_pages(
+                        pg_tbl, host_addr, dev_addr, num_pages);
+        }
+
+        mutex_unlock(&pg_tbl->mutex);
+
+        dev_dbg(pg_tbl->device,
+                "%s done: ha %llx daddr %llx num %d, ret %d\n",
+                __func__, (unsigned long long)host_addr,
+                (unsigned long long)dev_addr, num_pages, ret);
+        return ret;
 }
+EXPORT_SYMBOL(gasket_page_table_map);
 
 /*
- * Return the level 0 page index for the given address.
- * Does not perform validity checking.
+ * See gasket_page_table.h for general description.
+ *
+ * gasket_page_table_unmap takes the page table lock and calls either
+ * gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
+ * actually unmap the pages from device space.
+ *
+ * The page table mutex is held for the entire operation.
 */
-static ulong gasket_extended_lvl0_page_idx(
-        struct gasket_page_table *pg_tbl, ulong dev_addr)
+void gasket_page_table_unmap(
+        struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
 {
-        return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
-                ((1 << GASKET_EXTENDED_LVL0_WIDTH) - 1);
+        if (!num_pages)
+                return;
+
+        mutex_lock(&pg_tbl->mutex);
+        gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
+        mutex_unlock(&pg_tbl->mutex);
 }
+EXPORT_SYMBOL(gasket_page_table_unmap);
 
-/*
- * Return the level 1 page index for the given address.
- * Does not perform validity checking.
- */
-static ulong gasket_extended_lvl1_page_idx(
-        struct gasket_page_table *pg_tbl, ulong dev_addr)
+static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
 {
-        return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
-                (GASKET_PAGES_PER_SUBTABLE - 1);
+        gasket_unmap_simple_pages(
+                pg_tbl, gasket_components_to_dev_address(pg_tbl, 1, 0, 0),
+                pg_tbl->num_simple_entries);
+        gasket_unmap_extended_pages(
+                pg_tbl, gasket_components_to_dev_address(pg_tbl, 0, 0, 0),
+                pg_tbl->num_extended_entries * GASKET_PAGES_PER_SUBTABLE);
 }
 
-/*
- * Return whether a host buffer was mapped as coherent memory.
- *
- * A Gasket page_table currently support one contiguous dma range, mapped to one
- * contiguous virtual memory range. Check if the host_addr is within that range.
- */
-static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
+/* See gasket_page_table.h for description. */
+void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
 {
-        u64 min, max;
+        mutex_lock(&pg_tbl->mutex);
+        gasket_page_table_unmap_all_nolock(pg_tbl);
+        mutex_unlock(&pg_tbl->mutex);
+}
+EXPORT_SYMBOL(gasket_page_table_unmap_all);
 
-        /* whether the host address is within user virt range */
-        if (!pg_tbl->coherent_pages)
+/* See gasket_page_table.h for description. */
+void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
+{
+        mutex_lock(&pg_tbl->mutex);
+        gasket_page_table_unmap_all_nolock(pg_tbl);
+        writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
+        mutex_unlock(&pg_tbl->mutex);
+}
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_lookup_page(
+        struct gasket_page_table *pg_tbl, ulong dev_addr, struct page **ppage,
+        ulong *poffset)
+{
+        uint page_num;
+        struct gasket_page_table_entry *pte;
+
+        mutex_lock(&pg_tbl->mutex);
+        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
+                page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
+                if (page_num >= pg_tbl->num_simple_entries)
+                        goto fail;
+
+                pte = pg_tbl->entries + page_num;
+                if (pte->status != PTE_INUSE)
+                        goto fail;
+        } else {
+                /* Find the level 0 entry, */
+                page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+                if (page_num >= pg_tbl->num_extended_entries)
+                        goto fail;
+
+                pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
+                if (pte->status != PTE_INUSE)
+                        goto fail;
+
+                /* and its contained level 1 entry. */
+                page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+                pte = pte->sublevel + page_num;
+                if (pte->status != PTE_INUSE)
+                        goto fail;
+        }
+
+        *ppage = pte->page;
+        *poffset = pte->offset;
+        mutex_unlock(&pg_tbl->mutex);
+        return 0;
+
+fail:
+        *ppage = NULL;
+        *poffset = 0;
+        mutex_unlock(&pg_tbl->mutex);
+        return -1;
+}
+
+/* See gasket_page_table.h for description. */
+bool gasket_page_table_are_addrs_bad(
+        struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
+        ulong bytes)
+{
+        if (host_addr & (PAGE_SIZE - 1)) {
+                dev_err(pg_tbl->device,
+                        "host mapping address 0x%lx must be page aligned\n",
+                        host_addr);
+                return true;
+        }
+
+        return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
+}
+EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);
+
+/* See gasket_page_table.h for description. */
+bool gasket_page_table_is_dev_addr_bad(
+        struct gasket_page_table *pg_tbl, ulong dev_addr, ulong bytes)
+{
+        uint num_pages = bytes / PAGE_SIZE;
+
+        if (bytes & (PAGE_SIZE - 1)) {
+                dev_err(pg_tbl->device,
+                        "mapping size 0x%lX must be page aligned\n", bytes);
+                return true;
+        }
+
+        if (num_pages == 0) {
+                dev_err(pg_tbl->device,
+                        "requested mapping is less than one page: %lu / %lu\n",
+                        bytes, PAGE_SIZE);
+                return true;
+        }
+
+        if (gasket_addr_is_simple(pg_tbl, dev_addr))
+                return gasket_is_simple_dev_addr_bad(
+                        pg_tbl, dev_addr, num_pages);
+        return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
+}
+EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_max_size(struct gasket_page_table *page_table)
+{
+        if (!page_table)
                 return 0;
+        return page_table->config.total_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_max_size);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
+{
+        if (!pg_tbl)
+                return 0;
+        return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_num_entries);
 
-        min = (u64)pg_tbl->coherent_pages[0].user_virt;
-        max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
+{
+        if (!pg_tbl)
+                return 0;
+        return pg_tbl->num_simple_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_num_simple_entries);
 
-        return min <= host_addr && host_addr < max;
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
+{
+        if (!pg_tbl)
+                return 0;
+        return pg_tbl->num_active_pages;
+}
+EXPORT_SYMBOL(gasket_page_table_num_active_pages);
+
+/* See gasket_page_table.h */
+int gasket_page_table_system_status(struct gasket_page_table *page_table)
+{
+        if (!page_table)
+                return GASKET_STATUS_LAMED;
+
+        if (gasket_page_table_num_entries(page_table) == 0) {
+                dev_dbg(page_table->device, "Page table size is 0\n");
+                return GASKET_STATUS_LAMED;
+        }
+
+        return GASKET_STATUS_ALIVE;
 }
 
 /* Record the host_addr to coherent dma memory mapping. */