Diffstat (limited to 'drivers/of')
 drivers/of/Kconfig      |   4 +
 drivers/of/address.c    |   6 +-
 drivers/of/base.c       | 130 +++++------------
 drivers/of/dynamic.c    |   2 +-
 drivers/of/of_private.h |   6 +-
 drivers/of/overlay.c    |  11 ---
 6 files changed, 33 insertions(+), 126 deletions(-)
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 37c2ccbefecd..d91618641be6 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -103,4 +103,8 @@ config OF_OVERLAY
 config OF_NUMA
 	bool
 
+config OF_DMA_DEFAULT_COHERENT
+	# arches should select this if DMA is coherent by default for OF devices
+	bool
+
 endif # OF
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 99c1b8058559..e8a39c3ec4d4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -995,12 +995,16 @@ out:
  * @np:	device node
  *
  * It returns true if "dma-coherent" property was found
- * for this device in DT.
+ * for this device in the DT, or if DMA is coherent by
+ * default for OF devices on the current platform.
  */
 bool of_dma_is_coherent(struct device_node *np)
 {
 	struct device_node *node = of_node_get(np);
 
+	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+		return true;
+
 	while (node) {
 		if (of_property_read_bool(node, "dma-coherent")) {
 			of_node_put(node);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index db7fbc0c0893..8d173fb3552a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -135,115 +135,38 @@ int __weak of_node_to_nid(struct device_node *np)
 }
 #endif
 
-/*
- * Assumptions behind phandle_cache implementation:
- *   - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- *   - the phandle lookup overhead reduction provided by the cache
- *     will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS	7
+#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
 
-/*
- * Caller must hold devtree_lock.
- */
-static void __of_free_phandle_cache(void)
+static u32 of_phandle_cache_hash(phandle handle)
 {
-	u32 cache_entries = phandle_cache_mask + 1;
-	u32 k;
-
-	if (!phandle_cache)
-		return;
-
-	for (k = 0; k < cache_entries; k++)
-		of_node_put(phandle_cache[k]);
-
-	kfree(phandle_cache);
-	phandle_cache = NULL;
+	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
 }
 
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
 /*
  * Caller must hold devtree_lock.
  */
-void __of_free_phandle_cache_entry(phandle handle)
+void __of_phandle_cache_inv_entry(phandle handle)
 {
-	phandle masked_handle;
+	u32 handle_hash;
 	struct device_node *np;
 
 	if (!handle)
 		return;
 
-	masked_handle = handle & phandle_cache_mask;
-
-	if (phandle_cache) {
-		np = phandle_cache[masked_handle];
-		if (np && handle == np->phandle) {
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-		}
-	}
-}
-
-void of_populate_phandle_cache(void)
-{
-	unsigned long flags;
-	u32 cache_entries;
-	struct device_node *np;
-	u32 phandles = 0;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
-			phandles++;
-
-	if (!phandles)
-		goto out;
+	handle_hash = of_phandle_cache_hash(handle);
 
-	cache_entries = roundup_pow_of_two(phandles);
-	phandle_cache_mask = cache_entries - 1;
-
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	if (!phandle_cache)
-		goto out;
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
-			of_node_get(np);
-			phandle_cache[np->phandle & phandle_cache_mask] = np;
-		}
-
-out:
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	np = phandle_cache[handle_hash];
+	if (np && handle == np->phandle)
+		phandle_cache[handle_hash] = NULL;
 }
 
 void __init of_core_init(void)
 {
 	struct device_node *np;
 
-	of_populate_phandle_cache();
 
 	/* Create the kset, and register existing nodes */
 	mutex_lock(&of_mutex);
@@ -253,8 +176,11 @@ void __init of_core_init(void)
 		pr_err("failed to register existing nodes\n");
 		return;
 	}
-	for_each_of_allnodes(np)
+	for_each_of_allnodes(np) {
 		__of_attach_node_sysfs(np);
+		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+	}
 	mutex_unlock(&of_mutex);
 
 	/* Symlink in /proc as required by userspace ABI */
@@ -1235,36 +1161,24 @@ struct device_node *of_find_node_by_phandle(phandle handle)
 {
 	struct device_node *np = NULL;
 	unsigned long flags;
-	phandle masked_handle;
+	u32 handle_hash;
 
 	if (!handle)
 		return NULL;
 
+	handle_hash = of_phandle_cache_hash(handle);
+
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	masked_handle = handle & phandle_cache_mask;
-
-	if (phandle_cache) {
-		if (phandle_cache[masked_handle] &&
-		    handle == phandle_cache[masked_handle]->phandle)
-			np = phandle_cache[masked_handle];
-		if (np && of_node_check_flag(np, OF_DETACHED)) {
-			WARN_ON(1); /* did not uncache np on node removal */
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-			np = NULL;
-		}
-	}
+	if (phandle_cache[handle_hash] &&
+	    handle == phandle_cache[handle_hash]->phandle)
+		np = phandle_cache[handle_hash];
 
 	if (!np) {
 		for_each_of_allnodes(np)
 			if (np->phandle == handle &&
 			    !of_node_check_flag(np, OF_DETACHED)) {
-				if (phandle_cache) {
-					/* will put when removed from cache */
-					of_node_get(np);
-					phandle_cache[masked_handle] = np;
-				}
+				phandle_cache[handle_hash] = np;
 				break;
 			}
 	}
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 49b16f76d78e..08fd823edac9 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -276,7 +276,7 @@ void __of_detach_node(struct device_node *np)
 	of_node_set_flag(np, OF_DETACHED);
 
 	/* race with of_find_node_by_phandle() prevented by devtree_lock */
-	__of_free_phandle_cache_entry(np->phandle);
+	__of_phandle_cache_inv_entry(np->phandle);
 }
 
 /**
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 66294d29942a..207863c151a5 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -84,15 +84,11 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
 int of_resolve_phandles(struct device_node *tree);
 #endif
 
-#if defined(CONFIG_OF_DYNAMIC)
-void __of_free_phandle_cache_entry(phandle handle);
-#endif
+void __of_phandle_cache_inv_entry(phandle handle);
 
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
-int of_free_phandle_cache(void);
-void of_populate_phandle_cache(void);
 #else
 static inline void of_overlay_mutex_lock(void) {};
 static inline void of_overlay_mutex_unlock(void) {};
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 9617b7df7c4d..c9219fddf44b 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -974,8 +974,6 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
 		goto err_free_overlay_changeset;
 	}
 
-	of_populate_phandle_cache();
-
 	ret = __of_changeset_apply_notify(&ovcs->cset);
 	if (ret)
 		pr_err("overlay apply changeset entry notify error %d\n", ret);
@@ -1218,17 +1216,8 @@ int of_overlay_remove(int *ovcs_id)
 
 	list_del(&ovcs->ovcs_list);
 
-	/*
-	 * Disable phandle cache. Avoids race condition that would arise
-	 * from removing cache entry when the associated node is deleted.
-	 */
-	of_free_phandle_cache();
-
 	ret_apply = 0;
 	ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
-
-	of_populate_phandle_cache();
-
 	if (ret) {
 		if (ret_apply)
 			devicetree_state_flags |= DTSF_REVERT_FAIL;
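
The address.c hunk shows only the top of of_dma_is_coherent(), so here is a sketch of how the whole function reads after the change. This is a reconstruction, not a copy from the tree: the IS_ENABLED() early-out is taken verbatim from the hunk above, while the tail of the loop (the parent walk via of_get_next_parent()) is assumed from the unshown diff context and may differ in detail.

bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node = of_node_get(np);

	/* new: the platform selected OF_DMA_DEFAULT_COHERENT, so all
	 * OF devices are coherent without a per-device DT property */
	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
		return true;

	/* pre-existing behavior: look for "dma-coherent" on the node
	 * or any of its ancestors */
	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			of_node_put(node);
			return true;
		}
		node = of_get_next_parent(node);
	}
	of_node_put(node);
	return false;
}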
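
As a standalone illustration of the new scheme in base.c: the dynamically sized, refcounted cache is replaced by a fixed 128-entry array indexed by a multiplicative hash of the phandle. A collision simply evicts the previous occupant, and a miss falls back to the same full-tree scan the pre-cache code performed. The sketch below is userspace C, not kernel code — hash_32() is re-implemented with the kernel's GOLDEN_RATIO_32 constant, and device_node is pared down to the single field the cache uses.

#include <stdint.h>
#include <stdio.h>

#define OF_PHANDLE_CACHE_BITS	7
#define OF_PHANDLE_CACHE_SZ	(1u << OF_PHANDLE_CACHE_BITS)
#define GOLDEN_RATIO_32		0x61C88647u	/* constant used by the kernel's hash_32() */

struct device_node {
	uint32_t phandle;
	/* everything else a real node carries is elided */
};

static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

/* multiplicative hash: multiply, then keep the top 'bits' bits */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

static uint32_t of_phandle_cache_hash(uint32_t handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}

/* lookup: check the single bucket; on a miss the kernel scans the
 * whole tree and then repopulates the bucket */
static struct device_node *cache_lookup(uint32_t handle)
{
	struct device_node *np = phandle_cache[of_phandle_cache_hash(handle)];

	if (np && np->phandle == handle)
		return np;
	return NULL;
}

/* insert: a colliding entry just overwrites the previous occupant */
static void cache_insert(struct device_node *np)
{
	phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
}

/* mirrors __of_phandle_cache_inv_entry(): clear the bucket only if it
 * still points at the node being detached */
static void cache_invalidate(uint32_t handle)
{
	uint32_t h;

	if (!handle)
		return;

	h = of_phandle_cache_hash(handle);
	if (phandle_cache[h] && phandle_cache[h]->phandle == handle)
		phandle_cache[h] = NULL;
}

int main(void)
{
	struct device_node a = { .phandle = 1 }, b = { .phandle = 2 };

	cache_insert(&a);
	cache_insert(&b);
	printf("lookup(1): %s\n", cache_lookup(1) ? "hit" : "miss");
	cache_invalidate(1);
	printf("lookup(1) after invalidate: %s\n",
	       cache_lookup(1) ? "hit" : "miss");
	return 0;
}

Because entries carry no reference counts, a stale pointer can only arise from node removal, which the patch handles by invalidating the bucket under devtree_lock; the worst case for any lookup is therefore a miss followed by the full scan. This is also why the overlay code no longer needs to tear down and repopulate the cache around changeset application and revert.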