From 0a65579cdd28bed3e84e1b4929c3080da4f06d79 Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:32 +0800 Subject: swiotlb: Refactor swiotlb init functions Add a new function, swiotlb_init_io_tlb_mem, for the io_tlb_mem struct initialization to make the code reusable. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Acked-by: Stefano Stabellini Tested-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index e50df8d8f87e..414db5fc8de9 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -168,9 +168,28 @@ void __init swiotlb_update_mem_attributes(void) memset(vaddr, 0, bytes); } -int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) +static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, + unsigned long nslabs, bool late_alloc) { + void *vaddr = phys_to_virt(start); unsigned long bytes = nslabs << IO_TLB_SHIFT, i; + + mem->nslabs = nslabs; + mem->start = start; + mem->end = mem->start + bytes; + mem->index = 0; + mem->late_alloc = late_alloc; + spin_lock_init(&mem->lock); + for (i = 0; i < mem->nslabs; i++) { + mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; + } + memset(vaddr, 0, bytes); +} + +int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) +{ struct io_tlb_mem *mem; size_t alloc_size; @@ -186,16 +205,8 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) if (!mem) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - mem->nslabs = nslabs; - mem->start = __pa(tlb); - mem->end = mem->start + bytes; - mem->index = 0; - spin_lock_init(&mem->lock); - for (i = 0; i < mem->nslabs; i++) { - mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); - mem->slots[i].orig_addr = INVALID_PHYS_ADDR; - mem->slots[i].alloc_size = 0; - } + + swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); io_tlb_default_mem = mem; if (verbose) @@ -282,8 +293,8 @@ swiotlb_late_init_with_default_size(size_t default_size) int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) { - unsigned long bytes = nslabs << IO_TLB_SHIFT, i; struct io_tlb_mem *mem; + unsigned long bytes = nslabs << IO_TLB_SHIFT; if (swiotlb_force == SWIOTLB_NO_FORCE) return 0; @@ -297,20 +308,9 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) if (!mem) return -ENOMEM; - mem->nslabs = nslabs; - mem->start = virt_to_phys(tlb); - mem->end = mem->start + bytes; - mem->index = 0; - mem->late_alloc = 1; - spin_lock_init(&mem->lock); - for (i = 0; i < mem->nslabs; i++) { - mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); - mem->slots[i].orig_addr = INVALID_PHYS_ADDR; - mem->slots[i].alloc_size = 0; - } - + memset(mem, 0, sizeof(*mem)); set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); - memset(tlb, 0, bytes); + swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true); io_tlb_default_mem = mem; swiotlb_print_info(); -- cgit v1.2.3 From 6e675a1c455ea7579c7eaf1a38fe64267039d6fe Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:33 +0800 Subject: swiotlb: Refactor swiotlb_create_debugfs Split the debugfs creation to make the code reusable for supporting different bounce buffer pools. 
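For illustration only, a minimal sketch (not part of this patch) of how a later, non-default pool could reuse the split-out helper; the function and directory names below are made up:

	/* Hypothetical caller inside swiotlb.c; "swiotlb-example" is an
	 * assumed name, not something this series creates. The helper only
	 * populates the files, so the caller sets up the directory first.
	 */
	static void example_pool_debugfs_init(struct io_tlb_mem *mem)
	{
		mem->debugfs = debugfs_create_dir("swiotlb-example", debugfs_dir);
		swiotlb_create_debugfs_files(mem);	/* io_tlb_nslabs, io_tlb_used */
	}
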
Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 414db5fc8de9..ae6a151d0a41 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -669,19 +669,26 @@ bool is_swiotlb_active(void) EXPORT_SYMBOL_GPL(is_swiotlb_active); #ifdef CONFIG_DEBUG_FS +static struct dentry *debugfs_dir; -static int __init swiotlb_create_debugfs(void) +static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem) { - struct io_tlb_mem *mem = io_tlb_default_mem; - - if (!mem) - return 0; - mem->debugfs = debugfs_create_dir("swiotlb", NULL); debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); +} + +static int __init swiotlb_create_default_debugfs(void) +{ + struct io_tlb_mem *mem = io_tlb_default_mem; + + debugfs_dir = debugfs_create_dir("swiotlb", NULL); + if (mem) { + mem->debugfs = debugfs_dir; + swiotlb_create_debugfs_files(mem); + } return 0; } -late_initcall(swiotlb_create_debugfs); +late_initcall(swiotlb_create_default_debugfs); #endif -- cgit v1.2.3 From 69031f500865ee3eee19566a1b9c40a189817eaa Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:34 +0800 Subject: swiotlb: Set dev->dma_io_tlb_mem to the swiotlb pool used Always have the pointer to the swiotlb pool used in struct device. This could help simplify the code for other pools. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Acked-by: Stefano Stabellini Signed-off-by: Konrad Rzeszutek Wilk --- drivers/base/core.c | 4 ++++ include/linux/device.h | 4 ++++ kernel/dma/swiotlb.c | 8 ++++---- 3 files changed, 12 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/drivers/base/core.c b/drivers/base/core.c index cadcade65825..ea5b85354526 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include /* for dma_default_coherent */ @@ -2846,6 +2847,9 @@ void device_initialize(struct device *dev) defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) dev->dma_coherent = dma_default_coherent; #endif +#ifdef CONFIG_SWIOTLB + dev->dma_io_tlb_mem = io_tlb_default_mem; +#endif } EXPORT_SYMBOL_GPL(device_initialize); diff --git a/include/linux/device.h b/include/linux/device.h index 59940f1744c1..2a22875238a6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -423,6 +423,7 @@ struct dev_links_info { * @dma_pools: Dma pools (if dma'ble device). * @dma_mem: Internal for coherent mem override. * @cma_area: Contiguous memory area for dma allocations + * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use. * @archdata: For arch-specific additions. * @of_node: Associated device tree node. * @fwnode: Associated device node supplied by platform firmware. 
@@ -531,6 +532,9 @@ struct device { #ifdef CONFIG_DMA_CMA struct cma *cma_area; /* contiguous memory area for dma allocations */ +#endif +#ifdef CONFIG_SWIOTLB + struct io_tlb_mem *dma_io_tlb_mem; #endif /* arch specific additions */ struct dev_archdata archdata; diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index ae6a151d0a41..33d413beddd4 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -348,7 +348,7 @@ static unsigned int swiotlb_align_offset(struct device *dev, u64 addr) static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; phys_addr_t orig_addr = mem->slots[index].orig_addr; size_t alloc_size = mem->slots[index].alloc_size; @@ -429,7 +429,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) static int find_slots(struct device *dev, phys_addr_t orig_addr, size_t alloc_size) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned long boundary_mask = dma_get_seg_boundary(dev); dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; @@ -506,7 +506,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned int offset = swiotlb_align_offset(dev, orig_addr); unsigned int i; int index; @@ -557,7 +557,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, enum dma_data_direction dir, unsigned long attrs) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = hwdev->dma_io_tlb_mem; unsigned long flags; unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr); int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; -- cgit v1.2.3 From 7fd856aa7f4261ddac62ea59d8383fef22a0690e Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:35 +0800 Subject: swiotlb: Update is_swiotlb_buffer to add a struct device argument Update is_swiotlb_buffer to add a struct device argument. This will be useful later to allow for different pools. 
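The conversion at the call sites is mechanical, as the hunks below show; the pattern is:

	/* before: always consults the global default pool */
	if (is_swiotlb_buffer(phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);

	/* after: consults the pool attached to this device */
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
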
Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Acked-by: Stefano Stabellini Signed-off-by: Konrad Rzeszutek Wilk --- drivers/iommu/dma-iommu.c | 12 ++++++------ drivers/xen/swiotlb-xen.c | 2 +- include/linux/swiotlb.h | 7 ++++--- kernel/dma/direct.c | 6 +++--- kernel/dma/direct.h | 6 +++--- 5 files changed, 17 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 98ba927aee1a..4e34e8b26579 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -506,7 +506,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, __iommu_dma_unmap(dev, dma_addr, size); - if (unlikely(is_swiotlb_buffer(phys))) + if (unlikely(is_swiotlb_buffer(dev, phys))) swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); } @@ -577,7 +577,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys, } iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask); - if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys)) + if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys)) swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs); return iova; } @@ -783,7 +783,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_cpu(phys, size, dir); - if (is_swiotlb_buffer(phys)) + if (is_swiotlb_buffer(dev, phys)) swiotlb_sync_single_for_cpu(dev, phys, size, dir); } @@ -796,7 +796,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev, return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); - if (is_swiotlb_buffer(phys)) + if (is_swiotlb_buffer(dev, phys)) swiotlb_sync_single_for_device(dev, phys, size, dir); if (!dev_is_dma_coherent(dev)) @@ -817,7 +817,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); - if (is_swiotlb_buffer(sg_phys(sg))) + if (is_swiotlb_buffer(dev, sg_phys(sg))) swiotlb_sync_single_for_cpu(dev, sg_phys(sg), sg->length, dir); } @@ -834,7 +834,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, return; for_each_sg(sgl, sg, nelems, i) { - if (is_swiotlb_buffer(sg_phys(sg))) + if (is_swiotlb_buffer(dev, sg_phys(sg))) swiotlb_sync_single_for_device(dev, sg_phys(sg), sg->length, dir); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 24d11861ac7d..0c4fb34f11ab 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -100,7 +100,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) * in our domain. Therefore _only_ check address within our domain. 
*/ if (pfn_valid(PFN_DOWN(paddr))) - return is_swiotlb_buffer(paddr); + return is_swiotlb_buffer(dev, paddr); return 0; } diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 216854a5e513..d1f3d95881cd 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -2,6 +2,7 @@ #ifndef __LINUX_SWIOTLB_H #define __LINUX_SWIOTLB_H +#include #include #include #include @@ -101,9 +102,9 @@ struct io_tlb_mem { }; extern struct io_tlb_mem *io_tlb_default_mem; -static inline bool is_swiotlb_buffer(phys_addr_t paddr) +static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; return mem && paddr >= mem->start && paddr < mem->end; } @@ -115,7 +116,7 @@ bool is_swiotlb_active(void); void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE -static inline bool is_swiotlb_buffer(phys_addr_t paddr) +static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) { return false; } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index f737e3347059..84c9feb5474a 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -343,7 +343,7 @@ void dma_direct_sync_sg_for_device(struct device *dev, for_each_sg(sgl, sg, nents, i) { phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); - if (unlikely(is_swiotlb_buffer(paddr))) + if (unlikely(is_swiotlb_buffer(dev, paddr))) swiotlb_sync_single_for_device(dev, paddr, sg->length, dir); @@ -369,7 +369,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev, if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_cpu(paddr, sg->length, dir); - if (unlikely(is_swiotlb_buffer(paddr))) + if (unlikely(is_swiotlb_buffer(dev, paddr))) swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir); @@ -504,7 +504,7 @@ size_t dma_direct_max_mapping_size(struct device *dev) bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr) { return !dev_is_dma_coherent(dev) || - is_swiotlb_buffer(dma_to_phys(dev, dma_addr)); + is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr)); } /** diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index 50afc05b6f1d..13e9e7158d94 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -56,7 +56,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev, { phys_addr_t paddr = dma_to_phys(dev, addr); - if (unlikely(is_swiotlb_buffer(paddr))) + if (unlikely(is_swiotlb_buffer(dev, paddr))) swiotlb_sync_single_for_device(dev, paddr, size, dir); if (!dev_is_dma_coherent(dev)) @@ -73,7 +73,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev, arch_sync_dma_for_cpu_all(); } - if (unlikely(is_swiotlb_buffer(paddr))) + if (unlikely(is_swiotlb_buffer(dev, paddr))) swiotlb_sync_single_for_cpu(dev, paddr, size, dir); if (dir == DMA_FROM_DEVICE) @@ -113,7 +113,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) dma_direct_sync_single_for_cpu(dev, addr, size, dir); - if (unlikely(is_swiotlb_buffer(phys))) + if (unlikely(is_swiotlb_buffer(dev, phys))) swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); } #endif /* _KERNEL_DMA_DIRECT_H */ -- cgit v1.2.3 From 6f2beb268a5d35504a636c4a3b7aaa76ec32d96c Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:36 +0800 Subject: swiotlb: Update is_swiotlb_active to add a struct device argument Update is_swiotlb_active to add a struct device argument. 
This will be useful later to allow for different pools. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Acked-by: Stefano Stabellini Signed-off-by: Konrad Rzeszutek Wilk --- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 2 +- drivers/pci/xen-pcifront.c | 2 +- include/linux/swiotlb.h | 4 ++-- kernel/dma/direct.c | 2 +- kernel/dma/swiotlb.c | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index ce6b664b10aa..89a894354263 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) max_order = MAX_ORDER; #ifdef CONFIG_SWIOTLB - if (is_swiotlb_active()) { + if (is_swiotlb_active(obj->base.dev->dev)) { unsigned int max_segment; max_segment = swiotlb_max_segment(); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index f4c2e46b6fe1..2ca9d9a9e5d5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -276,7 +276,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) } #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) - need_swiotlb = is_swiotlb_active(); + need_swiotlb = is_swiotlb_active(dev->dev); #endif ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev, diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index b7a8f3a1921f..0d56985bfe81 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev) spin_unlock(&pcifront_dev_lock); - if (!err && !is_swiotlb_active()) { + if (!err && !is_swiotlb_active(&pdev->xdev->dev)) { err = pci_xen_swiotlb_init_late(); if (err) dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n"); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index d1f3d95881cd..dd1c30a83058 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -112,7 +112,7 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); -bool is_swiotlb_active(void); +bool is_swiotlb_active(struct device *dev); void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE @@ -132,7 +132,7 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev) return SIZE_MAX; } -static inline bool is_swiotlb_active(void) +static inline bool is_swiotlb_active(struct device *dev) { return false; } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 84c9feb5474a..7a88c34d0867 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -495,7 +495,7 @@ int dma_direct_supported(struct device *dev, u64 mask) size_t dma_direct_max_mapping_size(struct device *dev) { /* If SWIOTLB is active, use its maximum mapping size */ - if (is_swiotlb_active() && + if (is_swiotlb_active(dev) && (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE)) return swiotlb_max_mapping_size(dev); return SIZE_MAX; diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 33d413beddd4..d8677d6637dd 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -662,9 +662,9 @@ size_t 
swiotlb_max_mapping_size(struct device *dev) return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE; } -bool is_swiotlb_active(void) +bool is_swiotlb_active(struct device *dev) { - return io_tlb_default_mem != NULL; + return dev->dma_io_tlb_mem != NULL; } EXPORT_SYMBOL_GPL(is_swiotlb_active); -- cgit v1.2.3 From 903cd0f315fe426c6a64c54ed389de0becb663dc Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Thu, 24 Jun 2021 23:55:20 +0800 Subject: swiotlb: Use is_swiotlb_force_bounce for swiotlb data bouncing Propagate the swiotlb_force into io_tlb_default_mem->force_bounce and use it to determine whether to bounce the data or not. This will be useful later to allow for different pools. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Acked-by: Stefano Stabellini Signed-off-by: Konrad Rzeszutek Wilk [v2: Includes Will's fix] --- drivers/xen/swiotlb-xen.c | 2 +- include/linux/swiotlb.h | 13 +++++++++++++ kernel/dma/direct.c | 2 +- kernel/dma/direct.h | 2 +- kernel/dma/swiotlb.c | 4 ++++ 5 files changed, 20 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 0c4fb34f11ab..785ec7e8be01 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -374,7 +374,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, if (dma_capable(dev, dev_addr, size, true) && !range_straddles_page_boundary(phys, size) && !xen_arch_need_swiotlb(dev, phys, dev_addr) && - swiotlb_force != SWIOTLB_FORCE) + !is_swiotlb_force_bounce(dev)) goto done; /* diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index dd1c30a83058..da348671b0d5 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -84,6 +84,7 @@ extern enum swiotlb_force swiotlb_force; * unmap calls. * @debugfs: The dentry to debugfs. 
* @late_alloc: %true if allocated using the page allocator + * @force_bounce: %true if swiotlb bouncing is forced */ struct io_tlb_mem { phys_addr_t start; @@ -94,6 +95,7 @@ struct io_tlb_mem { spinlock_t lock; struct dentry *debugfs; bool late_alloc; + bool force_bounce; struct io_tlb_slot { phys_addr_t orig_addr; size_t alloc_size; @@ -109,6 +111,13 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) return mem && paddr >= mem->start && paddr < mem->end; } +static inline bool is_swiotlb_force_bounce(struct device *dev) +{ + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; + + return mem && mem->force_bounce; +} + void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); @@ -120,6 +129,10 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) { return false; } +static inline bool is_swiotlb_force_bounce(struct device *dev) +{ + return false; +} static inline void swiotlb_exit(void) { } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 7a88c34d0867..a92465b4eb12 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -496,7 +496,7 @@ size_t dma_direct_max_mapping_size(struct device *dev) { /* If SWIOTLB is active, use its maximum mapping size */ if (is_swiotlb_active(dev) && - (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE)) + (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev))) return swiotlb_max_mapping_size(dev); return SIZE_MAX; } diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index 13e9e7158d94..4632b0f4f72e 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -87,7 +87,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, phys_addr_t phys = page_to_phys(page) + offset; dma_addr_t dma_addr = phys_to_dma(dev, phys); - if (unlikely(swiotlb_force == SWIOTLB_FORCE)) + if (is_swiotlb_force_bounce(dev)) return swiotlb_map(dev, phys, size, dir, attrs); if (unlikely(!dma_capable(dev, dma_addr, size, true))) { diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index d8677d6637dd..04319dd22d28 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -179,6 +179,10 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, mem->end = mem->start + bytes; mem->index = 0; mem->late_alloc = late_alloc; + + if (swiotlb_force == SWIOTLB_FORCE) + mem->force_bounce = true; + spin_lock_init(&mem->lock); for (i = 0; i < mem->nslabs; i++) { mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); -- cgit v1.2.3 From 36f7b2f3ca5f0bee00abf9ea52748ce0644a21c6 Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Thu, 24 Jun 2021 23:55:21 +0800 Subject: swiotlb: Move alloc_size to swiotlb_find_slots Rename find_slots to swiotlb_find_slots and move the maintenance of alloc_size to it for better code reusability later. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 04319dd22d28..0116a630dc13 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -430,8 +430,8 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) * Find a suitable number of IO TLB entries size that will fit this request and * allocate a buffer from that IO TLB pool. 
*/ -static int find_slots(struct device *dev, phys_addr_t orig_addr, - size_t alloc_size) +static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, + size_t alloc_size) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned long boundary_mask = dma_get_seg_boundary(dev); @@ -442,6 +442,7 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr, dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); unsigned int nslots = nr_slots(alloc_size), stride; unsigned int index, wrap, count = 0, i; + unsigned int offset = swiotlb_align_offset(dev, orig_addr); unsigned long flags; BUG_ON(!nslots); @@ -486,8 +487,11 @@ not_found: return -1; found: - for (i = index; i < index + nslots; i++) + for (i = index; i < index + nslots; i++) { mem->slots[i].list = 0; + mem->slots[i].alloc_size = + alloc_size - (offset + ((i - index) << IO_TLB_SHIFT)); + } for (i = index - 1; io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; i--) @@ -528,7 +532,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, return (phys_addr_t)DMA_MAPPING_ERROR; } - index = find_slots(dev, orig_addr, alloc_size + offset); + index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset); if (index == -1) { if (!(attrs & DMA_ATTR_NO_WARN)) dev_warn_ratelimited(dev, @@ -542,11 +546,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, * This is needed when we sync the memory. Then we sync the buffer if * needed. */ - for (i = 0; i < nr_slots(alloc_size + offset); i++) { + for (i = 0; i < nr_slots(alloc_size + offset); i++) mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); - mem->slots[index + i].alloc_size = - alloc_size - (i << IO_TLB_SHIFT); - } tlb_addr = slot_addr(mem->start, index) + offset; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) -- cgit v1.2.3 From 70347877231eeb505a27abe80af7ae3d3a281519 Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:39 +0800 Subject: swiotlb: Refactor swiotlb_tbl_unmap_single Add a new function, swiotlb_release_slots, to make the code reusable for supporting different bounce buffer pools. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 0116a630dc13..23b6df3a6ab7 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -555,27 +555,15 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, return tlb_addr; } -/* - * tlb_addr is the physical address of the bounce buffer to unmap. 
- */ -void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t mapping_size, enum dma_data_direction dir, - unsigned long attrs) +static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr) { - struct io_tlb_mem *mem = hwdev->dma_io_tlb_mem; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned long flags; - unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr); + unsigned int offset = swiotlb_align_offset(dev, tlb_addr); int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; int nslots = nr_slots(mem->slots[index].alloc_size + offset); int count, i; - /* - * First, sync the memory before unmapping the entry - */ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE); - /* * Return the buffer to the free list by setting the corresponding * entries to indicate the number of contiguous entries available. @@ -610,6 +598,23 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, spin_unlock_irqrestore(&mem->lock, flags); } +/* + * tlb_addr is the physical address of the bounce buffer to unmap. + */ +void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr, + size_t mapping_size, enum dma_data_direction dir, + unsigned long attrs) +{ + /* + * First, sync the memory before unmapping the entry + */ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) + swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE); + + swiotlb_release_slots(dev, tlb_addr); +} + void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir) { -- cgit v1.2.3 From f4111e39a52aa5d5136d890bbd1aa87c1c8fe3bc Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:40 +0800 Subject: swiotlb: Add restricted DMA alloc/free support Add the functions, swiotlb_{alloc,free} and is_swiotlb_for_alloc to support the memory allocation from restricted DMA pool. The restricted DMA pool is preferred if available. Note that since coherent allocation needs remapping, one must set up another device coherent pool by shared-dma-pool and use dma_alloc_from_dev_coherent instead for atomic coherent allocation. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Acked-by: Stefano Stabellini Signed-off-by: Konrad Rzeszutek Wilk --- include/linux/swiotlb.h | 26 ++++++++++++++++++++++++++ kernel/dma/direct.c | 49 +++++++++++++++++++++++++++++++++++++------------ kernel/dma/swiotlb.c | 38 ++++++++++++++++++++++++++++++++++++-- 3 files changed, 99 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index da348671b0d5..3b9454d1e498 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -85,6 +85,7 @@ extern enum swiotlb_force swiotlb_force; * @debugfs: The dentry to debugfs. 
* @late_alloc: %true if allocated using the page allocator * @force_bounce: %true if swiotlb bouncing is forced + * @for_alloc: %true if the pool is used for memory allocation */ struct io_tlb_mem { phys_addr_t start; @@ -96,6 +97,7 @@ struct io_tlb_mem { struct dentry *debugfs; bool late_alloc; bool force_bounce; + bool for_alloc; struct io_tlb_slot { phys_addr_t orig_addr; size_t alloc_size; @@ -158,4 +160,28 @@ static inline void swiotlb_adjust_size(unsigned long size) extern void swiotlb_print_info(void); extern void swiotlb_set_max_segment(unsigned int); +#ifdef CONFIG_DMA_RESTRICTED_POOL +struct page *swiotlb_alloc(struct device *dev, size_t size); +bool swiotlb_free(struct device *dev, struct page *page, size_t size); + +static inline bool is_swiotlb_for_alloc(struct device *dev) +{ + return dev->dma_io_tlb_mem->for_alloc; +} +#else +static inline struct page *swiotlb_alloc(struct device *dev, size_t size) +{ + return NULL; +} +static inline bool swiotlb_free(struct device *dev, struct page *page, + size_t size) +{ + return false; +} +static inline bool is_swiotlb_for_alloc(struct device *dev) +{ + return false; +} +#endif /* CONFIG_DMA_RESTRICTED_POOL */ + #endif /* __LINUX_SWIOTLB_H */ diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index a92465b4eb12..2de33e5d302b 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -75,6 +75,15 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); } +static void __dma_direct_free_pages(struct device *dev, struct page *page, + size_t size) +{ + if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) && + swiotlb_free(dev, page, size)) + return; + dma_free_contiguous(dev, page, size); +} + static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, gfp_t gfp) { @@ -86,6 +95,16 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, &phys_limit); + if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) && + is_swiotlb_for_alloc(dev)) { + page = swiotlb_alloc(dev, size); + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { + __dma_direct_free_pages(dev, page, size); + return NULL; + } + return page; + } + page = dma_alloc_contiguous(dev, size, gfp); if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { dma_free_contiguous(dev, page, size); @@ -142,7 +161,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, gfp |= __GFP_NOWARN; if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && - !force_dma_unencrypted(dev)) { + !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) { page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO); if (!page) return NULL; @@ -155,18 +174,23 @@ void *dma_direct_alloc(struct device *dev, size_t size, } if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && - !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && - !dev_is_dma_coherent(dev)) + !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) && + !is_swiotlb_for_alloc(dev)) return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); /* * Remapping or decrypting memory may block. If either is required and * we can't block, allocate the memory from the atomic pools. + * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must + * set up another device coherent pool by shared-dma-pool and use + * dma_alloc_from_dev_coherent instead. 
*/ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && !gfpflags_allow_blocking(gfp) && (force_dma_unencrypted(dev) || - (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev)))) + (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && + !dev_is_dma_coherent(dev))) && + !is_swiotlb_for_alloc(dev)) return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); /* we always manually zero the memory once we are done */ @@ -237,7 +261,7 @@ out_encrypt_pages: return NULL; } out_free_pages: - dma_free_contiguous(dev, page, size); + __dma_direct_free_pages(dev, page, size); return NULL; } @@ -247,15 +271,15 @@ void dma_direct_free(struct device *dev, size_t size, unsigned int page_order = get_order(size); if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && - !force_dma_unencrypted(dev)) { + !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) { /* cpu_addr is a struct page cookie, not a kernel address */ dma_free_contiguous(dev, cpu_addr, size); return; } if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && - !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && - !dev_is_dma_coherent(dev)) { + !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) && + !is_swiotlb_for_alloc(dev)) { arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); return; } @@ -273,7 +297,7 @@ void dma_direct_free(struct device *dev, size_t size, else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED)) arch_dma_clear_uncached(cpu_addr, size); - dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size); + __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size); } struct page *dma_direct_alloc_pages(struct device *dev, size_t size, @@ -283,7 +307,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size, void *ret; if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && - force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) + force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) && + !is_swiotlb_for_alloc(dev)) return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); page = __dma_direct_alloc_pages(dev, size, gfp); @@ -310,7 +335,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size, *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); return page; out_free_pages: - dma_free_contiguous(dev, page, size); + __dma_direct_free_pages(dev, page, size); return NULL; } @@ -329,7 +354,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, if (force_dma_unencrypted(dev)) set_memory_encrypted((unsigned long)vaddr, 1 << page_order); - dma_free_contiguous(dev, page, size); + __dma_direct_free_pages(dev, page, size); } #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 23b6df3a6ab7..44fc3d10f017 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -462,8 +462,9 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, index = wrap = wrap_index(mem, ALIGN(mem->index, stride)); do { - if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) != - (orig_addr & iotlb_align_mask)) { + if (orig_addr && + (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) != + (orig_addr & iotlb_align_mask)) { index = wrap_index(mem, index + 1); continue; } @@ -702,3 +703,36 @@ static int __init swiotlb_create_default_debugfs(void) late_initcall(swiotlb_create_default_debugfs); #endif + +#ifdef CONFIG_DMA_RESTRICTED_POOL +struct page *swiotlb_alloc(struct device *dev, size_t size) +{ + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; + phys_addr_t tlb_addr; + int index; + + if (!mem) + return NULL; + + index = 
swiotlb_find_slots(dev, 0, size); + if (index == -1) + return NULL; + + tlb_addr = slot_addr(mem->start, index); + + return pfn_to_page(PFN_DOWN(tlb_addr)); +} + +bool swiotlb_free(struct device *dev, struct page *page, size_t size) +{ + phys_addr_t tlb_addr = page_to_phys(page); + + if (!is_swiotlb_buffer(dev, tlb_addr)) + return false; + + swiotlb_release_slots(dev, tlb_addr); + + return true; +} + +#endif /* CONFIG_DMA_RESTRICTED_POOL */ -- cgit v1.2.3 From 0b84e4f8b793eb4045fd64f6f514165a7974cd16 Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Sat, 19 Jun 2021 11:40:41 +0800 Subject: swiotlb: Add restricted DMA pool initialization Add the initialization function to create restricted DMA pools from matching reserved-memory nodes. Regardless of swiotlb setting, the restricted DMA pool is preferred if available. The restricted DMA pools provide a basic level of protection against the DMA overwriting buffer contents at unexpected times. However, to protect against general data leakage and system memory corruption, the system needs to provide a way to lock down the memory access, e.g., MPU. Signed-off-by: Claire Chang Reviewed-by: Christoph Hellwig Tested-by: Stefano Stabellini Tested-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- include/linux/swiotlb.h | 3 +- kernel/dma/Kconfig | 14 +++++++++ kernel/dma/swiotlb.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 3b9454d1e498..39284ff2a6cd 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -73,7 +73,8 @@ extern enum swiotlb_force swiotlb_force; * range check to see if the memory was in fact allocated by this * API. * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and - * @end. This is command line adjustable via setup_io_tlb_npages. + * @end. For default swiotlb, this is command line adjustable via + * setup_io_tlb_npages. * @used: The number of used IO TLB block. * @list: The free list describing the number of free entries available * from each index. diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 77b405508743..3e961dc39634 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -80,6 +80,20 @@ config SWIOTLB bool select NEED_DMA_MAP_STATE +config DMA_RESTRICTED_POOL + bool "DMA Restricted Pool" + depends on OF && OF_RESERVED_MEM + select SWIOTLB + help + This enables support for restricted DMA pools which provide a level of + DMA memory protection on systems with limited hardware protection + capabilities, such as those lacking an IOMMU. + + For more information see + + and . + If unsure, say "n". + # # Should be selected if we can mmap non-coherent mappings to userspace. 
# The only thing that is really required is a way to set an uncached bit diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 44fc3d10f017..0ffbaae9fba2 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -39,6 +39,13 @@ #ifdef CONFIG_DEBUG_FS #include #endif +#ifdef CONFIG_DMA_RESTRICTED_POOL +#include +#include +#include +#include +#include +#endif #include #include @@ -735,4 +742,73 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size) return true; } +static int rmem_swiotlb_device_init(struct reserved_mem *rmem, + struct device *dev) +{ + struct io_tlb_mem *mem = rmem->priv; + unsigned long nslabs = rmem->size >> IO_TLB_SHIFT; + + /* + * Since multiple devices can share the same pool, the private data, + * io_tlb_mem struct, will be initialized by the first device attached + * to it. + */ + if (!mem) { + mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL); + if (!mem) + return -ENOMEM; + + set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), + rmem->size >> PAGE_SHIFT); + swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false); + mem->force_bounce = true; + mem->for_alloc = true; + + rmem->priv = mem; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + mem->debugfs = + debugfs_create_dir(rmem->name, debugfs_dir); + swiotlb_create_debugfs_files(mem); + } + } + + dev->dma_io_tlb_mem = mem; + + return 0; +} + +static void rmem_swiotlb_device_release(struct reserved_mem *rmem, + struct device *dev) +{ + dev->dma_io_tlb_mem = io_tlb_default_mem; +} + +static const struct reserved_mem_ops rmem_swiotlb_ops = { + .device_init = rmem_swiotlb_device_init, + .device_release = rmem_swiotlb_device_release, +}; + +static int __init rmem_swiotlb_setup(struct reserved_mem *rmem) +{ + unsigned long node = rmem->fdt_node; + + if (of_get_flat_dt_prop(node, "reusable", NULL) || + of_get_flat_dt_prop(node, "linux,cma-default", NULL) || + of_get_flat_dt_prop(node, "linux,dma-default", NULL) || + of_get_flat_dt_prop(node, "no-map", NULL)) + return -EINVAL; + + if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) { + pr_err("Restricted DMA pool must be accessible within the linear mapping."); + return -EINVAL; + } + + rmem->ops = &rmem_swiotlb_ops; + pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + return 0; +} + +RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup); #endif /* CONFIG_DMA_RESTRICTED_POOL */ -- cgit v1.2.3 From 09a4a79d42ced8ca7fc250b05e45ac1cb23a9c52 Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Thu, 1 Jul 2021 11:31:30 +0800 Subject: swiotlb: fix implicit debugfs declarations Factor out the debugfs bits from rmem_swiotlb_device_init() into a separate rmem_swiotlb_debugfs_init() to fix the implicit debugfs declarations. 
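As the earlier debugfs refactor in this series shows, both debugfs_dir and swiotlb_create_debugfs_files() are defined only under #ifdef CONFIG_DEBUG_FS, and IS_ENABLED(CONFIG_DEBUG_FS) still leaves the branch visible to the compiler, so CONFIG_DEBUG_FS=n builds tripped over the undeclared symbols. The fix applies the usual stub pattern, sketched generically here, so the caller needs no #ifdef:

	#ifdef CONFIG_DEBUG_FS
	static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
	{
		/* create the per-pool directory and files */
	}
	#else
	static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
	{
	}
	#endif
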
Fixes: 461021875c50 ("swiotlb: Add restricted DMA pool initialization")
Reported-by: kernel test robot
Signed-off-by: Claire Chang
Signed-off-by: Konrad Rzeszutek Wilk
---
 kernel/dma/swiotlb.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 0ffbaae9fba2..b7f76bca89bf 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -712,6 +712,21 @@ late_initcall(swiotlb_create_default_debugfs);
 #endif
 
 #ifdef CONFIG_DMA_RESTRICTED_POOL
+
+#ifdef CONFIG_DEBUG_FS
+static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
+{
+	struct io_tlb_mem *mem = rmem->priv;
+
+	mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir);
+	swiotlb_create_debugfs_files(mem);
+}
+#else
+static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
+{
+}
+#endif
+
 struct page *swiotlb_alloc(struct device *dev, size_t size)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
@@ -766,11 +781,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 
 		rmem->priv = mem;
 
-		if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-			mem->debugfs =
-				debugfs_create_dir(rmem->name, debugfs_dir);
-			swiotlb_create_debugfs_files(mem);
-		}
+		rmem_swiotlb_debugfs_init(rmem);
 	}
 
 	dev->dma_io_tlb_mem = mem;
--
cgit v1.2.3


From 868c9ddc182bc6728bb380cbfb3170734f72c599 Mon Sep 17 00:00:00 2001
From: Dominique Martinet
Date: Wed, 7 Jul 2021 14:12:54 +0900
Subject: swiotlb: add overflow checks to swiotlb_bounce

This is a follow-up on 5f89468e2f06 ("swiotlb: manipulate orig_addr
when tlb_addr has offset") which fixed unaligned dma mappings, making
sure the following overflows are caught:

- offset of the start of the slot within the device bigger than the
  requested address' offset, in other words if the base address given
  in swiotlb_tbl_map_single to create the mapping (orig_addr) was
  after the requested address for the sync (tlb_offset) in the same
  block:

   |------------------------------------------| block
                <----------------------------> mapped part of the block
                ^
                orig_addr
         ^
         invalid tlb_addr for sync

- the resulting offset bigger than the allocation size; this can
  happen if the mapping did not extend to the end of the block, e.g.:

   |------------------------------------------| block
        <---------------------> mapped part of the block
        ^                               ^
        orig_addr                       invalid tlb_addr

Both should never happen, so print a warning and bail out without
trying to adjust the sizes/offsets: the first one could try to sync
from orig_addr to whatever is left of the requested size, but the
latter really has nothing to sync there...
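A worked example of the two checks with made-up numbers, assuming the current 2 KiB IO_TLB_SIZE (in-slot offset mask 0x7ff):

	tlb_offset       = tlb_addr & (IO_TLB_SIZE - 1);	  /* say 0x200 */
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr); /* say 0x604 */

	/* 0x200 < 0x604: the sync starts before the mapped part of the
	 * slot (first diagram above) -> warn and bail out.
	 *
	 * With tlb_offset = 0x700 and orig_addr_offset = 0x100 instead,
	 * tlb_offset -= orig_addr_offset leaves 0x600; if only
	 * alloc_size = 0x400 bytes were mapped, 0x600 > 0x400 means the
	 * sync runs past the mapping (second diagram) -> warn and bail out.
	 */
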
Signed-off-by: Dominique Martinet Cc: Konrad Rzeszutek Wilk Reviewed-by: Bumyong Lee Cc: Christoph Hellwig Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index b7f76bca89bf..f1a9ae7fad8f 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -365,13 +365,27 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size size_t alloc_size = mem->slots[index].alloc_size; unsigned long pfn = PFN_DOWN(orig_addr); unsigned char *vaddr = phys_to_virt(tlb_addr); - unsigned int tlb_offset; + unsigned int tlb_offset, orig_addr_offset; if (orig_addr == INVALID_PHYS_ADDR) return; - tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) - - swiotlb_align_offset(dev, orig_addr); + tlb_offset = tlb_addr & (IO_TLB_SIZE - 1); + orig_addr_offset = swiotlb_align_offset(dev, orig_addr); + if (tlb_offset < orig_addr_offset) { + dev_WARN_ONCE(dev, 1, + "Access before mapping start detected. orig offset %u, requested offset %u.\n", + orig_addr_offset, tlb_offset); + return; + } + + tlb_offset -= orig_addr_offset; + if (tlb_offset > alloc_size) { + dev_WARN_ONCE(dev, 1, + "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n", + alloc_size, size, tlb_offset); + return; + } orig_addr += tlb_offset; alloc_size -= tlb_offset; -- cgit v1.2.3 From a7a73697360ea81244eea550138b8f614348860c Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:48 +0200 Subject: kcsan: Remove CONFIG_KCSAN_DEBUG By this point CONFIG_KCSAN_DEBUG is pretty useless, as the system just isn't usable with it due to spamming console (I imagine a randconfig test robot will run into this sooner or later). Remove it. Back in 2019 I used it occasionally to record traces of watchpoints and verify the encoding is correct, but these days we have proper tests. If something similar is needed in future, just add it back ad-hoc. Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 9 --------- lib/Kconfig.kcsan | 3 --- 2 files changed, 12 deletions(-) (limited to 'kernel') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 26709ea65c71..d92977ede7e1 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -479,15 +479,6 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) break; /* ignore; we do not diff the values */ } - if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) { - kcsan_disable_current(); - pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n", - is_write ? "write" : "read", size, ptr, - watchpoint_slot((unsigned long)ptr), - encode_watchpoint((unsigned long)ptr, size, is_write)); - kcsan_enable_current(); - } - /* * Delay this thread, to increase probability of observing a racy * conflicting access. diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 6152fbd5cbb4..5304f211f81f 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -62,9 +62,6 @@ config KCSAN_VERBOSE generated from any one of them, system stability may suffer due to deadlocks or recursion. If in doubt, say N. 
-config KCSAN_DEBUG - bool "Debugging of KCSAN internals" - config KCSAN_SELFTEST bool "Perform short selftests on boot" default y -- cgit v1.2.3 From 08cac6049412061e571fadadc3e23464dc46d0f2 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:50 +0200 Subject: kcsan: Reduce get_ctx() uses in kcsan_found_watchpoint() There are a number get_ctx() calls that are close to each other, which results in poor codegen (repeated preempt_count loads). Specifically in kcsan_found_watchpoint() (even though it's a slow-path) it is beneficial to keep the race-window small until the watchpoint has actually been consumed to avoid missed opportunities to report a race. Let's clean it up a bit before we add more code in kcsan_found_watchpoint(). Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index d92977ede7e1..906100923b88 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -301,9 +301,9 @@ static inline void reset_kcsan_skip(void) this_cpu_write(kcsan_skip, skip_count); } -static __always_inline bool kcsan_is_enabled(void) +static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx) { - return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0; + return READ_ONCE(kcsan_enabled) && !ctx->disable_count; } /* Introduce delay depending on context and configuration. */ @@ -353,10 +353,17 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, atomic_long_t *watchpoint, long encoded_watchpoint) { + struct kcsan_ctx *ctx = get_ctx(); unsigned long flags; bool consumed; - if (!kcsan_is_enabled()) + /* + * We know a watchpoint exists. Let's try to keep the race-window + * between here and finally consuming the watchpoint below as small as + * possible -- avoid unneccessarily complex code until consumed. + */ + + if (!kcsan_is_enabled(ctx)) return; /* @@ -364,14 +371,12 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, * reporting a race where e.g. the writer set up the watchpoint, but the * reader has access_mask!=0, we have to ignore the found watchpoint. */ - if (get_ctx()->access_mask != 0) + if (ctx->access_mask) return; /* - * Consume the watchpoint as soon as possible, to minimize the chances - * of !consumed. Consuming the watchpoint must always be guarded by - * kcsan_is_enabled() check, as otherwise we might erroneously - * triggering reports when disabled. + * Consuming the watchpoint must be guarded by kcsan_is_enabled() to + * avoid erroneously triggering reports if the context is disabled. */ consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint); @@ -409,6 +414,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) unsigned long access_mask; enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE; unsigned long ua_flags = user_access_save(); + struct kcsan_ctx *ctx = get_ctx(); unsigned long irq_flags = 0; /* @@ -417,7 +423,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) */ reset_kcsan_skip(); - if (!kcsan_is_enabled()) + if (!kcsan_is_enabled(ctx)) goto out; /* @@ -489,7 +495,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) * Re-read value, and check if it is as expected; if not, we infer a * racy access. 
*/ - access_mask = get_ctx()->access_mask; + access_mask = ctx->access_mask; new = 0; switch (size) { case 1: -- cgit v1.2.3 From 49f72d5358dd3c0d28bcd2232c513000b15480f0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:51 +0200 Subject: kcsan: Rework atomic.h into permissive.h Rework atomic.h into permissive.h to better reflect its purpose, and introduce kcsan_ignore_address() and kcsan_ignore_data_race(). Introduce CONFIG_KCSAN_PERMISSIVE and update the stub functions in preparation for subsequent changes. As before, developers who choose to use KCSAN in "strict" mode will see all data races and are not affected. Furthermore, by relying on the value-change filter logic for kcsan_ignore_data_race(), even if the permissive rules are enabled, the opt-outs in report.c:skip_report() override them (such as for RCU-related functions by default). The option CONFIG_KCSAN_PERMISSIVE is disabled by default, so that the documented default behaviour of KCSAN does not change. Instead, like CONFIG_KCSAN_IGNORE_ATOMICS, the option needs to be explicitly opted in. Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- Documentation/dev-tools/kcsan.rst | 8 +++++++ kernel/kcsan/atomic.h | 23 ------------------- kernel/kcsan/core.c | 33 +++++++++++++++++++-------- kernel/kcsan/permissive.h | 47 +++++++++++++++++++++++++++++++++++++++ lib/Kconfig.kcsan | 10 +++++++++ 5 files changed, 89 insertions(+), 32 deletions(-) delete mode 100644 kernel/kcsan/atomic.h create mode 100644 kernel/kcsan/permissive.h (limited to 'kernel') diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index 69dc9c502ccc..7db43c7c09b8 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -127,6 +127,14 @@ Kconfig options: causes KCSAN to not report data races due to conflicts where the only plain accesses are aligned writes up to word size. +* ``CONFIG_KCSAN_PERMISSIVE``: Enable additional permissive rules to ignore + certain classes of common data races. Unlike the above, the rules are more + complex involving value-change patterns, access type, and address. This + option depends on ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=y``. For details + please see the ``kernel/kcsan/permissive.h``. Testers and maintainers that + only focus on reports from specific subsystems and not the whole kernel are + recommended to disable this option. + To use the strictest possible rules, select ``CONFIG_KCSAN_STRICT=y``, which configures KCSAN to follow the Linux-kernel memory consistency model (LKMM) as closely as possible. diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h deleted file mode 100644 index 530ae1bda8e7..000000000000 --- a/kernel/kcsan/atomic.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Rules for implicitly atomic memory accesses. - * - * Copyright (C) 2019, Google LLC. - */ - -#ifndef _KERNEL_KCSAN_ATOMIC_H -#define _KERNEL_KCSAN_ATOMIC_H - -#include - -/* - * Special rules for certain memory where concurrent conflicting accesses are - * common, however, the current convention is to not mark them; returns true if - * access to @ptr should be considered atomic. Called from slow-path. 
- */ -static bool kcsan_is_atomic_special(const volatile void *ptr) -{ - return false; -} - -#endif /* _KERNEL_KCSAN_ATOMIC_H */ diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 906100923b88..439edb9dcbb1 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -20,9 +20,9 @@ #include #include -#include "atomic.h" #include "encoding.h" #include "kcsan.h" +#include "permissive.h" static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE); unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK; @@ -353,6 +353,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, atomic_long_t *watchpoint, long encoded_watchpoint) { + const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0; struct kcsan_ctx *ctx = get_ctx(); unsigned long flags; bool consumed; @@ -374,6 +375,16 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, if (ctx->access_mask) return; + /* + * If the other thread does not want to ignore the access, and there was + * a value change as a result of this thread's operation, we will still + * generate a report of unknown origin. + * + * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter. + */ + if (!is_assert && kcsan_ignore_address(ptr)) + return; + /* * Consuming the watchpoint must be guarded by kcsan_is_enabled() to * avoid erroneously triggering reports if the context is disabled. @@ -396,7 +407,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]); } - if ((type & KCSAN_ACCESS_ASSERT) != 0) + if (is_assert) atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]); else atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]); @@ -427,12 +438,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) goto out; /* - * Special atomic rules: unlikely to be true, so we check them here in - * the slow-path, and not in the fast-path in is_atomic(). Call after - * kcsan_is_enabled(), as we may access memory that is not yet - * initialized during early boot. + * Check to-ignore addresses after kcsan_is_enabled(), as we may access + * memory that is not yet initialized during early boot. */ - if (!is_assert && kcsan_is_atomic_special(ptr)) + if (!is_assert && kcsan_ignore_address(ptr)) goto out; if (!check_encodable((unsigned long)ptr, size)) { @@ -518,8 +527,14 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (access_mask) diff &= access_mask; - /* Were we able to observe a value-change? */ - if (diff != 0) + /* + * Check if we observed a value change. + * + * Also check if the data race should be ignored (the rules depend on + * non-zero diff); if it is to be ignored, the below rules for + * KCSAN_VALUE_CHANGE_MAYBE apply. + */ + if (diff && !kcsan_ignore_data_race(size, type, old, new, diff)) value_change = KCSAN_VALUE_CHANGE_TRUE; /* Check if this access raced with another. */ diff --git a/kernel/kcsan/permissive.h b/kernel/kcsan/permissive.h new file mode 100644 index 000000000000..f90e30800c11 --- /dev/null +++ b/kernel/kcsan/permissive.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Special rules for ignoring entire classes of data-racy memory accesses. None + * of the rules here imply that such data races are generally safe! + * + * All rules in this file can be configured via CONFIG_KCSAN_PERMISSIVE. Keep + * them separate from core code to make it easier to audit. + * + * Copyright (C) 2019, Google LLC. 
+ */ + +#ifndef _KERNEL_KCSAN_PERMISSIVE_H +#define _KERNEL_KCSAN_PERMISSIVE_H + +#include + +/* + * Access ignore rules based on address. + */ +static __always_inline bool kcsan_ignore_address(const volatile void *ptr) +{ + if (!IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)) + return false; + + return false; +} + +/* + * Data race ignore rules based on access type and value change patterns. + */ +static bool +kcsan_ignore_data_race(size_t size, int type, u64 old, u64 new, u64 diff) +{ + if (!IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)) + return false; + + /* + * Rules here are only for plain read accesses, so that we still report + * data races between plain read-write accesses. + */ + if (type || size > sizeof(long)) + return false; + + return false; +} + +#endif /* _KERNEL_KCSAN_PERMISSIVE_H */ diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index c76fbb3ee09e..26f03c754d39 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -231,4 +231,14 @@ config KCSAN_IGNORE_ATOMICS due to two conflicting plain writes will be reported (aligned and unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n). +config KCSAN_PERMISSIVE + bool "Enable all additional permissive rules" + depends on KCSAN_REPORT_VALUE_CHANGE_ONLY + help + Enable additional permissive rules to ignore certain classes of data + races (also see kernel/kcsan/permissive.h). None of the permissive + rules imply that such data races are generally safe, but can be used + to further reduce reported data races due to data-racy patterns + common across the kernel. + endif # KCSAN -- cgit v1.2.3 From 9c827cd1fcdf54bb50f874f91af0d5de2aceb035 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:52 +0200 Subject: kcsan: Print if strict or non-strict during init Show a brief message if KCSAN is strict or non-strict, and if non-strict also say that CONFIG_KCSAN_STRICT=y can be used to see all data races. This is to hint to users of KCSAN who blindly use the default config that their configuration might miss data races of interest. Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 439edb9dcbb1..76e67d1e02d4 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -656,6 +656,15 @@ void __init kcsan_init(void) pr_info("enabled early\n"); WRITE_ONCE(kcsan_enabled, true); } + + if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) || + IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) || + IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) || + IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { + pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n"); + } else { + pr_info("strict mode configured\n"); + } } /* === Exported interface =================================================== */ -- cgit v1.2.3 From d8fd74d35a8d3c602232e3238e916bda9d03d520 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:53 +0200 Subject: kcsan: permissive: Ignore data-racy 1-bit value changes Add rules to ignore data-racy reads with only 1-bit value changes. Details about the rules are captured in comments in kernel/kcsan/permissive.h. More background follows. While investigating a number of data races, we've encountered data-racy accesses on flags variables to be very common. 
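A minimal sketch of such an access pair (hypothetical code written for this log; PF_SOME_FLAG and do_something() are invented names, not taken from the patch):

	/* Thread A: plain read, masking out a single bit. */
	if (current->flags & PF_SOME_FLAG)
		do_something();

	/* Thread B: plain read-modify-write, setting a single bit. */
	current->flags |= PF_SOME_FLAG;

Neither access is marked, so strict KCSAN reports a data race here, even though any observed value change is confined to a single bit.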
The typical pattern is a reader masking all but one bit, and/or the writer setting/clearing only 1 bit (current->flags being a frequently encountered case; more examples in mm/sl[au]b.c, which disable KCSAN for this reason).

Since these types of data-racy accesses are common (with the assumption they are intentional and hard to miscompile), having the option (with CONFIG_KCSAN_PERMISSIVE=y) to filter them avoids forcing everyone to mark them; enabling the filtering is deliberately left to preference at this time. One important motivation for having this option built-in is to move closer to being able to enable KCSAN on CI systems or for testers wishing to test the whole kernel, while more easily filtering less interesting data races with higher probability.

For the implementation, we considered several alternatives, but had one major requirement: that the rules be kept together with the Linux-kernel tree. Adding them to the compiler would preclude us from making changes quickly; if the rules require tweaks, having them part of the compiler requires waiting another ~1 year for the next release -- that's not realistic. We are left with the following options:

1. Maintain compiler plugins as part of the kernel tree that remove instrumentation for some accesses (e.g. plain-& with 1-bit mask). The analysis would be reader-side focused, as no assumption can be made about racing writers. Because it seems unrealistic to maintain 2 plugins, one each for LLVM and GCC, we would likely pick LLVM. Furthermore, no kernel infrastructure exists to maintain LLVM plugins, and the build-system implications and maintenance overheads do not look great (historically, plugins written against old LLVM APIs are not guaranteed to work with newer LLVM APIs).

2. Find a set of rules that can be expressed in terms of observed value changes, and make it part of the KCSAN runtime. The analysis is writer-side focused, given we rely on observed value changes.

The approach taken here is (2). While a complete approach requires both (1) and (2), experiments show that the majority of data races involving trivial bit operations on flags variables can be filtered with (2) alone. It goes without saying that the filtering of data races using (1) or (2) does _not_ guarantee they are safe! Therefore, limiting ourselves to (2) for now is the conservative choice for setups that wish to enable CONFIG_KCSAN_PERMISSIVE=y.

Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- kernel/kcsan/kcsan_test.c | 32 +++++++++++++++++++++++++++++++ kernel/kcsan/permissive.h | 49 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 80 insertions(+), 1 deletion(-) (limited to 'kernel')
diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 8bcffbdef3d3..dc55fd5a36fc 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c
@@ -414,6 +414,14 @@ static noinline void test_kernel_atomic_builtins(void) __atomic_load_n(&test_var, __ATOMIC_RELAXED); } +static noinline void test_kernel_xor_1bit(void) +{ + /* Do not report data races between the read-writes. */ + kcsan_nestable_atomic_begin(); + test_var ^= 0x10000; + kcsan_nestable_atomic_end(); +} + /* ===== Test cases ===== */ /* Simple test with normal data race.
 */ @@ -952,6 +960,29 @@ static void test_atomic_builtins(struct kunit *test) KUNIT_EXPECT_FALSE(test, match_never); } +__no_kcsan +static void test_1bit_value_change(struct kunit *test) +{ + const struct expect_report expect = { + .access = { + { test_kernel_read, &test_var, sizeof(test_var), 0 }, + { test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, + }, + }; + bool match = false; + + begin_test_checks(test_kernel_read, test_kernel_xor_1bit); + do { + match = IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) + ? report_available() + : report_matches(&expect); + } while (!end_test_checks(match)); + if (IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)) + KUNIT_EXPECT_FALSE(test, match); + else + KUNIT_EXPECT_TRUE(test, match); +} + /* * Generate thread counts for all test cases. Values generated are in interval * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
@@ -1024,6 +1055,7 @@ static struct kunit_case kcsan_test_cases[] = { KCSAN_KUNIT_CASE(test_jiffies_noreport), KCSAN_KUNIT_CASE(test_seqlock_noreport), KCSAN_KUNIT_CASE(test_atomic_builtins), + KCSAN_KUNIT_CASE(test_1bit_value_change), {}, };
diff --git a/kernel/kcsan/permissive.h b/kernel/kcsan/permissive.h index f90e30800c11..2c01fe4a59ee 100644 --- a/kernel/kcsan/permissive.h +++ b/kernel/kcsan/permissive.h
@@ -12,6 +12,8 @@ #ifndef _KERNEL_KCSAN_PERMISSIVE_H #define _KERNEL_KCSAN_PERMISSIVE_H +#include +#include #include /*
@@ -22,7 +24,11 @@ static __always_inline bool kcsan_ignore_address(const volatile void *ptr) if (!IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)) return false; - return false; + /* + * Data-racy bitops on current->flags are too common, ignore completely + * for now. + */ + return ptr == &current->flags; } /*
@@ -41,6 +47,47 @@ kcsan_ignore_data_race(size_t size, int type, u64 old, u64 new, u64 diff) if (type || size > sizeof(long)) return false; + /* + * A common pattern is checking/setting just 1 bit in a variable; for + * example: + * + * if (flags & SOME_FLAG) { ... } + * + * and elsewhere flags is updated concurrently: + * + * flags |= SOME_OTHER_FLAG; // just 1 bit + * + * While it is still recommended that such accesses be marked + * appropriately, in many cases these types of data races are so common + * that marking them all is often unrealistic and left to maintainer + * preference. + * + * The assumption in all cases is that with all known compiler + * optimizations (including those that tear accesses), because no more + * than 1 bit changed, the plain accesses are safe despite the presence + * of data races. + * + * The rules here will ignore the data races if we observe no more than + * 1 bit changed. + * + * Of course many operations can effectively change just 1 bit, but the + * general assumption that data races involving 1-bit changes can be + * tolerated still applies. + * + * And in case a true bug is missed, the bug likely manifests as a + * reportable data race elsewhere. + */ + if (hweight64(diff) == 1) { + /* + * Exception: Report data races where the values look like + * ordinary booleans (one of them was 0 and the 0th bit was + * changed). More often than not, they come with interesting + * memory ordering requirements, so let's report them.
+ */ + if (!((!old || !new) && diff == 1)) + return true; + } + return false; } -- cgit v1.2.3 From 463e862ac63ef27fca423782536f6465abc3f180 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Jul 2021 14:38:24 +0100 Subject: swiotlb: Convert io_default_tlb_mem to static allocation Since commit 69031f500865 ("swiotlb: Set dev->dma_io_tlb_mem to the swiotlb pool used"), 'struct device' may hold a copy of the global 'io_default_tlb_mem' pointer if the device is using swiotlb for DMA. A subsequent call to swiotlb_exit() will therefore leave dangling pointers behind in these device structures, resulting in KASAN splats such as: | BUG: KASAN: use-after-free in __iommu_dma_unmap_swiotlb+0x64/0xb0 | Read of size 8 at addr ffff8881d7830000 by task swapper/0/0 | | CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.12.0-rc3-debug #1 | Hardware name: HP HP Desktop M01-F1xxx/87D6, BIOS F.12 12/17/2020 | Call Trace: | | dump_stack+0x9c/0xcf | print_address_description.constprop.0+0x18/0x130 | kasan_report.cold+0x7f/0x111 | __iommu_dma_unmap_swiotlb+0x64/0xb0 | nvme_pci_complete_rq+0x73/0x130 | blk_complete_reqs+0x6f/0x80 | __do_softirq+0xfc/0x3be Convert 'io_default_tlb_mem' to a static structure, so that the per-device pointers remain valid after swiotlb_exit() has been invoked. All users are updated to reference the static structure directly, using the 'nslabs' field to determine whether swiotlb has been initialised. The 'slots' array is still allocated dynamically and referenced via a pointer rather than a flexible array member. Cc: Claire Chang Cc: Christoph Hellwig Cc: Robin Murphy Cc: Konrad Rzeszutek Wilk Fixes: 69031f500865 ("swiotlb: Set dev->dma_io_tlb_mem to the swiotlb pool used") Reported-by: Nathan Chancellor Tested-by: Nathan Chancellor Tested-by: Claire Chang Reviewed-by: Christoph Hellwig Signed-off-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- drivers/base/core.c | 2 +- drivers/xen/swiotlb-xen.c | 4 +-- include/linux/swiotlb.h | 4 +-- kernel/dma/swiotlb.c | 66 ++++++++++++++++++++++++++--------------------- 4 files changed, 41 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/drivers/base/core.c b/drivers/base/core.c index ea5b85354526..b49824001cfa 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2848,7 +2848,7 @@ void device_initialize(struct device *dev) dev->dma_coherent = dma_default_coherent; #endif #ifdef CONFIG_SWIOTLB - dev->dma_io_tlb_mem = io_tlb_default_mem; + dev->dma_io_tlb_mem = &io_tlb_default_mem; #endif } EXPORT_SYMBOL_GPL(device_initialize); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 785ec7e8be01..f06d9b4f1e0f 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -164,7 +164,7 @@ int __ref xen_swiotlb_init(void) int rc = -ENOMEM; char *start; - if (io_tlb_default_mem != NULL) { + if (io_tlb_default_mem.nslabs) { pr_warn("swiotlb buffer already initialized\n"); return -EEXIST; } @@ -547,7 +547,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, static int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) { - return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask; + return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask; } const struct dma_map_ops xen_swiotlb_dma_ops = { diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 39284ff2a6cd..b0cb2a9973f4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -103,9 +103,9 @@ struct io_tlb_mem { phys_addr_t orig_addr; size_t alloc_size; unsigned 
int list; - } slots[]; + } *slots; }; -extern struct io_tlb_mem *io_tlb_default_mem; +extern struct io_tlb_mem io_tlb_default_mem; static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) { diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index f1a9ae7fad8f..7948f274f9bb 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -70,7 +70,7 @@ enum swiotlb_force swiotlb_force; -struct io_tlb_mem *io_tlb_default_mem; +struct io_tlb_mem io_tlb_default_mem; /* * Max segment that we can provide which (if pages are contingous) will @@ -101,7 +101,7 @@ early_param("swiotlb", setup_io_tlb_npages); unsigned int swiotlb_max_segment(void) { - return io_tlb_default_mem ? max_segment : 0; + return io_tlb_default_mem.nslabs ? max_segment : 0; } EXPORT_SYMBOL_GPL(swiotlb_max_segment); @@ -134,9 +134,9 @@ void __init swiotlb_adjust_size(unsigned long size) void swiotlb_print_info(void) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = &io_tlb_default_mem; - if (!mem) { + if (!mem->nslabs) { pr_warn("No low mem\n"); return; } @@ -163,11 +163,11 @@ static inline unsigned long nr_slots(u64 val) */ void __init swiotlb_update_mem_attributes(void) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = &io_tlb_default_mem; void *vaddr; unsigned long bytes; - if (!mem || mem->late_alloc) + if (!mem->nslabs || mem->late_alloc) return; vaddr = phys_to_virt(mem->start); bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); @@ -201,25 +201,24 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { - struct io_tlb_mem *mem; + struct io_tlb_mem *mem = &io_tlb_default_mem; size_t alloc_size; if (swiotlb_force == SWIOTLB_NO_FORCE) return 0; /* protect against double initialization */ - if (WARN_ON_ONCE(io_tlb_default_mem)) + if (WARN_ON_ONCE(mem->nslabs)) return -ENOMEM; - alloc_size = PAGE_ALIGN(struct_size(mem, slots, nslabs)); - mem = memblock_alloc(alloc_size, PAGE_SIZE); - if (!mem) + alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); + mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem->slots) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); - io_tlb_default_mem = mem; if (verbose) swiotlb_print_info(); swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); @@ -304,26 +303,24 @@ swiotlb_late_init_with_default_size(size_t default_size) int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) { - struct io_tlb_mem *mem; + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long bytes = nslabs << IO_TLB_SHIFT; if (swiotlb_force == SWIOTLB_NO_FORCE) return 0; /* protect against double initialization */ - if (WARN_ON_ONCE(io_tlb_default_mem)) + if (WARN_ON_ONCE(mem->nslabs)) return -ENOMEM; - mem = (void *)__get_free_pages(GFP_KERNEL, - get_order(struct_size(mem, slots, nslabs))); - if (!mem) + mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(array_size(sizeof(*mem->slots), nslabs))); + if (!mem->slots) return -ENOMEM; - memset(mem, 0, sizeof(*mem)); set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true); - io_tlb_default_mem = mem; swiotlb_print_info(); swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); return 0; @@ -331,18 +328,18 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) void __init 
swiotlb_exit(void) { - struct io_tlb_mem *mem = io_tlb_default_mem; size_t size; + struct io_tlb_mem *mem = &io_tlb_default_mem; - if (!mem) + if (!mem->nslabs) return; - size = struct_size(mem, slots, mem->nslabs); + size = array_size(sizeof(*mem->slots), mem->nslabs); if (mem->late_alloc) - free_pages((unsigned long)mem, get_order(size)); + free_pages((unsigned long)mem->slots, get_order(size)); else - memblock_free_late(__pa(mem), PAGE_ALIGN(size)); - io_tlb_default_mem = NULL; + memblock_free_late(__pa(mem->slots), PAGE_ALIGN(size)); + memset(mem, 0, sizeof(*mem)); } /* @@ -696,7 +693,9 @@ size_t swiotlb_max_mapping_size(struct device *dev) bool is_swiotlb_active(struct device *dev) { - return dev->dma_io_tlb_mem != NULL; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; + + return mem && mem->nslabs; } EXPORT_SYMBOL_GPL(is_swiotlb_active); @@ -711,10 +710,10 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem) static int __init swiotlb_create_default_debugfs(void) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = &io_tlb_default_mem; debugfs_dir = debugfs_create_dir("swiotlb", NULL); - if (mem) { + if (mem->nslabs) { mem->debugfs = debugfs_dir; swiotlb_create_debugfs_files(mem); } @@ -783,10 +782,17 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, * to it. */ if (!mem) { - mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL); + mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return -ENOMEM; + mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs), + GFP_KERNEL); + if (!mem->slots) { + kfree(mem); + return -ENOMEM; + } + set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), rmem->size >> PAGE_SHIFT); swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false); @@ -806,7 +812,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, static void rmem_swiotlb_device_release(struct reserved_mem *rmem, struct device *dev) { - dev->dma_io_tlb_mem = io_tlb_default_mem; + dev->dma_io_tlb_mem = &io_tlb_default_mem; } static const struct reserved_mem_ops rmem_swiotlb_ops = { -- cgit v1.2.3 From 1efd3fc0ccf52e1aa5f0bf5b0d82847180d20951 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Jul 2021 14:38:25 +0100 Subject: swiotlb: Emit diagnostic in swiotlb_exit() A recent debugging session would have been made a little bit easier if we had noticed sooner that swiotlb_exit() was being called during boot. Add a simple diagnostic message to swiotlb_exit() to complement the one from swiotlb_print_info() during initialisation. 
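Pieced together from the hunk below, the head of swiotlb_exit() then reads (the remainder of the function is elided here):

	void __init swiotlb_exit(void)
	{
		size_t size;
		struct io_tlb_mem *mem = &io_tlb_default_mem;

		if (!mem->nslabs)
			return;	/* never initialised, or already torn down */

		pr_info("tearing down default memory pool\n");
		/* ... free the slots array as before ... */
	}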
Cc: Claire Chang Cc: Christoph Hellwig Cc: Robin Murphy Link: https://lore.kernel.org/r/20210705190352.GA19461@willie-the-truck Suggested-by: Konrad Rzeszutek Wilk Tested-by: Nathan Chancellor Tested-by: Claire Chang Reviewed-by: Christoph Hellwig Signed-off-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel')
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 7948f274f9bb..b3c793ed9e64 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c
@@ -334,6 +334,7 @@ void __init swiotlb_exit(void) if (!mem->nslabs) return; + pr_info("tearing down default memory pool\n"); size = array_size(sizeof(*mem->slots), mem->nslabs); if (mem->late_alloc) free_pages((unsigned long)mem->slots, get_order(size)); -- cgit v1.2.3
From ad6c00283163cb7ad52cdf97d2850547446f7d98 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Jul 2021 14:38:26 +0100 Subject: swiotlb: Free tbl memory in swiotlb_exit()

Although swiotlb_exit() frees the 'slots' metadata array referenced by 'io_tlb_default_mem', it leaves the underlying buffer pages allocated despite no longer being usable. Extend swiotlb_exit() to free the buffer pages as well as the slots array.

Cc: Claire Chang Cc: Christoph Hellwig Cc: Robin Murphy Cc: Konrad Rzeszutek Wilk Tested-by: Nathan Chancellor Tested-by: Claire Chang Reviewed-by: Christoph Hellwig Signed-off-by: Will Deacon Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'kernel')
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index b3c793ed9e64..87c40517e822 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c
@@ -328,18 +328,27 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) void __init swiotlb_exit(void) { - size_t size; struct io_tlb_mem *mem = &io_tlb_default_mem; + unsigned long tbl_vaddr; + size_t tbl_size, slots_size; if (!mem->nslabs) return; pr_info("tearing down default memory pool\n"); - size = array_size(sizeof(*mem->slots), mem->nslabs); - if (mem->late_alloc) - free_pages((unsigned long)mem->slots, get_order(size)); - else - memblock_free_late(__pa(mem->slots), PAGE_ALIGN(size)); + tbl_vaddr = (unsigned long)phys_to_virt(mem->start); + tbl_size = PAGE_ALIGN(mem->end - mem->start); + slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); + + set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT); + if (mem->late_alloc) { + free_pages(tbl_vaddr, get_order(tbl_size)); + free_pages((unsigned long)mem->slots, get_order(slots_size)); + } else { + memblock_free_late(mem->start, tbl_size); + memblock_free_late(__pa(mem->slots), slots_size); + } + memset(mem, 0, sizeof(*mem)); } -- cgit v1.2.3
From 95f7f15461fa482a05237669507b4c9b06865b73 Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Wed, 14 Jul 2021 11:26:20 +0530 Subject: kdb: Get rid of custom debug heap allocator

Currently the only user of the debug heap is kdbnearsym(), which can be modified to use a statically allocated buffer for the symbol name instead, as per its current usage. So do that and hence remove the custom debug heap allocator. Note that this change puts a restriction on kdbnearsym() callers: since namebuf is shared, a caller should consume the symbol returned immediately, prior to another call to fetch a different symbol.
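Expressed as a usage sketch (hypothetical; addr1 and addr2 are invented addresses):

	kdb_symtab_t symtab;

	kdbnearsym(addr1, &symtab);
	kdb_printf("%s\n", symtab.sym_name);	/* consume the name now */

	kdbnearsym(addr2, &symtab);	/* reuses the shared static namebuf,
					 * so the name returned above is stale
					 */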
Also, this change uses the standard KSYM_NAME_LEN macro for the namebuf allocation instead of the local knt1_size definition, which should avoid any conflicts caused by changes to the KSYM_NAME_LEN macro value. This change has been tested using kgdbtest on arm64, which doesn't show any regressions.

Suggested-by: Daniel Thompson Signed-off-by: Sumit Garg Reviewed-by: Douglas Anderson Link: https://lore.kernel.org/r/20210714055620.369915-1-sumit.garg@linaro.org Signed-off-by: Daniel Thompson --- kernel/debug/kdb/kdb_debugger.c | 1 - kernel/debug/kdb/kdb_private.h | 5 - kernel/debug/kdb/kdb_support.c | 329 ++++------------------------------------ 3 files changed, 28 insertions(+), 307 deletions(-) (limited to 'kernel')
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c index 0220afda3200..e91fc3e4edd5 100644 --- a/kernel/debug/kdb/kdb_debugger.c +++ b/kernel/debug/kdb/kdb_debugger.c
@@ -140,7 +140,6 @@ int kdb_stub(struct kgdb_state *ks) */ kdb_common_deinit_state(); KDB_STATE_CLEAR(PAGER); - kdbnearsym_cleanup(); if (error == KDB_CMD_KGDB) { if (KDB_STATE(DOING_KGDB)) KDB_STATE_CLEAR(DOING_KGDB);
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 170c69aedebb..8dbc840113c9 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h
@@ -109,7 +109,6 @@ extern int kdbgetaddrarg(int, const char **, int*, unsigned long *, long *, char **); extern int kdbgetsymval(const char *, kdb_symtab_t *); extern int kdbnearsym(unsigned long, kdb_symtab_t *); -extern void kdbnearsym_cleanup(void); extern char *kdb_strdup(const char *str, gfp_t type); extern void kdb_symbol_print(unsigned long, const kdb_symtab_t *, unsigned int);
@@ -233,10 +232,6 @@ extern struct task_struct *kdb_curr_task(int); #define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL) -extern void *debug_kmalloc(size_t size, gfp_t flags); -extern void debug_kfree(void *); -extern void debug_kusage(void); - extern struct task_struct *kdb_current_task; extern struct pt_regs *kdb_current_regs;
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 9f50d22d68e6..c605b17b2a0d 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c
@@ -52,48 +52,48 @@ int kdbgetsymval(const char *symname, kdb_symtab_t *symtab) } EXPORT_SYMBOL(kdbgetsymval); -static char *kdb_name_table[100]; /* arbitrary size */ - -/* - * kdbnearsym - Return the name of the symbol with the nearest address - * less than 'addr'. +/** + * kdbnearsym() - Return the name of the symbol with the nearest address + * less than @addr. + * @addr: Address to check for near symbol + * @symtab: Structure to receive results * - * Parameters: - * addr Address to check for symbol near - * symtab Structure to receive results - * Returns: - * 0 No sections contain this address, symtab zero filled - * 1 Address mapped to module/symbol/section, data in symtab - * Remarks: - * 2.6 kallsyms has a "feature" where it unpacks the name into a - * string. If that string is reused before the caller expects it - * then the caller sees its string change without warning. To - * avoid cluttering up the main kdb code with lots of kdb_strdup, - * tests and kfree calls, kdbnearsym maintains an LRU list of the - * last few unique strings. The list is sized large enough to - * hold active strings, no kdb caller of kdbnearsym makes more - * than ~20 later calls before using a saved value. + * WARNING: This function may return a pointer to a single statically + * allocated buffer (namebuf).
kdb's unusual calling context (single + * threaded, all other CPUs halted) provides us sufficient locking for + * this to be safe. The only constraint imposed by the static buffer is + * that the caller must consume any previous reply prior to another call + * to lookup a new symbol. + * + * Note that, strictly speaking, some architectures may re-enter the kdb + * trap if the system turns out to be very badly damaged and this breaks + * the single-threaded assumption above. In these circumstances successful + * continuation and exit from the inner trap is unlikely to work and any + * user attempting this receives a prominent warning before being allowed + * to progress. In these circumstances we remain memory safe because + * namebuf[KSYM_NAME_LEN-1] will never change from '\0' although we do + * tolerate the possibility of garbled symbol display from the outer kdb + * trap. + * + * Return: + * * 0 - No sections contain this address, symtab zero filled + * * 1 - Address mapped to module/symbol/section, data in symtab */ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) { int ret = 0; unsigned long symbolsize = 0; unsigned long offset = 0; -#define knt1_size 128 /* must be >= kallsyms table size */ - char *knt1 = NULL; + static char namebuf[KSYM_NAME_LEN]; kdb_dbg_printf(AR, "addr=0x%lx, symtab=%px\n", addr, symtab); memset(symtab, 0, sizeof(*symtab)); if (addr < 4096) goto out; - knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC); - if (!knt1) { - kdb_func_printf("addr=0x%lx cannot kmalloc knt1\n", addr); - goto out; - } + symtab->sym_name = kallsyms_lookup(addr, &symbolsize , &offset, - (char **)(&symtab->mod_name), knt1); + (char **)(&symtab->mod_name), namebuf); if (offset > 8*1024*1024) { symtab->sym_name = NULL; addr = offset = symbolsize = 0; @@ -102,63 +102,14 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) symtab->sym_end = symtab->sym_start + symbolsize; ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0'; - if (ret) { - int i; - /* Another 2.6 kallsyms "feature". Sometimes the sym_name is - * set but the buffer passed into kallsyms_lookup is not used, - * so it contains garbage. The caller has to work out which - * buffer needs to be saved. - * - * What was Rusty smoking when he wrote that code? 
- */ - if (symtab->sym_name != knt1) { - strncpy(knt1, symtab->sym_name, knt1_size); - knt1[knt1_size-1] = '\0'; - } - for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) { - if (kdb_name_table[i] && - strcmp(kdb_name_table[i], knt1) == 0) - break; - } - if (i >= ARRAY_SIZE(kdb_name_table)) { - debug_kfree(kdb_name_table[0]); - memmove(kdb_name_table, kdb_name_table+1, - sizeof(kdb_name_table[0]) * - (ARRAY_SIZE(kdb_name_table)-1)); - } else { - debug_kfree(knt1); - knt1 = kdb_name_table[i]; - memmove(kdb_name_table+i, kdb_name_table+i+1, - sizeof(kdb_name_table[0]) * - (ARRAY_SIZE(kdb_name_table)-i-1)); - } - i = ARRAY_SIZE(kdb_name_table) - 1; - kdb_name_table[i] = knt1; - symtab->sym_name = kdb_name_table[i]; - knt1 = NULL; - } - if (symtab->mod_name == NULL) symtab->mod_name = "kernel"; kdb_dbg_printf(AR, "returns %d symtab->sym_start=0x%lx, symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret, symtab->sym_start, symtab->mod_name, symtab->sym_name, symtab->sym_name); - out: - debug_kfree(knt1); return ret; } -void kdbnearsym_cleanup(void) -{ - int i; - for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) { - if (kdb_name_table[i]) { - debug_kfree(kdb_name_table[i]); - kdb_name_table[i] = NULL; - } - } -} - static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1]; /* @@ -656,230 +607,6 @@ unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask) return (mask & kdb_task_state_string(state)) != 0; } -/* Last ditch allocator for debugging, so we can still debug even when - * the GFP_ATOMIC pool has been exhausted. The algorithms are tuned - * for space usage, not for speed. One smallish memory pool, the free - * chain is always in ascending address order to allow coalescing, - * allocations are done in brute force best fit. - */ - -struct debug_alloc_header { - u32 next; /* offset of next header from start of pool */ - u32 size; - void *caller; -}; - -/* The memory returned by this allocator must be aligned, which means - * so must the header size. Do not assume that sizeof(struct - * debug_alloc_header) is a multiple of the alignment, explicitly - * calculate the overhead of this header, including the alignment. - * The rest of this code must not use sizeof() on any header or - * pointer to a header. - */ -#define dah_align 8 -#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align) - -static u64 debug_alloc_pool_aligned[256*1024/dah_align]; /* 256K pool */ -static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned; -static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max; - -/* Locking is awkward. The debug code is called from all contexts, - * including non maskable interrupts. A normal spinlock is not safe - * in NMI context. Try to get the debug allocator lock, if it cannot - * be obtained after a second then give up. If the lock could not be - * previously obtained on this cpu then only try once. - * - * sparse has no annotation for "this function _sometimes_ acquires a - * lock", so fudge the acquire/release notation. 
- */ -static DEFINE_SPINLOCK(dap_lock); -static int get_dap_lock(void) - __acquires(dap_lock) -{ - static int dap_locked = -1; - int count; - if (dap_locked == smp_processor_id()) - count = 1; - else - count = 1000; - while (1) { - if (spin_trylock(&dap_lock)) { - dap_locked = -1; - return 1; - } - if (!count--) - break; - udelay(1000); - } - dap_locked = smp_processor_id(); - __acquire(dap_lock); - return 0; -} - -void *debug_kmalloc(size_t size, gfp_t flags) -{ - unsigned int rem, h_offset; - struct debug_alloc_header *best, *bestprev, *prev, *h; - void *p = NULL; - if (!get_dap_lock()) { - __release(dap_lock); /* we never actually got it */ - return NULL; - } - h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first); - if (dah_first_call) { - h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead; - dah_first_call = 0; - } - size = ALIGN(size, dah_align); - prev = best = bestprev = NULL; - while (1) { - if (h->size >= size && (!best || h->size < best->size)) { - best = h; - bestprev = prev; - if (h->size == size) - break; - } - if (!h->next) - break; - prev = h; - h = (struct debug_alloc_header *)(debug_alloc_pool + h->next); - } - if (!best) - goto out; - rem = best->size - size; - /* The pool must always contain at least one header */ - if (best->next == 0 && bestprev == NULL && rem < dah_overhead) - goto out; - if (rem >= dah_overhead) { - best->size = size; - h_offset = ((char *)best - debug_alloc_pool) + - dah_overhead + best->size; - h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset); - h->size = rem - dah_overhead; - h->next = best->next; - } else - h_offset = best->next; - best->caller = __builtin_return_address(0); - dah_used += best->size; - dah_used_max = max(dah_used, dah_used_max); - if (bestprev) - bestprev->next = h_offset; - else - dah_first = h_offset; - p = (char *)best + dah_overhead; - memset(p, POISON_INUSE, best->size - 1); - *((char *)p + best->size - 1) = POISON_END; -out: - spin_unlock(&dap_lock); - return p; -} - -void debug_kfree(void *p) -{ - struct debug_alloc_header *h; - unsigned int h_offset; - if (!p) - return; - if ((char *)p < debug_alloc_pool || - (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) { - kfree(p); - return; - } - if (!get_dap_lock()) { - __release(dap_lock); /* we never actually got it */ - return; /* memory leak, cannot be helped */ - } - h = (struct debug_alloc_header *)((char *)p - dah_overhead); - memset(p, POISON_FREE, h->size - 1); - *((char *)p + h->size - 1) = POISON_END; - h->caller = NULL; - dah_used -= h->size; - h_offset = (char *)h - debug_alloc_pool; - if (h_offset < dah_first) { - h->next = dah_first; - dah_first = h_offset; - } else { - struct debug_alloc_header *prev; - unsigned int prev_offset; - prev = (struct debug_alloc_header *)(debug_alloc_pool + - dah_first); - while (1) { - if (!prev->next || prev->next > h_offset) - break; - prev = (struct debug_alloc_header *) - (debug_alloc_pool + prev->next); - } - prev_offset = (char *)prev - debug_alloc_pool; - if (prev_offset + dah_overhead + prev->size == h_offset) { - prev->size += dah_overhead + h->size; - memset(h, POISON_FREE, dah_overhead - 1); - *((char *)h + dah_overhead - 1) = POISON_END; - h = prev; - h_offset = prev_offset; - } else { - h->next = prev->next; - prev->next = h_offset; - } - } - if (h_offset + dah_overhead + h->size == h->next) { - struct debug_alloc_header *next; - next = (struct debug_alloc_header *) - (debug_alloc_pool + h->next); - h->size += dah_overhead + next->size; - h->next = next->next; - memset(next, 
POISON_FREE, dah_overhead - 1); - *((char *)next + dah_overhead - 1) = POISON_END; - } - spin_unlock(&dap_lock); -} - -void debug_kusage(void) -{ - struct debug_alloc_header *h_free, *h_used; -#ifdef CONFIG_IA64 - /* FIXME: using dah for ia64 unwind always results in a memory leak. - * Fix that memory leak first, then set debug_kusage_one_time = 1 for - * all architectures. - */ - static int debug_kusage_one_time; -#else - static int debug_kusage_one_time = 1; -#endif - if (!get_dap_lock()) { - __release(dap_lock); /* we never actually got it */ - return; - } - h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first); - if (dah_first == 0 && - (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead || - dah_first_call)) - goto out; - if (!debug_kusage_one_time) - goto out; - debug_kusage_one_time = 0; - kdb_func_printf("debug_kmalloc memory leak dah_first %d\n", dah_first); - if (dah_first) { - h_used = (struct debug_alloc_header *)debug_alloc_pool; - kdb_func_printf("h_used %px size %d\n", h_used, h_used->size); - } - do { - h_used = (struct debug_alloc_header *) - ((char *)h_free + dah_overhead + h_free->size); - kdb_func_printf("h_used %px size %d caller %px\n", - h_used, h_used->size, h_used->caller); - h_free = (struct debug_alloc_header *) - (debug_alloc_pool + h_free->next); - } while (h_free->next); - h_used = (struct debug_alloc_header *) - ((char *)h_free + dah_overhead + h_free->size); - if ((char *)h_used - debug_alloc_pool != - sizeof(debug_alloc_pool_aligned)) - kdb_func_printf("h_used %px size %d caller %px\n", - h_used, h_used->size, h_used->caller); -out: - spin_unlock(&dap_lock); -} - /* Maintain a small stack of kdb_flags to allow recursion without disturbing * the global kdb state. */ -- cgit v1.2.3 From b39cded834154cf54442489b56b33d047edd6d8f Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Mon, 12 Jul 2021 19:16:17 +0530 Subject: kdb: Rename struct defcmd_set to struct kdb_macro Rename struct defcmd_set to struct kdb_macro as that sounds more appropriate given its purpose. 
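For orientation, such a macro is what a user defines interactively with defcmd/endefcmd; a hypothetical session (the prompt rendering is approximate and the contained command purely illustrative) might look like:

	kdb> defcmd dumpinfo "" "Dump common debug state"
	[defcmd]kdb> bt
	[defcmd]kdb> endefcmd

Invoking dumpinfo later replays each recorded statement through kdb_parse(), as kdb_exec_defcmd() in the diffs below shows.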
Suggested-by: Daniel Thompson Signed-off-by: Sumit Garg Reviewed-by: Douglas Anderson Link: https://lore.kernel.org/r/20210712134620.276667-2-sumit.garg@linaro.org Signed-off-by: Daniel Thompson --- kernel/debug/kdb/kdb_main.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index d8ee5647b732..5cf9867fa118 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -654,7 +654,7 @@ static void kdb_cmderror(int diag) * Returns: * zero for success, a kdb diagnostic if error */ -struct defcmd_set { +struct kdb_macro { int count; bool usable; char *name; @@ -662,8 +662,8 @@ struct defcmd_set { char *help; char **command; }; -static struct defcmd_set *defcmd_set; -static int defcmd_set_count; +static struct kdb_macro *kdb_macro; +static int kdb_macro_count; static bool defcmd_in_progress; /* Forward references */ @@ -671,7 +671,7 @@ static int kdb_exec_defcmd(int argc, const char **argv); static int kdb_defcmd2(const char *cmdstr, const char *argv0) { - struct defcmd_set *s = defcmd_set + defcmd_set_count - 1; + struct kdb_macro *s = kdb_macro + kdb_macro_count - 1; char **save_command = s->command; if (strcmp(argv0, "endefcmd") == 0) { defcmd_in_progress = false; @@ -704,7 +704,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) static int kdb_defcmd(int argc, const char **argv) { - struct defcmd_set *save_defcmd_set = defcmd_set, *s; + struct kdb_macro *save_kdb_macro = kdb_macro, *s; if (defcmd_in_progress) { kdb_printf("kdb: nested defcmd detected, assuming missing " "endefcmd\n"); @@ -712,7 +712,7 @@ static int kdb_defcmd(int argc, const char **argv) } if (argc == 0) { int i; - for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) { + for (s = kdb_macro; s < kdb_macro + kdb_macro_count; ++s) { kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name, s->usage, s->help); for (i = 0; i < s->count; ++i) @@ -727,13 +727,13 @@ static int kdb_defcmd(int argc, const char **argv) kdb_printf("Command only available during kdb_init()\n"); return KDB_NOTIMP; } - defcmd_set = kmalloc_array(defcmd_set_count + 1, sizeof(*defcmd_set), - GFP_KDB); - if (!defcmd_set) + kdb_macro = kmalloc_array(kdb_macro_count + 1, sizeof(*kdb_macro), + GFP_KDB); + if (!kdb_macro) goto fail_defcmd; - memcpy(defcmd_set, save_defcmd_set, - defcmd_set_count * sizeof(*defcmd_set)); - s = defcmd_set + defcmd_set_count; + memcpy(kdb_macro, save_kdb_macro, + kdb_macro_count * sizeof(*kdb_macro)); + s = kdb_macro + kdb_macro_count; memset(s, 0, sizeof(*s)); s->usable = true; s->name = kdb_strdup(argv[1], GFP_KDB); @@ -753,19 +753,19 @@ static int kdb_defcmd(int argc, const char **argv) strcpy(s->help, argv[3]+1); s->help[strlen(s->help)-1] = '\0'; } - ++defcmd_set_count; + ++kdb_macro_count; defcmd_in_progress = true; - kfree(save_defcmd_set); + kfree(save_kdb_macro); return 0; fail_help: kfree(s->usage); fail_usage: kfree(s->name); fail_name: - kfree(defcmd_set); + kfree(kdb_macro); fail_defcmd: - kdb_printf("Could not allocate new defcmd_set entry for %s\n", argv[1]); - defcmd_set = save_defcmd_set; + kdb_printf("Could not allocate new kdb_macro entry for %s\n", argv[1]); + kdb_macro = save_kdb_macro; return KDB_NOTIMP; } @@ -781,14 +781,14 @@ fail_defcmd: static int kdb_exec_defcmd(int argc, const char **argv) { int i, ret; - struct defcmd_set *s; + struct kdb_macro *s; if (argc != 0) return KDB_ARGCOUNT; - for (s = defcmd_set, i = 0; i < defcmd_set_count; 
++i, ++s) { + for (s = kdb_macro, i = 0; i < kdb_macro_count; ++i, ++s) { if (strcmp(s->name, argv[0]) == 0) break; } - if (i == defcmd_set_count) { + if (i == kdb_macro_count) { kdb_printf("kdb_exec_defcmd: could not find commands for %s\n", argv[0]); return KDB_NOTIMP; -- cgit v1.2.3
From c25abcd625505f53b72dc156bac32b5120826742 Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Mon, 12 Jul 2021 19:16:18 +0530 Subject: kdb: Get rid of redundant kdb_register_flags()

Commit e4f291b3f7bb ("kdb: Simplify kdb commands registration") allowed registration of pre-allocated kdb commands with a pointer to struct kdbtab_t. Let's switch other users as well to register pre-allocated kdb commands via:

- Changing the prototype for kdb_register() to pass a pointer to struct kdbtab_t instead.
- Embedding the kdbtab_t structure in kdb_macro_t rather than individual params.

With these changes kdb_register_flags() becomes redundant and hence is removed. Also, since we have switched all users to register pre-allocated commands, the "is_dynamic" flag in struct kdbtab_t becomes redundant and hence is removed as well.

Suggested-by: Daniel Thompson Signed-off-by: Sumit Garg Acked-by: Steven Rostedt (VMware) Reviewed-by: Douglas Anderson Link: https://lore.kernel.org/r/20210712134620.276667-3-sumit.garg@linaro.org Signed-off-by: Daniel Thompson --- include/linux/kdb.h | 27 ++++--- kernel/debug/kdb/kdb_main.c | 167 +++++++++++++---------------------- kernel/debug/kdb/kdb_private.h | 13 ---- kernel/trace/trace_kdb.c | 12 ++- samples/kdb/kdb_hello.c | 20 +++-- 5 files changed, 88 insertions(+), 151 deletions(-) (limited to 'kernel')
diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 0125a677b67f..de858edfb3b8 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h
@@ -13,6 +13,8 @@ * Copyright (C) 2009 Jason Wessel */ +#include + /* Shifted versions of the command enable bits are be used if the command * has no arguments (see kdb_check_flags). This allows commands, such as * go, to have different permissions depending upon whether it is called
@@ -64,6 +66,17 @@ typedef enum { typedef int (*kdb_func_t)(int, const char **); +/* The KDB shell command table */ +typedef struct _kdbtab { + char *cmd_name; /* Command name */ + kdb_func_t cmd_func; /* Function to execute command */ + char *cmd_usage; /* Usage String for this command */ + char *cmd_help; /* Help message for this command */ + short cmd_minlen; /* Minimum legal # cmd chars required */ + kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ + struct list_head list_node; /* Command list */ +} kdbtab_t; + #ifdef CONFIG_KGDB_KDB #include #include
@@ -193,19 +206,13 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos) #endif /* ! CONFIG_KALLSYMS */ /* Dynamic kdb shell command registration */ -extern int kdb_register(char *, kdb_func_t, char *, char *, short); -extern int kdb_register_flags(char *, kdb_func_t, char *, char *, - short, kdb_cmdflags_t); -extern int kdb_unregister(char *); +extern int kdb_register(kdbtab_t *cmd); +extern void kdb_unregister(kdbtab_t *cmd); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...)
{ return 0; } static inline void kdb_init(int level) {} -static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen) { return 0; } -static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen, - kdb_cmdflags_t flags) { return 0; } -static inline int kdb_unregister(char *cmd) { return 0; } +static inline int kdb_register(kdbtab_t *cmd) { return 0; } +static inline void kdb_unregister(kdbtab_t *cmd) {} #endif /* CONFIG_KGDB_KDB */ enum { KDB_NOT_INITIALIZED, diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 5cf9867fa118..b2880fad26d4 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -657,9 +656,7 @@ static void kdb_cmderror(int diag) struct kdb_macro { int count; bool usable; - char *name; - char *usage; - char *help; + kdbtab_t cmd; char **command; }; static struct kdb_macro *kdb_macro; @@ -678,13 +675,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) if (!s->count) s->usable = false; if (s->usable) - /* macros are always safe because when executed each - * internal command re-enters kdb_parse() and is - * safety checked individually. - */ - kdb_register_flags(s->name, kdb_exec_defcmd, s->usage, - s->help, 0, - KDB_ENABLE_ALWAYS_SAFE); + kdb_register(&s->cmd); return 0; } if (!s->usable) @@ -705,6 +696,8 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) static int kdb_defcmd(int argc, const char **argv) { struct kdb_macro *save_kdb_macro = kdb_macro, *s; + kdbtab_t *mp; + if (defcmd_in_progress) { kdb_printf("kdb: nested defcmd detected, assuming missing " "endefcmd\n"); @@ -713,8 +706,8 @@ static int kdb_defcmd(int argc, const char **argv) if (argc == 0) { int i; for (s = kdb_macro; s < kdb_macro + kdb_macro_count; ++s) { - kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name, - s->usage, s->help); + kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->cmd.cmd_name, + s->cmd.cmd_usage, s->cmd.cmd_help); for (i = 0; i < s->count; ++i) kdb_printf("%s", s->command[i]); kdb_printf("endefcmd\n"); @@ -736,31 +729,36 @@ static int kdb_defcmd(int argc, const char **argv) s = kdb_macro + kdb_macro_count; memset(s, 0, sizeof(*s)); s->usable = true; - s->name = kdb_strdup(argv[1], GFP_KDB); - if (!s->name) + + mp = &s->cmd; + mp->cmd_func = kdb_exec_defcmd; + mp->cmd_minlen = 0; + mp->cmd_flags = KDB_ENABLE_ALWAYS_SAFE; + mp->cmd_name = kdb_strdup(argv[1], GFP_KDB); + if (!mp->cmd_name) goto fail_name; - s->usage = kdb_strdup(argv[2], GFP_KDB); - if (!s->usage) + mp->cmd_usage = kdb_strdup(argv[2], GFP_KDB); + if (!mp->cmd_usage) goto fail_usage; - s->help = kdb_strdup(argv[3], GFP_KDB); - if (!s->help) + mp->cmd_help = kdb_strdup(argv[3], GFP_KDB); + if (!mp->cmd_help) goto fail_help; - if (s->usage[0] == '"') { - strcpy(s->usage, argv[2]+1); - s->usage[strlen(s->usage)-1] = '\0'; + if (mp->cmd_usage[0] == '"') { + strcpy(mp->cmd_usage, argv[2]+1); + mp->cmd_usage[strlen(mp->cmd_usage)-1] = '\0'; } - if (s->help[0] == '"') { - strcpy(s->help, argv[3]+1); - s->help[strlen(s->help)-1] = '\0'; + if (mp->cmd_help[0] == '"') { + strcpy(mp->cmd_help, argv[3]+1); + mp->cmd_help[strlen(mp->cmd_help)-1] = '\0'; } ++kdb_macro_count; defcmd_in_progress = true; kfree(save_kdb_macro); return 0; fail_help: - kfree(s->usage); + kfree(mp->cmd_usage); fail_usage: - kfree(s->name); + kfree(mp->cmd_name); fail_name: kfree(kdb_macro); fail_defcmd: @@ -785,7 +783,7 @@ static int 
kdb_exec_defcmd(int argc, const char **argv) if (argc != 0) return KDB_ARGCOUNT; for (s = kdb_macro, i = 0; i < kdb_macro_count; ++i, ++s) { - if (strcmp(s->name, argv[0]) == 0) + if (strcmp(s->cmd.cmd_name, argv[0]) == 0) break; } if (i == kdb_macro_count) { @@ -797,7 +795,7 @@ static int kdb_exec_defcmd(int argc, const char **argv) /* Recursive use of kdb_parse, do not use argv after * this point */ argv = NULL; - kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]); + kdb_printf("[%s]kdb> %s\n", s->cmd.cmd_name, s->command[i]); ret = kdb_parse(s->command[i]); if (ret) return ret; @@ -2613,56 +2611,32 @@ static int kdb_grep_help(int argc, const char **argv) return 0; } -/* - * kdb_register_flags - This function is used to register a kernel - * debugger command. - * Inputs: - * cmd Command name - * func Function to execute the command - * usage A simple usage string showing arguments - * help A simple help string describing command - * repeat Does the command auto repeat on enter? - * Returns: - * zero for success, one if a duplicate command. +/** + * kdb_register() - This function is used to register a kernel debugger + * command. + * @cmd: pointer to kdb command + * + * Note that it's the job of the caller to keep the memory for the cmd + * allocated until unregister is called. */ -int kdb_register_flags(char *cmd, - kdb_func_t func, - char *usage, - char *help, - short minlen, - kdb_cmdflags_t flags) +int kdb_register(kdbtab_t *cmd) { kdbtab_t *kp; list_for_each_entry(kp, &kdb_cmds_head, list_node) { - if (strcmp(kp->cmd_name, cmd) == 0) { - kdb_printf("Duplicate kdb command registered: " - "%s, func %px help %s\n", cmd, func, help); + if (strcmp(kp->cmd_name, cmd->cmd_name) == 0) { + kdb_printf("Duplicate kdb cmd: %s, func %p help %s\n", + cmd->cmd_name, cmd->cmd_func, cmd->cmd_help); return 1; } } - kp = kmalloc(sizeof(*kp), GFP_KDB); - if (!kp) { - kdb_printf("Could not allocate new kdb_command table\n"); - return 1; - } - - kp->cmd_name = cmd; - kp->cmd_func = func; - kp->cmd_usage = usage; - kp->cmd_help = help; - kp->cmd_minlen = minlen; - kp->cmd_flags = flags; - kp->is_dynamic = true; - - list_add_tail(&kp->list_node, &kdb_cmds_head); - + list_add_tail(&cmd->list_node, &kdb_cmds_head); return 0; } -EXPORT_SYMBOL_GPL(kdb_register_flags); +EXPORT_SYMBOL_GPL(kdb_register); -/* +/** * kdb_register_table() - This function is used to register a kdb command * table. * @kp: pointer to kdb command table @@ -2676,55 +2650,15 @@ void kdb_register_table(kdbtab_t *kp, size_t len) } } -/* - * kdb_register - Compatibility register function for commands that do - * not need to specify a repeat state. Equivalent to - * kdb_register_flags with flags set to 0. - * Inputs: - * cmd Command name - * func Function to execute the command - * usage A simple usage string showing arguments - * help A simple help string describing command - * Returns: - * zero for success, one if a duplicate command. - */ -int kdb_register(char *cmd, - kdb_func_t func, - char *usage, - char *help, - short minlen) -{ - return kdb_register_flags(cmd, func, usage, help, minlen, 0); -} -EXPORT_SYMBOL_GPL(kdb_register); - -/* - * kdb_unregister - This function is used to unregister a kernel - * debugger command. It is generally called when a module which - * implements kdb commands is unloaded. - * Inputs: - * cmd Command name - * Returns: - * zero for success, one command not registered. +/** + * kdb_unregister() - This function is used to unregister a kernel debugger + * command. 
It is generally called when a module which + * implements kdb command is unloaded. + * @cmd: pointer to kdb command */ -int kdb_unregister(char *cmd) +void kdb_unregister(kdbtab_t *cmd) { - kdbtab_t *kp; - - /* - * find the command. - */ - list_for_each_entry(kp, &kdb_cmds_head, list_node) { - if (strcmp(kp->cmd_name, cmd) == 0) { - list_del(&kp->list_node); - if (kp->is_dynamic) - kfree(kp); - return 0; - } - } - - /* Couldn't find it. */ - return 1; + list_del(&cmd->list_node); } EXPORT_SYMBOL_GPL(kdb_unregister); @@ -2900,6 +2834,11 @@ static kdbtab_t maintab[] = { .cmd_func = kdb_defcmd, .cmd_usage = "name \"usage\" \"help\"", .cmd_help = "Define a set of commands, down to endefcmd", + /* + * Macros are always safe because when executed each + * internal command re-enters kdb_parse() and is safety + * checked individually. + */ .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, }, { .cmd_name = "kill", diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 8dbc840113c9..629590084a0d 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -164,19 +164,6 @@ typedef struct _kdb_bp { #ifdef CONFIG_KGDB_KDB extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */]; -/* The KDB shell command table */ -typedef struct _kdbtab { - char *cmd_name; /* Command name */ - kdb_func_t cmd_func; /* Function to execute command */ - char *cmd_usage; /* Usage String for this command */ - char *cmd_help; /* Help message for this command */ - short cmd_minlen; /* Minimum legal # command - * chars required */ - kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ - struct list_head list_node; /* Command list */ - bool is_dynamic; /* Command table allocation type */ -} kdbtab_t; - extern void kdb_register_table(kdbtab_t *kp, size_t len); extern int kdb_bt(int, const char **); /* KDB display back trace */ diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 9da76104f7a2..6c4f92c79e43 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -147,11 +147,17 @@ static int kdb_ftdump(int argc, const char **argv) return 0; } +static kdbtab_t ftdump_cmd = { + .cmd_name = "ftdump", + .cmd_func = kdb_ftdump, + .cmd_usage = "[skip_#entries] [cpu]", + .cmd_help = "Dump ftrace log; -skip dumps last #entries", + .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, +}; + static __init int kdb_ftrace_register(void) { - kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]", - "Dump ftrace log; -skip dumps last #entries", 0, - KDB_ENABLE_ALWAYS_SAFE); + kdb_register(&ftdump_cmd); return 0; } diff --git a/samples/kdb/kdb_hello.c b/samples/kdb/kdb_hello.c index c1c2fa0f62c2..9ad514a6648b 100644 --- a/samples/kdb/kdb_hello.c +++ b/samples/kdb/kdb_hello.c @@ -28,28 +28,26 @@ static int kdb_hello_cmd(int argc, const char **argv) return 0; } +static kdbtab_t hello_cmd = { + .cmd_name = "hello", + .cmd_func = kdb_hello_cmd, + .cmd_usage = "[string]", + .cmd_help = "Say Hello World or Hello [string]", +}; static int __init kdb_hello_cmd_init(void) { /* * Registration of a dynamically added kdb command is done with - * kdb_register() with the arguments being: - * 1: The name of the shell command - * 2: The function that processes the command - * 3: Description of the usage of any arguments - * 4: Descriptive text when you run help - * 5: Number of characters to complete the command - * 0 == type the whole command - * 1 == match both "g" and "go" for example + * kdb_register(). 
*/ - kdb_register("hello", kdb_hello_cmd, "[string]", - "Say Hello World or Hello [string]", 0); + kdb_register(&hello_cmd); return 0; } static void __exit kdb_hello_cmd_exit(void) { - kdb_unregister("hello"); + kdb_unregister(&hello_cmd); } module_init(kdb_hello_cmd_init); -- cgit v1.2.3 From 9a5db530aa7d98b10c4f5104027565c98cca49e6 Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Mon, 12 Jul 2021 19:16:19 +0530 Subject: kdb: Simplify kdb_defcmd macro logic Switch to use a linked list instead of dynamic array which makes allocation of kdb macro and traversing the kdb macro commands list simpler. Suggested-by: Daniel Thompson Signed-off-by: Sumit Garg Reviewed-by: Douglas Anderson Link: https://lore.kernel.org/r/20210712134620.276667-4-sumit.garg@linaro.org Signed-off-by: Daniel Thompson --- kernel/debug/kdb/kdb_main.c | 107 ++++++++++++++++++++++++-------------------- 1 file changed, 58 insertions(+), 49 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index b2880fad26d4..7c7a2ef834fc 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -654,13 +654,16 @@ static void kdb_cmderror(int diag) * zero for success, a kdb diagnostic if error */ struct kdb_macro { - int count; - bool usable; - kdbtab_t cmd; - char **command; + kdbtab_t cmd; /* Macro command */ + struct list_head statements; /* Associated statement list */ }; + +struct kdb_macro_statement { + char *statement; /* Statement text */ + struct list_head list_node; /* Statement list node */ +}; + static struct kdb_macro *kdb_macro; -static int kdb_macro_count; static bool defcmd_in_progress; /* Forward references */ @@ -668,34 +671,33 @@ static int kdb_exec_defcmd(int argc, const char **argv); static int kdb_defcmd2(const char *cmdstr, const char *argv0) { - struct kdb_macro *s = kdb_macro + kdb_macro_count - 1; - char **save_command = s->command; + struct kdb_macro_statement *kms; + + if (!kdb_macro) + return KDB_NOTIMP; + if (strcmp(argv0, "endefcmd") == 0) { defcmd_in_progress = false; - if (!s->count) - s->usable = false; - if (s->usable) - kdb_register(&s->cmd); + if (!list_empty(&kdb_macro->statements)) + kdb_register(&kdb_macro->cmd); return 0; } - if (!s->usable) - return KDB_NOTIMP; - s->command = kcalloc(s->count + 1, sizeof(*(s->command)), GFP_KDB); - if (!s->command) { - kdb_printf("Could not allocate new kdb_defcmd table for %s\n", + + kms = kmalloc(sizeof(*kms), GFP_KDB); + if (!kms) { + kdb_printf("Could not allocate new kdb macro command: %s\n", cmdstr); - s->usable = false; return KDB_NOTIMP; } - memcpy(s->command, save_command, s->count * sizeof(*(s->command))); - s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB); - kfree(save_command); + + kms->statement = kdb_strdup(cmdstr, GFP_KDB); + list_add_tail(&kms->list_node, &kdb_macro->statements); + return 0; } static int kdb_defcmd(int argc, const char **argv) { - struct kdb_macro *save_kdb_macro = kdb_macro, *s; kdbtab_t *mp; if (defcmd_in_progress) { @@ -704,13 +706,21 @@ static int kdb_defcmd(int argc, const char **argv) kdb_defcmd2("endefcmd", "endefcmd"); } if (argc == 0) { - int i; - for (s = kdb_macro; s < kdb_macro + kdb_macro_count; ++s) { - kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->cmd.cmd_name, - s->cmd.cmd_usage, s->cmd.cmd_help); - for (i = 0; i < s->count; ++i) - kdb_printf("%s", s->command[i]); - kdb_printf("endefcmd\n"); + kdbtab_t *kp; + struct kdb_macro *kmp; + struct kdb_macro_statement *kms; + + list_for_each_entry(kp, &kdb_cmds_head, list_node) { + if (kp->cmd_func == 
kdb_exec_defcmd) { + kdb_printf("defcmd %s \"%s\" \"%s\"\n", + kp->cmd_name, kp->cmd_usage, + kp->cmd_help); + kmp = container_of(kp, struct kdb_macro, cmd); + list_for_each_entry(kms, &kmp->statements, + list_node) + kdb_printf("%s", kms->statement); + kdb_printf("endefcmd\n"); + } } return 0; } @@ -720,17 +730,11 @@ static int kdb_defcmd(int argc, const char **argv) kdb_printf("Command only available during kdb_init()\n"); return KDB_NOTIMP; } - kdb_macro = kmalloc_array(kdb_macro_count + 1, sizeof(*kdb_macro), - GFP_KDB); + kdb_macro = kzalloc(sizeof(*kdb_macro), GFP_KDB); if (!kdb_macro) goto fail_defcmd; - memcpy(kdb_macro, save_kdb_macro, - kdb_macro_count * sizeof(*kdb_macro)); - s = kdb_macro + kdb_macro_count; - memset(s, 0, sizeof(*s)); - s->usable = true; - mp = &s->cmd; + mp = &kdb_macro->cmd; mp->cmd_func = kdb_exec_defcmd; mp->cmd_minlen = 0; mp->cmd_flags = KDB_ENABLE_ALWAYS_SAFE; @@ -751,9 +755,9 @@ static int kdb_defcmd(int argc, const char **argv) strcpy(mp->cmd_help, argv[3]+1); mp->cmd_help[strlen(mp->cmd_help)-1] = '\0'; } - ++kdb_macro_count; + + INIT_LIST_HEAD(&kdb_macro->statements); defcmd_in_progress = true; - kfree(save_kdb_macro); return 0; fail_help: kfree(mp->cmd_usage); @@ -763,7 +767,6 @@ fail_name: kfree(kdb_macro); fail_defcmd: kdb_printf("Could not allocate new kdb_macro entry for %s\n", argv[1]); - kdb_macro = save_kdb_macro; return KDB_NOTIMP; } @@ -778,25 +781,31 @@ fail_defcmd: */ static int kdb_exec_defcmd(int argc, const char **argv) { - int i, ret; - struct kdb_macro *s; + int ret; + kdbtab_t *kp; + struct kdb_macro *kmp; + struct kdb_macro_statement *kms; + if (argc != 0) return KDB_ARGCOUNT; - for (s = kdb_macro, i = 0; i < kdb_macro_count; ++i, ++s) { - if (strcmp(s->cmd.cmd_name, argv[0]) == 0) + + list_for_each_entry(kp, &kdb_cmds_head, list_node) { + if (strcmp(kp->cmd_name, argv[0]) == 0) break; } - if (i == kdb_macro_count) { + if (list_entry_is_head(kp, &kdb_cmds_head, list_node)) { kdb_printf("kdb_exec_defcmd: could not find commands for %s\n", argv[0]); return KDB_NOTIMP; } - for (i = 0; i < s->count; ++i) { - /* Recursive use of kdb_parse, do not use argv after - * this point */ + kmp = container_of(kp, struct kdb_macro, cmd); + list_for_each_entry(kms, &kmp->statements, list_node) { + /* + * Recursive use of kdb_parse, do not use argv after this point. + */ argv = NULL; - kdb_printf("[%s]kdb> %s\n", s->cmd.cmd_name, s->command[i]); - ret = kdb_parse(s->command[i]); + kdb_printf("[%s]kdb> %s\n", kmp->cmd.cmd_name, kms->statement); + ret = kdb_parse(kms->statement); if (ret) return ret; } -- cgit v1.2.3 From e868f0a3c4b9c1d7721f08b703142a876814a3f8 Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Mon, 12 Jul 2021 19:16:20 +0530 Subject: kdb: Rename members of struct kdbtab_t Remove the redundant "cmd_" prefix from the names of the members of struct kdbtab_t for better readability.
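As a hedged illustration of the cumulative interface change across this series (the "ping" command, its handler body, and the module names below are invented for this sketch, not part of the series), an out-of-tree kdb command would now register a table entry with the shortened member names:

#include <linux/kdb.h>
#include <linux/module.h>

static int kdb_ping_cmd(int argc, const char **argv)
{
	/* argc counts arguments after the command name, as usual for kdb */
	kdb_printf("pong\n");
	return 0;
}

static kdbtab_t ping_cmd = {
	.name  = "ping",                 /* was .cmd_name */
	.func  = kdb_ping_cmd,           /* was .cmd_func */
	.usage = "",
	.help  = "Reply with pong",
	.flags = KDB_ENABLE_ALWAYS_SAFE, /* was .cmd_flags */
};

static int __init kdb_ping_init(void)
{
	kdb_register(&ping_cmd);	/* takes the table entry, not strings */
	return 0;
}

static void __exit kdb_ping_exit(void)
{
	kdb_unregister(&ping_cmd);
}

module_init(kdb_ping_init);
module_exit(kdb_ping_exit);
MODULE_LICENSE("GPL");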
Suggested-by: Doug Anderson Signed-off-by: Sumit Garg Reviewed-by: Douglas Anderson Link: https://lore.kernel.org/r/20210712134620.276667-5-sumit.garg@linaro.org Signed-off-by: Daniel Thompson --- include/linux/kdb.h | 12 +- kernel/debug/kdb/kdb_bp.c | 72 ++++---- kernel/debug/kdb/kdb_main.c | 404 ++++++++++++++++++++++---------------------- kernel/trace/trace_kdb.c | 10 +- samples/kdb/kdb_hello.c | 8 +- 5 files changed, 252 insertions(+), 254 deletions(-) (limited to 'kernel') diff --git a/include/linux/kdb.h b/include/linux/kdb.h index de858edfb3b8..ea0f5e580fac 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -68,12 +68,12 @@ typedef int (*kdb_func_t)(int, const char **); /* The KDB shell command table */ typedef struct _kdbtab { - char *cmd_name; /* Command name */ - kdb_func_t cmd_func; /* Function to execute command */ - char *cmd_usage; /* Usage String for this command */ - char *cmd_help; /* Help message for this command */ - short cmd_minlen; /* Minimum legal # cmd chars required */ - kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ + char *name; /* Command name */ + kdb_func_t func; /* Function to execute command */ + char *usage; /* Usage String for this command */ + char *help; /* Help message for this command */ + short minlen; /* Minimum legal # cmd chars required */ + kdb_cmdflags_t flags; /* Command behaviour flags */ struct list_head list_node; /* Command list */ } kdbtab_t; diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c index 2168f8dacb99..372025cf1ca3 100644 --- a/kernel/debug/kdb/kdb_bp.c +++ b/kernel/debug/kdb/kdb_bp.c @@ -523,51 +523,51 @@ static int kdb_ss(int argc, const char **argv) } static kdbtab_t bptab[] = { - { .cmd_name = "bp", - .cmd_func = kdb_bp, - .cmd_usage = "[]", - .cmd_help = "Set/Display breakpoints", - .cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, + { .name = "bp", + .func = kdb_bp, + .usage = "[]", + .help = "Set/Display breakpoints", + .flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, }, - { .cmd_name = "bl", - .cmd_func = kdb_bp, - .cmd_usage = "[]", - .cmd_help = "Display breakpoints", - .cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, + { .name = "bl", + .func = kdb_bp, + .usage = "[]", + .help = "Display breakpoints", + .flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, }, - { .cmd_name = "bc", - .cmd_func = kdb_bc, - .cmd_usage = "", - .cmd_help = "Clear Breakpoint", - .cmd_flags = KDB_ENABLE_FLOW_CTRL, + { .name = "bc", + .func = kdb_bc, + .usage = "", + .help = "Clear Breakpoint", + .flags = KDB_ENABLE_FLOW_CTRL, }, - { .cmd_name = "be", - .cmd_func = kdb_bc, - .cmd_usage = "", - .cmd_help = "Enable Breakpoint", - .cmd_flags = KDB_ENABLE_FLOW_CTRL, + { .name = "be", + .func = kdb_bc, + .usage = "", + .help = "Enable Breakpoint", + .flags = KDB_ENABLE_FLOW_CTRL, }, - { .cmd_name = "bd", - .cmd_func = kdb_bc, - .cmd_usage = "", - .cmd_help = "Disable Breakpoint", - .cmd_flags = KDB_ENABLE_FLOW_CTRL, + { .name = "bd", + .func = kdb_bc, + .usage = "", + .help = "Disable Breakpoint", + .flags = KDB_ENABLE_FLOW_CTRL, }, - { .cmd_name = "ss", - .cmd_func = kdb_ss, - .cmd_usage = "", - .cmd_help = "Single Step", - .cmd_minlen = 1, - .cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, + { .name = "ss", + .func = kdb_ss, + .usage = "", + .help = "Single Step", + .minlen = 1, + .flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, }, }; static kdbtab_t bphcmd = { - .cmd_name = "bph", - .cmd_func = kdb_bp, - .cmd_usage = "[]", - .cmd_help = "[datar [length]|dataw [length]] Set hw brk", - 
.cmd_flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, + .name = "bph", + .func = kdb_bp, + .usage = "[]", + .help = "[datar [length]|dataw [length]] Set hw brk", + .flags = KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS, }; /* Initialize the breakpoint table and register breakpoint commands. */ diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 7c7a2ef834fc..fa6deda894a1 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -711,10 +711,9 @@ static int kdb_defcmd(int argc, const char **argv) struct kdb_macro_statement *kms; list_for_each_entry(kp, &kdb_cmds_head, list_node) { - if (kp->cmd_func == kdb_exec_defcmd) { + if (kp->func == kdb_exec_defcmd) { kdb_printf("defcmd %s \"%s\" \"%s\"\n", - kp->cmd_name, kp->cmd_usage, - kp->cmd_help); + kp->name, kp->usage, kp->help); kmp = container_of(kp, struct kdb_macro, cmd); list_for_each_entry(kms, &kmp->statements, list_node) @@ -735,34 +734,34 @@ static int kdb_defcmd(int argc, const char **argv) goto fail_defcmd; mp = &kdb_macro->cmd; - mp->cmd_func = kdb_exec_defcmd; - mp->cmd_minlen = 0; - mp->cmd_flags = KDB_ENABLE_ALWAYS_SAFE; - mp->cmd_name = kdb_strdup(argv[1], GFP_KDB); - if (!mp->cmd_name) + mp->func = kdb_exec_defcmd; + mp->minlen = 0; + mp->flags = KDB_ENABLE_ALWAYS_SAFE; + mp->name = kdb_strdup(argv[1], GFP_KDB); + if (!mp->name) goto fail_name; - mp->cmd_usage = kdb_strdup(argv[2], GFP_KDB); - if (!mp->cmd_usage) + mp->usage = kdb_strdup(argv[2], GFP_KDB); + if (!mp->usage) goto fail_usage; - mp->cmd_help = kdb_strdup(argv[3], GFP_KDB); - if (!mp->cmd_help) + mp->help = kdb_strdup(argv[3], GFP_KDB); + if (!mp->help) goto fail_help; - if (mp->cmd_usage[0] == '"') { - strcpy(mp->cmd_usage, argv[2]+1); - mp->cmd_usage[strlen(mp->cmd_usage)-1] = '\0'; + if (mp->usage[0] == '"') { + strcpy(mp->usage, argv[2]+1); + mp->usage[strlen(mp->usage)-1] = '\0'; } - if (mp->cmd_help[0] == '"') { - strcpy(mp->cmd_help, argv[3]+1); - mp->cmd_help[strlen(mp->cmd_help)-1] = '\0'; + if (mp->help[0] == '"') { + strcpy(mp->help, argv[3]+1); + mp->help[strlen(mp->help)-1] = '\0'; } INIT_LIST_HEAD(&kdb_macro->statements); defcmd_in_progress = true; return 0; fail_help: - kfree(mp->cmd_usage); + kfree(mp->usage); fail_usage: - kfree(mp->cmd_name); + kfree(mp->name); fail_name: kfree(kdb_macro); fail_defcmd: @@ -790,7 +789,7 @@ static int kdb_exec_defcmd(int argc, const char **argv) return KDB_ARGCOUNT; list_for_each_entry(kp, &kdb_cmds_head, list_node) { - if (strcmp(kp->cmd_name, argv[0]) == 0) + if (strcmp(kp->name, argv[0]) == 0) break; } if (list_entry_is_head(kp, &kdb_cmds_head, list_node)) { @@ -804,7 +803,7 @@ static int kdb_exec_defcmd(int argc, const char **argv) * Recursive use of kdb_parse, do not use argv after this point. */ argv = NULL; - kdb_printf("[%s]kdb> %s\n", kmp->cmd.cmd_name, kms->statement); + kdb_printf("[%s]kdb> %s\n", kmp->cmd.name, kms->statement); ret = kdb_parse(kms->statement); if (ret) return ret; @@ -1016,11 +1015,11 @@ int kdb_parse(const char *cmdstr) * If this command is allowed to be abbreviated, * check to see if this is it. 
*/ - if (tp->cmd_minlen && (strlen(argv[0]) <= tp->cmd_minlen) && - (strncmp(argv[0], tp->cmd_name, tp->cmd_minlen) == 0)) + if (tp->minlen && (strlen(argv[0]) <= tp->minlen) && + (strncmp(argv[0], tp->name, tp->minlen) == 0)) break; - if (strcmp(argv[0], tp->cmd_name) == 0) + if (strcmp(argv[0], tp->name) == 0) break; } @@ -1031,8 +1030,7 @@ int kdb_parse(const char *cmdstr) */ if (list_entry_is_head(tp, &kdb_cmds_head, list_node)) { list_for_each_entry(tp, &kdb_cmds_head, list_node) { - if (strncmp(argv[0], tp->cmd_name, - strlen(tp->cmd_name)) == 0) + if (strncmp(argv[0], tp->name, strlen(tp->name)) == 0) break; } } @@ -1040,19 +1038,19 @@ int kdb_parse(const char *cmdstr) if (!list_entry_is_head(tp, &kdb_cmds_head, list_node)) { int result; - if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1)) + if (!kdb_check_flags(tp->flags, kdb_cmd_enabled, argc <= 1)) return KDB_NOPERM; KDB_STATE_SET(CMD); - result = (*tp->cmd_func)(argc-1, (const char **)argv); + result = (*tp->func)(argc-1, (const char **)argv); if (result && ignore_errors && result > KDB_CMD_GO) result = 0; KDB_STATE_CLEAR(CMD); - if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS) + if (tp->flags & KDB_REPEAT_WITH_ARGS) return result; - argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0; + argc = tp->flags & KDB_REPEAT_NO_ARGS ? 1 : 0; if (argv[argc]) *(argv[argc]) = '\0'; return result; @@ -2419,12 +2417,12 @@ static int kdb_help(int argc, const char **argv) char *space = ""; if (KDB_FLAG(CMD_INTERRUPT)) return 0; - if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true)) + if (!kdb_check_flags(kt->flags, kdb_cmd_enabled, true)) continue; - if (strlen(kt->cmd_usage) > 20) + if (strlen(kt->usage) > 20) space = "\n "; - kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name, - kt->cmd_usage, space, kt->cmd_help); + kdb_printf("%-15.15s %-20s%s%s\n", kt->name, + kt->usage, space, kt->help); } return 0; } @@ -2633,9 +2631,9 @@ int kdb_register(kdbtab_t *cmd) kdbtab_t *kp; list_for_each_entry(kp, &kdb_cmds_head, list_node) { - if (strcmp(kp->cmd_name, cmd->cmd_name) == 0) { + if (strcmp(kp->name, cmd->name) == 0) { kdb_printf("Duplicate kdb cmd: %s, func %p help %s\n", - cmd->cmd_name, cmd->cmd_func, cmd->cmd_help); + cmd->name, cmd->func, cmd->help); return 1; } } @@ -2672,218 +2670,218 @@ void kdb_unregister(kdbtab_t *cmd) EXPORT_SYMBOL_GPL(kdb_unregister); static kdbtab_t maintab[] = { - { .cmd_name = "md", - .cmd_func = kdb_md, - .cmd_usage = "", - .cmd_help = "Display Memory Contents, also mdWcN, e.g. md8c1", - .cmd_minlen = 1, - .cmd_flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, + { .name = "md", + .func = kdb_md, + .usage = "", + .help = "Display Memory Contents, also mdWcN, e.g. 
md8c1", + .minlen = 1, + .flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, }, - { .cmd_name = "mdr", - .cmd_func = kdb_md, - .cmd_usage = " ", - .cmd_help = "Display Raw Memory", - .cmd_flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, + { .name = "mdr", + .func = kdb_md, + .usage = " ", + .help = "Display Raw Memory", + .flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, }, - { .cmd_name = "mdp", - .cmd_func = kdb_md, - .cmd_usage = " ", - .cmd_help = "Display Physical Memory", - .cmd_flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, + { .name = "mdp", + .func = kdb_md, + .usage = " ", + .help = "Display Physical Memory", + .flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, }, - { .cmd_name = "mds", - .cmd_func = kdb_md, - .cmd_usage = "", - .cmd_help = "Display Memory Symbolically", - .cmd_flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, + { .name = "mds", + .func = kdb_md, + .usage = "", + .help = "Display Memory Symbolically", + .flags = KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS, }, - { .cmd_name = "mm", - .cmd_func = kdb_mm, - .cmd_usage = " ", - .cmd_help = "Modify Memory Contents", - .cmd_flags = KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS, + { .name = "mm", + .func = kdb_mm, + .usage = " ", + .help = "Modify Memory Contents", + .flags = KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS, }, - { .cmd_name = "go", - .cmd_func = kdb_go, - .cmd_usage = "[]", - .cmd_help = "Continue Execution", - .cmd_minlen = 1, - .cmd_flags = KDB_ENABLE_REG_WRITE | + { .name = "go", + .func = kdb_go, + .usage = "[]", + .help = "Continue Execution", + .minlen = 1, + .flags = KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS, }, - { .cmd_name = "rd", - .cmd_func = kdb_rd, - .cmd_usage = "", - .cmd_help = "Display Registers", - .cmd_flags = KDB_ENABLE_REG_READ, + { .name = "rd", + .func = kdb_rd, + .usage = "", + .help = "Display Registers", + .flags = KDB_ENABLE_REG_READ, }, - { .cmd_name = "rm", - .cmd_func = kdb_rm, - .cmd_usage = " ", - .cmd_help = "Modify Registers", - .cmd_flags = KDB_ENABLE_REG_WRITE, + { .name = "rm", + .func = kdb_rm, + .usage = " ", + .help = "Modify Registers", + .flags = KDB_ENABLE_REG_WRITE, }, - { .cmd_name = "ef", - .cmd_func = kdb_ef, - .cmd_usage = "", - .cmd_help = "Display exception frame", - .cmd_flags = KDB_ENABLE_MEM_READ, + { .name = "ef", + .func = kdb_ef, + .usage = "", + .help = "Display exception frame", + .flags = KDB_ENABLE_MEM_READ, }, - { .cmd_name = "bt", - .cmd_func = kdb_bt, - .cmd_usage = "[]", - .cmd_help = "Stack traceback", - .cmd_minlen = 1, - .cmd_flags = KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS, + { .name = "bt", + .func = kdb_bt, + .usage = "[]", + .help = "Stack traceback", + .minlen = 1, + .flags = KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS, }, - { .cmd_name = "btp", - .cmd_func = kdb_bt, - .cmd_usage = "", - .cmd_help = "Display stack for process ", - .cmd_flags = KDB_ENABLE_INSPECT, + { .name = "btp", + .func = kdb_bt, + .usage = "", + .help = "Display stack for process ", + .flags = KDB_ENABLE_INSPECT, }, - { .cmd_name = "bta", - .cmd_func = kdb_bt, - .cmd_usage = "[D|R|S|T|C|Z|E|U|I|M|A]", - .cmd_help = "Backtrace all processes matching state flag", - .cmd_flags = KDB_ENABLE_INSPECT, + { .name = "bta", + .func = kdb_bt, + .usage = "[D|R|S|T|C|Z|E|U|I|M|A]", + .help = "Backtrace all processes matching state flag", + .flags = KDB_ENABLE_INSPECT, }, - { .cmd_name = "btc", - .cmd_func = kdb_bt, - .cmd_usage = "", - .cmd_help = "Backtrace current process on each cpu", - .cmd_flags = KDB_ENABLE_INSPECT, + { .name = "btc", + .func = kdb_bt, 
+ .usage = "", + .help = "Backtrace current process on each cpu", + .flags = KDB_ENABLE_INSPECT, }, - { .cmd_name = "btt", - .cmd_func = kdb_bt, - .cmd_usage = "", - .cmd_help = "Backtrace process given its struct task address", - .cmd_flags = KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS, + { .name = "btt", + .func = kdb_bt, + .usage = "", + .help = "Backtrace process given its struct task address", + .flags = KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS, }, - { .cmd_name = "env", - .cmd_func = kdb_env, - .cmd_usage = "", - .cmd_help = "Show environment variables", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "env", + .func = kdb_env, + .usage = "", + .help = "Show environment variables", + .flags = KDB_ENABLE_ALWAYS_SAFE, }, - { .cmd_name = "set", - .cmd_func = kdb_set, - .cmd_usage = "", - .cmd_help = "Set environment variables", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "set", + .func = kdb_set, + .usage = "", + .help = "Set environment variables", + .flags = KDB_ENABLE_ALWAYS_SAFE, }, - { .cmd_name = "help", - .cmd_func = kdb_help, - .cmd_usage = "", - .cmd_help = "Display Help Message", - .cmd_minlen = 1, - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "help", + .func = kdb_help, + .usage = "", + .help = "Display Help Message", + .minlen = 1, + .flags = KDB_ENABLE_ALWAYS_SAFE, }, - { .cmd_name = "?", - .cmd_func = kdb_help, - .cmd_usage = "", - .cmd_help = "Display Help Message", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "?", + .func = kdb_help, + .usage = "", + .help = "Display Help Message", + .flags = KDB_ENABLE_ALWAYS_SAFE, }, - { .cmd_name = "cpu", - .cmd_func = kdb_cpu, - .cmd_usage = "", - .cmd_help = "Switch to new cpu", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE_NO_ARGS, + { .name = "cpu", + .func = kdb_cpu, + .usage = "", + .help = "Switch to new cpu", + .flags = KDB_ENABLE_ALWAYS_SAFE_NO_ARGS, }, - { .cmd_name = "kgdb", - .cmd_func = kdb_kgdb, - .cmd_usage = "", - .cmd_help = "Enter kgdb mode", - .cmd_flags = 0, + { .name = "kgdb", + .func = kdb_kgdb, + .usage = "", + .help = "Enter kgdb mode", + .flags = 0, }, - { .cmd_name = "ps", - .cmd_func = kdb_ps, - .cmd_usage = "[|A]", - .cmd_help = "Display active task list", - .cmd_flags = KDB_ENABLE_INSPECT, + { .name = "ps", + .func = kdb_ps, + .usage = "[|A]", + .help = "Display active task list", + .flags = KDB_ENABLE_INSPECT, }, - { .cmd_name = "pid", - .cmd_func = kdb_pid, - .cmd_usage = "", - .cmd_help = "Switch to another task", - .cmd_flags = KDB_ENABLE_INSPECT, + { .name = "pid", + .func = kdb_pid, + .usage = "", + .help = "Switch to another task", + .flags = KDB_ENABLE_INSPECT, }, - { .cmd_name = "reboot", - .cmd_func = kdb_reboot, - .cmd_usage = "", - .cmd_help = "Reboot the machine immediately", - .cmd_flags = KDB_ENABLE_REBOOT, + { .name = "reboot", + .func = kdb_reboot, + .usage = "", + .help = "Reboot the machine immediately", + .flags = KDB_ENABLE_REBOOT, }, #if defined(CONFIG_MODULES) - { .cmd_name = "lsmod", - .cmd_func = kdb_lsmod, - .cmd_usage = "", - .cmd_help = "List loaded kernel modules", - .cmd_flags = KDB_ENABLE_INSPECT, + { .name = "lsmod", + .func = kdb_lsmod, + .usage = "", + .help = "List loaded kernel modules", + .flags = KDB_ENABLE_INSPECT, }, #endif #if defined(CONFIG_MAGIC_SYSRQ) - { .cmd_name = "sr", - .cmd_func = kdb_sr, - .cmd_usage = "", - .cmd_help = "Magic SysRq key", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "sr", + .func = kdb_sr, + .usage = "", + .help = "Magic SysRq key", + .flags = KDB_ENABLE_ALWAYS_SAFE, }, #endif #if defined(CONFIG_PRINTK) - { 
.cmd_name = "dmesg", - .cmd_func = kdb_dmesg, - .cmd_usage = "[lines]", - .cmd_help = "Display syslog buffer", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "dmesg", + .func = kdb_dmesg, + .usage = "[lines]", + .help = "Display syslog buffer", + .flags = KDB_ENABLE_ALWAYS_SAFE, }, #endif - { .cmd_name = "defcmd", - .cmd_func = kdb_defcmd, - .cmd_usage = "name \"usage\" \"help\"", - .cmd_help = "Define a set of commands, down to endefcmd", + { .name = "defcmd", + .func = kdb_defcmd, + .usage = "name \"usage\" \"help\"", + .help = "Define a set of commands, down to endefcmd", /* * Macros are always safe because when executed each * internal command re-enters kdb_parse() and is safety * checked individually. */ - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + .flags = KDB_ENABLE_ALWAYS_SAFE, }, - { .cmd_name = "kill", - .cmd_func = kdb_kill, - .cmd_usage = "<-signal> ", - .cmd_help = "Send a signal to a process", - .cmd_flags = KDB_ENABLE_SIGNAL, + { .name = "kill", + .func = kdb_kill, + .usage = "<-signal> ", + .help = "Send a signal to a process", + .flags = KDB_ENABLE_SIGNAL, }, - { .cmd_name = "summary", - .cmd_func = kdb_summary, - .cmd_usage = "", - .cmd_help = "Summarize the system", - .cmd_minlen = 4, - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "summary", + .func = kdb_summary, + .usage = "", + .help = "Summarize the system", + .minlen = 4, + .flags = KDB_ENABLE_ALWAYS_SAFE, }, - { .cmd_name = "per_cpu", - .cmd_func = kdb_per_cpu, - .cmd_usage = " [] []", - .cmd_help = "Display per_cpu variables", - .cmd_minlen = 3, - .cmd_flags = KDB_ENABLE_MEM_READ, + { .name = "per_cpu", + .func = kdb_per_cpu, + .usage = " [] []", + .help = "Display per_cpu variables", + .minlen = 3, + .flags = KDB_ENABLE_MEM_READ, }, - { .cmd_name = "grephelp", - .cmd_func = kdb_grep_help, - .cmd_usage = "", - .cmd_help = "Display help on | grep", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + { .name = "grephelp", + .func = kdb_grep_help, + .usage = "", + .help = "Display help on | grep", + .flags = KDB_ENABLE_ALWAYS_SAFE, }, }; static kdbtab_t nmicmd = { - .cmd_name = "disable_nmi", - .cmd_func = kdb_disable_nmi, - .cmd_usage = "", - .cmd_help = "Disable NMI entry to KDB", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + .name = "disable_nmi", + .func = kdb_disable_nmi, + .usage = "", + .help = "Disable NMI entry to KDB", + .flags = KDB_ENABLE_ALWAYS_SAFE, }; /* Initialize the kdb command table. 
*/ diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 6c4f92c79e43..59857a1ee44c 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -148,11 +148,11 @@ static int kdb_ftdump(int argc, const char **argv) } static kdbtab_t ftdump_cmd = { - .cmd_name = "ftdump", - .cmd_func = kdb_ftdump, - .cmd_usage = "[skip_#entries] [cpu]", - .cmd_help = "Dump ftrace log; -skip dumps last #entries", - .cmd_flags = KDB_ENABLE_ALWAYS_SAFE, + .name = "ftdump", + .func = kdb_ftdump, + .usage = "[skip_#entries] [cpu]", + .help = "Dump ftrace log; -skip dumps last #entries", + .flags = KDB_ENABLE_ALWAYS_SAFE, }; static __init int kdb_ftrace_register(void) diff --git a/samples/kdb/kdb_hello.c b/samples/kdb/kdb_hello.c index 9ad514a6648b..82736e5a5e32 100644 --- a/samples/kdb/kdb_hello.c +++ b/samples/kdb/kdb_hello.c @@ -29,10 +29,10 @@ static int kdb_hello_cmd(int argc, const char **argv) } static kdbtab_t hello_cmd = { - .cmd_name = "hello", - .cmd_func = kdb_hello_cmd, - .cmd_usage = "[string]", - .cmd_help = "Say Hello World or Hello [string]", + .name = "hello", + .func = kdb_hello_cmd, + .usage = "[string]", + .help = "Say Hello World or Hello [string]", }; static int __init kdb_hello_cmd_init(void) -- cgit v1.2.3 From 1d7db834a027ecfb5ac93be2e8bb45bfcbbf6eaa Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 22 Jul 2021 18:55:12 +0800 Subject: dma-debug: use memory_intersects() directly There is already a memory_intersects() helper in sections.h, so use it directly instead of the private overlap() helper. Signed-off-by: Kefeng Wang Signed-off-by: Christoph Hellwig --- kernel/dma/debug.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index dadae6255d05..fe6efd181614 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -1064,20 +1064,10 @@ static void check_for_stack(struct device *dev, } } -static inline bool overlap(void *addr, unsigned long len, void *start, void *end) -{ - unsigned long a1 = (unsigned long)addr; - unsigned long b1 = a1 + len; - unsigned long a2 = (unsigned long)start; - unsigned long b2 = (unsigned long)end; - - return !(b1 <= a2 || a1 >= b2); -} - static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) { - if (overlap(addr, len, _stext, _etext) || - overlap(addr, len, __start_rodata, __end_rodata)) + if (memory_intersects(_stext, _etext, addr, len) || + memory_intersects(__start_rodata, __end_rodata, addr, len)) err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); } -- cgit v1.2.3 From 173735c346c412d9f084825ecb04f24ada0e2986 Mon Sep 17 00:00:00 2001 From: Anthony Iliopoulos Date: Thu, 22 Jul 2021 16:10:55 +0200 Subject: dma-debug: fix debugfs initialization order Due to link order, dma_debug_init is called before debugfs has a chance to initialize (via debugfs_init which also happens in the core initcall stage), so the directories for dma-debug are never created. Decouple dma_debug_fs_init from dma_debug_init and defer its init until core_initcall_sync (after debugfs has been initialized) while letting dma-debug initialization occur as soon as possible to catch any early mappings, as suggested in [1].
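A hedged sketch of the initcall-ordering idea this relies on (the two init functions below are hypothetical, not the patch's): everything registered at one initcall level runs before anything at the matching *_sync level, independent of link order.

#include <linux/init.h>

static int __init early_tracker_init(void)
{
	/* core_initcall: runs early enough to catch the first mappings */
	return 0;
}
core_initcall(early_tracker_init);

static int __init tracker_debugfs_init(void)
{
	/*
	 * core_initcall_sync: runs after every core_initcall, including
	 * debugfs_init(), so debugfs_create_dir() can succeed here.
	 */
	return 0;
}
core_initcall_sync(tracker_debugfs_init);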
[1] https://lore.kernel.org/linux-iommu/YIgGa6yF%2Fadg8OSN@kroah.com/ Fixes: 15b28bbcd567 ("dma-debug: move initialization to common code") Signed-off-by: Anthony Iliopoulos Signed-off-by: Christoph Hellwig --- kernel/dma/debug.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index fe6efd181614..6c90c69e5311 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -792,7 +792,7 @@ static int dump_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(dump); -static void dma_debug_fs_init(void) +static int __init dma_debug_fs_init(void) { struct dentry *dentry = debugfs_create_dir("dma-api", NULL); @@ -805,7 +805,10 @@ static void dma_debug_fs_init(void) debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries); debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops); debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops); + + return 0; } +core_initcall_sync(dma_debug_fs_init); static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) { @@ -890,8 +893,6 @@ static int dma_debug_init(void) spin_lock_init(&dma_entry_hash[i].lock); } - dma_debug_fs_init(); - nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES); for (i = 0; i < nr_pages; ++i) dma_debug_create_entries(GFP_KERNEL); -- cgit v1.2.3 From fffe3cc8c2194f60c4af4fac7f27d25e8828f001 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Thu, 29 Jul 2021 14:15:19 -0600 Subject: dma-mapping: allow map_sg() ops to return negative error codes Allow dma_map_sgtable() to pass errors from the map_sg() ops. This will be required for returning appropriate error codes when mapping P2PDMA memory. Introduce __dma_map_sg_attrs() which will return the raw error code from the map_sg operation (whether it be negative or zero). Then add a dma_map_sg_attrs() wrapper to convert any negative errors to zero to satisfy the existing calling convention. dma_map_sgtable() defines three error codes that .map_sg implementations are allowed to return: -EINVAL, -ENOMEM and -EIO, the latter of which is a generic return for cases that are passing DMA_MAPPING_ERROR through. dma_map_sgtable() will convert a zero error return for old map_sg() ops into a -EIO return and return any negative errors as reported. This allows map_sg implementations to start returning multiple negative error codes. Legacy map_sg implementations can continue to return zero until they are all converted. Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig --- include/linux/dma-map-ops.h | 5 +-- include/linux/dma-mapping.h | 35 ++++--------------- kernel/dma/mapping.c | 82 ++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 84 insertions(+), 38 deletions(-) (limited to 'kernel') diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 0d53a96a3d64..2f842498c448 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -41,8 +41,9 @@ struct dma_map_ops { size_t size, enum dma_data_direction dir, unsigned long attrs); /* - * map_sg returns 0 on error and a value > 0 on success. - * It should never return a value < 0. + * map_sg should return a negative error code on error. See + * dma_map_sgtable() for a list of appropriate error codes + * and their meanings.
*/ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 183e7103a66d..daa1e360f0ee 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -110,6 +110,8 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); +int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs); dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, @@ -174,6 +176,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev, unsigned long attrs) { } +static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + return -EOPNOTSUPP; +} static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -343,34 +350,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev, return dma_sync_single_for_device(dev, addr + offset, size, dir); } -/** - * dma_map_sgtable - Map the given buffer for DMA - * @dev: The device for which to perform the DMA operation - * @sgt: The sg_table object describing the buffer - * @dir: DMA direction - * @attrs: Optional DMA attributes for the map operation - * - * Maps a buffer described by a scatterlist stored in the given sg_table - * object for the @dir DMA operation by the @dev device. After success the - * ownership for the buffer is transferred to the DMA domain. One has to - * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the - * ownership of the buffer back to the CPU domain before touching the - * buffer by the CPU. - * - * Returns 0 on success or -EINVAL on error during mapping the buffer. - */ -static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - int nents; - - nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); - if (nents <= 0) - return -EINVAL; - sgt->nents = nents; - return 0; -} - /** * dma_unmap_sgtable - Unmap the given buffer for DMA * @dev: The device for which to perform the DMA operation diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 2b06a809d0b9..21e550076be5 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -177,12 +177,8 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, } EXPORT_SYMBOL(dma_unmap_page_attrs); -/* - * dma_maps_sg_attrs returns 0 on error and > 0 on success. - * It should never return a value < 0. 
- */ -int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs) +static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) { const struct dma_map_ops *ops = get_dma_ops(dev); int ents; @@ -197,13 +193,83 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); else ents = ops->map_sg(dev, sg, nents, dir, attrs); - BUG_ON(ents < 0); - debug_dma_map_sg(dev, sg, nents, ents, dir); + + if (ents > 0) + debug_dma_map_sg(dev, sg, nents, ents, dir); + else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM && + ents != -EIO && ents != 0)) + return -EIO; return ents; } + +/** + * dma_map_sg_attrs - Map the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sg: The scatterlist describing the buffer + * @dir: DMA direction + * @attrs: Optional DMA attributes for the map operation + * + * Maps a buffer described by a scatterlist passed in the sg argument with + * nents segments for the @dir DMA operation by the @dev device. + * + * Returns the number of mapped entries (which can be less than nents) + * on success. Zero is returned for any error. + * + * dma_unmap_sg_attrs() should be used to unmap the buffer with the + * original sg and original nents (not the value returned by this function). + */ +int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + int ret; + + ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs); + if (ret < 0) + return 0; + return ret; +} EXPORT_SYMBOL(dma_map_sg_attrs); +/** + * dma_map_sgtable - Map the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * @attrs: Optional DMA attributes for the map operation + * + * Maps a buffer described by a scatterlist stored in the given sg_table + * object for the @dir DMA operation by the @dev device. After success, the + * ownership for the buffer is transferred to the DMA domain. One has to + * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the + * ownership of the buffer back to the CPU domain before touching the + * buffer by the CPU. + * + * Returns 0 on success or a negative error code on error. The following + * error codes are supported with the given meaning: + * + * -EINVAL - An invalid argument, unaligned access or other error + * in usage. Will not succeed if retried. + * -ENOMEM - Insufficient resources (like memory or IOVA space) to + * complete the mapping. Should succeed if retried later. + * -EIO - Legacy error code with an unknown meaning, e.g. this is + * returned if a lower level call returned DMA_MAPPING_ERROR.
+ */ +int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + int nents; + + nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); + if (nents == 0) + return -EIO; + if (nents < 0) + return nents; + sgt->nents = nents; + return 0; +} +EXPORT_SYMBOL_GPL(dma_map_sgtable); + void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) -- cgit v1.2.3 From c81be74e7d790f090d3c8a52e20a334dc2506a3f Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Thu, 29 Jul 2021 14:15:20 -0600 Subject: dma-direct: return appropriate error code from dma_direct_map_sg() Now that the map_sg() op expects error codes instead of returning zero on error, convert dma_direct_map_sg() to return an error code. Per the documentation for dma_map_sgtable(), -EIO is returned due to a DMA_MAPPING_ERROR with unknown cause. Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig --- kernel/dma/direct.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index f737e3347059..f33ceb68aef2 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -411,7 +411,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, out_unmap: dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); - return 0; + return -EIO; } dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, -- cgit v1.2.3 From 66ab63104f9cab09209851fef168f5972d791903 Mon Sep 17 00:00:00 2001 From: Martin Oliveira Date: Thu, 29 Jul 2021 14:15:38 -0600 Subject: dma-mapping: return error code from dma_dummy_map_sg() The .map_sg() op now expects an error code instead of zero on failure. The only errno to return is -EINVAL in the case when DMA is not supported. Signed-off-by: Martin Oliveira Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig --- kernel/dma/dummy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/dummy.c b/kernel/dma/dummy.c index eacd4c5b10bf..b492d59ac77e 100644 --- a/kernel/dma/dummy.c +++ b/kernel/dma/dummy.c @@ -22,7 +22,7 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsigned long attrs) { - return 0; + return -EINVAL; } static int dma_dummy_supported(struct device *hwdev, u64 mask) -- cgit v1.2.3 From d03c54419274f96434e2ad74e59e67ec6d54ca86 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 9 Aug 2021 17:13:31 +0200 Subject: dma-mapping: disallow .map_sg operations from returning zero on error Now that all the .map_sg operations have been converted to returning proper error codes, drop the code to handle a zero return value and add a warning if a zero is returned.
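As a hedged consumer-side sketch (the function and its retry policy are invented for illustration, not part of this series), a driver can now key its behaviour off the documented dma_map_sgtable() error codes:

#include <linux/dma-mapping.h>

static int example_map_for_device(struct device *dev, struct sg_table *sgt)
{
	int ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);

	switch (ret) {
	case 0:
		return 0;	/* mapped; sgt->nents is now valid */
	case -ENOMEM:
		return -EAGAIN;	/* hypothetical policy: resources may free up */
	default:
		return ret;	/* -EINVAL / -EIO: retrying will not help */
	}
}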
Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig --- kernel/dma/mapping.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 21e550076be5..967b62692102 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -197,7 +197,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, if (ents > 0) debug_dma_map_sg(dev, sg, nents, ents, dir); else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM && - ents != -EIO && ents != 0)) + ents != -EIO)) return -EIO; return ents; @@ -261,8 +261,6 @@ int dma_map_sgtable(struct device *dev, struct sg_table *sgt, int nents; nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); - if (nents == 0) - return -EIO; if (nents < 0) return nents; sgt->nents = nents; -- cgit v1.2.3 From 51be9e51a8000ffc6a33083ceca9da9303ed4dc5 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 1 Jul 2021 15:27:33 +0200 Subject: KVM: PPC: Book3S HV: XIVE: Fix mapping of passthrough interrupts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PCI MSI interrupt numbers are now mapped in a PCI-MSI domain but the underlying calls handling the passthrough of the interrupt in the guest need a number in the XIVE IRQ domain. Use the IRQ data mapped in the XIVE IRQ domain and not the one in the PCI-MSI domain. Signed-off-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20210701132750.1475580-16-clg@kaod.org --- arch/powerpc/kvm/book3s_xive.c | 3 ++- kernel/irq/irqdomain.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 45a0434e9d85..6878026ee8ec 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -927,7 +927,8 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, struct kvmppc_xive *xive = kvm->arch.xive; struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; - struct irq_data *host_data = irq_get_irq_data(host_irq); + struct irq_data *host_data = + irq_domain_get_irq_data(irq_get_default_host(), host_irq); unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data); u16 idx; u8 prio; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 51c483ce2447..0eee4816edac 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -491,6 +491,7 @@ struct irq_domain *irq_get_default_host(void) { return irq_default_domain; } +EXPORT_SYMBOL_GPL(irq_get_default_host); static bool irq_domain_is_nomap(struct irq_domain *domain) { -- cgit v1.2.3 From 2a047e0662aee1bd773e0415accd785ad26a9398 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 9 Aug 2021 17:19:36 +0200 Subject: dma-mapping: return an unsigned int from dma_map_sg{,_attrs} These can only return 0 for failure or the number of entries, so turn the return value into an unsigned int. 
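As a hedged illustration of the calling convention this locks in (the helper below is hypothetical, not from this patch), callers only ever test the return value against zero:

#include <linux/dma-mapping.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	unsigned int mapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);

	if (!mapped)
		return -EIO;	/* 0 is the only failure value callers see */

	/* program the device with 'mapped' entries (may be < nents) ... */
	return 0;
}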
Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe --- include/linux/dma-mapping.h | 9 +++++---- kernel/dma/mapping.c | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index daa1e360f0ee..dca2b1355bb1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -105,8 +105,8 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, unsigned long attrs); void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); -int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs); +unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); @@ -166,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { } -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs) +static inline unsigned int dma_map_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) { return 0; } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 967b62692102..7ee5284bff58 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -219,7 +219,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, * dma_unmap_sg_attrs() should be used to unmap the buffer with the * original sg and original nents (not the value returned by this function). */ -int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, +unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { int ret; -- cgit v1.2.3 From 6fe7c745f2acb73e4cc961d7f91125eef5a8861f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 10 Aug 2021 11:07:14 +0900 Subject: tracing/boot: Fix a hist trigger dependency for boot time tracing Fixes a build error when CONFIG_HIST_TRIGGERS=n with boot-time tracing. Since trigger_process_regex() is defined only when CONFIG_HIST_TRIGGERS=y, the 'actions' event option must also be disabled when that config is not set.
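A hedged sketch of the IS_ENABLED() pattern the fix below uses (CONFIG_FANCY and do_fancy_thing() are invented for illustration): the call still type-checks in both configurations, and the compiler eliminates the dead branch, so no reference to the missing definition survives to link time.

#include <linux/kernel.h>
#include <linux/printk.h>

int do_fancy_thing(const char *arg);	/* declared in both configs */

static void apply_option(const char *arg)
{
	if (IS_ENABLED(CONFIG_FANCY)) {
		if (do_fancy_thing(arg) < 0)
			pr_err("fancy thing failed: %s\n", arg);
	} else {
		pr_err("ignoring %s: CONFIG_FANCY is not set\n", arg);
	}
}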
Link: https://lkml.kernel.org/r/162856123376.203126.582144262622247352.stgit@devnote2 Fixes: 81a59555ff15 ("tracing/boot: Add per-event settings") Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 94ef2d099e32..d713714cba67 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -205,12 +205,15 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, pr_err("Failed to apply filter: %s\n", buf); } - xbc_node_for_each_array_value(enode, "actions", anode, p) { - if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) - pr_err("action string is too long: %s\n", p); - else if (trigger_process_regex(file, buf) < 0) - pr_err("Failed to apply an action: %s\n", buf); - } + if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) { + xbc_node_for_each_array_value(enode, "actions", anode, p) { + if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) + pr_err("action string is too long: %s\n", p); + else if (trigger_process_regex(file, buf) < 0) + pr_err("Failed to apply an action: %s\n", buf); + } + } else if (xbc_node_find_value(enode, "actions", NULL)) + pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n"); if (xbc_node_find_value(enode, "enable", NULL)) { if (trace_event_enable_disable(file, 1, 0) < 0) -- cgit v1.2.3 From de9a48a360b70d5318061cf1237431d1869555e4 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 7 Jul 2021 17:36:24 -0400 Subject: tracing: Add linear buckets to histogram logic There have been several times I wished the histogram logic had a "grouping" feature for the buckets. Currently, each bucket has a size of one. That is, if you trace the amount of requested allocations, each allocation is its own bucket, even if you are interested in what allocates 100 bytes or less, 100 to 200, 200 to 300, etc. Also, without grouping, it fills up the allocated histogram buckets quickly. If you are tracking latency, don't care whether something is 200 or 201 microseconds off, and want to track it in groups of, say, 10 microseconds each, this cannot currently be done. There is a log2 modifier, but that grouping gets too big too fast for a lot of cases. Introduce a "buckets=SIZE" modifier for each field, which records values rounded down to a multiple of SIZE.
For example: ># echo 'hist:keys=bytes_req.buckets=100:sort=bytes_req' > events/kmem/kmalloc/trigger ># cat events/kmem/kmalloc/hist # event histogram # # trigger info: hist:keys=bytes_req.buckets=100:vals=hitcount:sort=bytes_req.buckets=100:size=2048 [active] # { bytes_req: ~ 0-99 } hitcount: 3149 { bytes_req: ~ 100-199 } hitcount: 1468 { bytes_req: ~ 200-299 } hitcount: 39 { bytes_req: ~ 300-399 } hitcount: 306 { bytes_req: ~ 400-499 } hitcount: 364 { bytes_req: ~ 500-599 } hitcount: 32 { bytes_req: ~ 600-699 } hitcount: 69 { bytes_req: ~ 700-799 } hitcount: 37 { bytes_req: ~ 1200-1299 } hitcount: 16 { bytes_req: ~ 1400-1499 } hitcount: 30 { bytes_req: ~ 2000-2099 } hitcount: 6 { bytes_req: ~ 4000-4099 } hitcount: 2168 { bytes_req: ~ 5000-5099 } hitcount: 6 Totals: Hits: 7690 Entries: 13 Dropped: 0 Link: https://lkml.kernel.org/r/20210707213921.980359719@goodmis.org Acked-by: Namhyung Kim Reviewed-by: Tom Zanussi Reviewed-by: Masami Hiramatsu Tested-by: Daniel Bristot de Oliveira Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 65 +++++++++++++++++++++++++++++++++++----- 1 file changed, 58 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index a48aa2a2875b..8e87c4a429fd 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -121,6 +121,7 @@ struct hist_field { unsigned int size; unsigned int offset; unsigned int is_signed; + unsigned long buckets; const char *type; struct hist_field *operands[HIST_FIELD_OPERANDS_MAX]; struct hist_trigger_data *hist_data; @@ -219,6 +220,27 @@ static u64 hist_field_log2(struct hist_field *hist_field, return (u64) ilog2(roundup_pow_of_two(val)); } +static u64 hist_field_bucket(struct hist_field *hist_field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + void *event) +{ + struct hist_field *operand = hist_field->operands[0]; + unsigned long buckets = hist_field->buckets; + + u64 val = operand->fn(operand, elt, buffer, rbe, event); + + if (WARN_ON_ONCE(!buckets)) + return val; + + if (val >= LONG_MAX) + val = div64_ul(val, buckets); + else + val = (u64)((unsigned long)val / buckets); + return val * buckets; +} + static u64 hist_field_plus(struct hist_field *hist_field, struct tracing_map_elt *elt, struct trace_buffer *buffer, @@ -318,6 +340,7 @@ enum hist_field_flags { HIST_FIELD_FL_VAR_REF = 1 << 14, HIST_FIELD_FL_CPU = 1 << 15, HIST_FIELD_FL_ALIAS = 1 << 16, + HIST_FIELD_FL_BUCKET = 1 << 17, }; struct var_defs { @@ -1109,7 +1132,8 @@ static const char *hist_field_name(struct hist_field *field, if (field->field) field_name = field->field->name; else if (field->flags & HIST_FIELD_FL_LOG2 || - field->flags & HIST_FIELD_FL_ALIAS) + field->flags & HIST_FIELD_FL_ALIAS || + field->flags & HIST_FIELD_FL_BUCKET) field_name = hist_field_name(field->operands[0], ++level); else if (field->flags & HIST_FIELD_FL_CPU) field_name = "common_cpu"; @@ -1470,6 +1494,8 @@ static const char *get_hist_field_flags(struct hist_field *hist_field) flags_str = "syscall"; else if (hist_field->flags & HIST_FIELD_FL_LOG2) flags_str = "log2"; + else if (hist_field->flags & HIST_FIELD_FL_BUCKET) + flags_str = "buckets"; else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS) flags_str = "usecs"; @@ -1658,9 +1684,10 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, goto out; } - if (flags & HIST_FIELD_FL_LOG2) { - unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; 
- hist_field->fn = hist_field_log2; + if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) { + unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET); + hist_field->fn = flags & HIST_FIELD_FL_LOG2 ? hist_field_log2 : + hist_field_bucket; hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); hist_field->size = hist_field->operands[0]->size; hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); @@ -1953,7 +1980,7 @@ static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data, static struct ftrace_event_field * parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, - char *field_str, unsigned long *flags) + char *field_str, unsigned long *flags, unsigned long *buckets) { struct ftrace_event_field *field = NULL; char *field_name, *modifier, *str; @@ -1980,7 +2007,22 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, *flags |= HIST_FIELD_FL_LOG2; else if (strcmp(modifier, "usecs") == 0) *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; - else { + else if (strncmp(modifier, "bucket", 6) == 0) { + int ret; + + modifier += 6; + + if (*modifier == 's') + modifier++; + if (*modifier != '=') + goto error; + modifier++; + ret = kstrtoul(modifier, 0, buckets); + if (ret || !(*buckets)) + goto error; + *flags |= HIST_FIELD_FL_BUCKET; + } else { + error: hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); field = ERR_PTR(-EINVAL); goto out; @@ -2049,6 +2091,7 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str; struct ftrace_event_field *field = NULL; struct hist_field *hist_field = NULL; + unsigned long buckets = 0; int ret = 0; s = strchr(str, '.'); @@ -2086,7 +2129,7 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, } else str = s; - field = parse_field(hist_data, file, str, flags); + field = parse_field(hist_data, file, str, flags, &buckets); if (IS_ERR(field)) { ret = PTR_ERR(field); goto out; @@ -2097,6 +2140,7 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, ret = -ENOMEM; goto out; } + hist_field->buckets = buckets; return hist_field; out: @@ -4698,6 +4742,11 @@ static void hist_trigger_print_key(struct seq_file *m, } else if (key_field->flags & HIST_FIELD_FL_LOG2) { seq_printf(m, "%s: ~ 2^%-2llu", field_name, *(u64 *)(key + key_field->offset)); + } else if (key_field->flags & HIST_FIELD_FL_BUCKET) { + unsigned long buckets = key_field->buckets; + uval = *(u64 *)(key + key_field->offset); + seq_printf(m, "%s: ~ %llu-%llu", field_name, + uval, uval + buckets -1); } else if (key_field->flags & HIST_FIELD_FL_STRING) { seq_printf(m, "%s: %-50s", field_name, (char *)(key + key_field->offset)); @@ -5137,6 +5186,8 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) seq_printf(m, ".%s", flags); } } + if (hist_field->buckets) + seq_printf(m, "=%ld", hist_field->buckets); } static int event_hist_trigger_print(struct seq_file *m, -- cgit v1.2.3 From 370364351926e4fcc7c1a486901bfaae0172b7d9 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 7 Jul 2021 17:36:25 -0400 Subject: tracing/histogram: Update the documentation for the buckets modifier Update both the tracefs README file and histogram.rst to include an explanation of what the buckets modifier is and how to use it.
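As a minimal sketch of the arithmetic the modifier documents (simplified from hist_field_bucket() in the previous patch, which additionally special-cases 64-bit division on 32-bit kernels), each value is rounded down to the start of its group:

static unsigned long long bucket_of(unsigned long long val,
				    unsigned long buckets)
{
	/* e.g. val = 237 with buckets = 100 lands in the "200-299" row */
	return (val / buckets) * buckets;
}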
Add wakeup_latency examples for both the log2 and buckets modifiers, as there was no existing log2 example. Link: https://lkml.kernel.org/r/20210707213922.167218794@goodmis.org Acked-by: Namhyung Kim Reviewed-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/histogram.rst | 92 ++++++++++++++++++++++++++++++++++++--- kernel/trace/trace.c | 1 + 2 files changed, 87 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index f99be8062bc8..4e650671f245 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -77,6 +77,7 @@ Documentation written by Tom Zanussi .syscall display a syscall id as a system call name .execname display a common_pid as a program name .log2 display log2 value rather than raw number + .buckets=size display grouping of values rather than raw number .usecs display a common_timestamp in microseconds =========== ========================================== @@ -228,7 +229,7 @@ Extended error information that lists the total number of bytes requested for each function in the kernel that made one or more calls to kmalloc:: - # echo 'hist:key=call_site:val=bytes_req' > \ + # echo 'hist:key=call_site.buckets=32:val=bytes_req' > \ /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger This tells the tracing system to create a 'hist' trigger using the @@ -1823,20 +1824,99 @@ and variables defined on other events (see Section 2.2.3 below on how that is done using hist trigger 'onmatch' action). Once that is done, the 'wakeup_latency' synthetic event instance is created. -A histogram can now be defined for the new synthetic event:: - - # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \ - /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger - The new event is created under the tracing/events/synthetic/ directory and looks and behaves just like any other event:: # ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency enable filter format hist id trigger +A histogram can now be defined for the new synthetic event:: + + # echo 'hist:keys=pid,prio,lat.log2:sort=lat' >> \ + /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger + +The above shows the latency "lat" in a power of 2 grouping. + Like any other event, once a histogram is enabled for the event, the output can be displayed by reading the event's 'hist' file.
+ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist + + # event histogram + # + # trigger info: hist:keys=pid,prio,lat.log2:vals=hitcount:sort=lat.log2:size=2048 [active] + # + + { pid: 2035, prio: 9, lat: ~ 2^2 } hitcount: 43 + { pid: 2034, prio: 9, lat: ~ 2^2 } hitcount: 60 + { pid: 2029, prio: 9, lat: ~ 2^2 } hitcount: 965 + { pid: 2034, prio: 120, lat: ~ 2^2 } hitcount: 9 + { pid: 2033, prio: 120, lat: ~ 2^2 } hitcount: 5 + { pid: 2030, prio: 9, lat: ~ 2^2 } hitcount: 335 + { pid: 2030, prio: 120, lat: ~ 2^2 } hitcount: 10 + { pid: 2032, prio: 120, lat: ~ 2^2 } hitcount: 1 + { pid: 2035, prio: 120, lat: ~ 2^2 } hitcount: 2 + { pid: 2031, prio: 9, lat: ~ 2^2 } hitcount: 176 + { pid: 2028, prio: 120, lat: ~ 2^2 } hitcount: 15 + { pid: 2033, prio: 9, lat: ~ 2^2 } hitcount: 91 + { pid: 2032, prio: 9, lat: ~ 2^2 } hitcount: 125 + { pid: 2029, prio: 120, lat: ~ 2^2 } hitcount: 4 + { pid: 2031, prio: 120, lat: ~ 2^2 } hitcount: 3 + { pid: 2029, prio: 120, lat: ~ 2^3 } hitcount: 2 + { pid: 2035, prio: 9, lat: ~ 2^3 } hitcount: 41 + { pid: 2030, prio: 120, lat: ~ 2^3 } hitcount: 1 + { pid: 2032, prio: 9, lat: ~ 2^3 } hitcount: 32 + { pid: 2031, prio: 9, lat: ~ 2^3 } hitcount: 44 + { pid: 2034, prio: 9, lat: ~ 2^3 } hitcount: 40 + { pid: 2030, prio: 9, lat: ~ 2^3 } hitcount: 29 + { pid: 2033, prio: 9, lat: ~ 2^3 } hitcount: 31 + { pid: 2029, prio: 9, lat: ~ 2^3 } hitcount: 31 + { pid: 2028, prio: 120, lat: ~ 2^3 } hitcount: 18 + { pid: 2031, prio: 120, lat: ~ 2^3 } hitcount: 2 + { pid: 2028, prio: 120, lat: ~ 2^4 } hitcount: 1 + { pid: 2029, prio: 9, lat: ~ 2^4 } hitcount: 4 + { pid: 2031, prio: 120, lat: ~ 2^7 } hitcount: 1 + { pid: 2032, prio: 120, lat: ~ 2^7 } hitcount: 1 + + Totals: + Hits: 2122 + Entries: 30 + Dropped: 0 + + +The latency values can also be grouped linearly with the ".buckets" +modifier, which takes a group size (in this case, groups of 10).
+ + # echo 'hist:keys=pid,prio,lat.buckets=10:sort=lat' >> \ + /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger + + # event histogram + # + # trigger info: hist:keys=pid,prio,lat.buckets=10:vals=hitcount:sort=lat.buckets=10:size=2048 [active] + # + + { pid: 2067, prio: 9, lat: ~ 0-9 } hitcount: 220 + { pid: 2068, prio: 9, lat: ~ 0-9 } hitcount: 157 + { pid: 2070, prio: 9, lat: ~ 0-9 } hitcount: 100 + { pid: 2067, prio: 120, lat: ~ 0-9 } hitcount: 6 + { pid: 2065, prio: 120, lat: ~ 0-9 } hitcount: 2 + { pid: 2066, prio: 120, lat: ~ 0-9 } hitcount: 2 + { pid: 2069, prio: 9, lat: ~ 0-9 } hitcount: 122 + { pid: 2069, prio: 120, lat: ~ 0-9 } hitcount: 8 + { pid: 2070, prio: 120, lat: ~ 0-9 } hitcount: 1 + { pid: 2068, prio: 120, lat: ~ 0-9 } hitcount: 7 + { pid: 2066, prio: 9, lat: ~ 0-9 } hitcount: 365 + { pid: 2064, prio: 120, lat: ~ 0-9 } hitcount: 35 + { pid: 2065, prio: 9, lat: ~ 0-9 } hitcount: 998 + { pid: 2071, prio: 9, lat: ~ 0-9 } hitcount: 85 + { pid: 2065, prio: 9, lat: ~ 10-19 } hitcount: 2 + { pid: 2064, prio: 120, lat: ~ 10-19 } hitcount: 2 + + Totals: + Hits: 2112 + Entries: 16 + Dropped: 0 + 2.2.3 Hist trigger 'handlers' and 'actions' ------------------------------------------- diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a1adb29ef5c1..be0169594de5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5654,6 +5654,7 @@ static const char readme_msg[] = "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" + "\t .buckets=size display values in groups of size rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" -- cgit v1.2.3 From 3347d80baa41c357cf263923f60aa8051a753d76 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 22 Jul 2021 10:27:06 -0400 Subject: tracing: Have histogram types be constant when possible Instead of kstrdup("const", GFP_KERNEL), have the hist_field type simply assign the constant: hist_field->type = "const"; And when the value passed to it is a variable, use "kstrdup_const(var, GFP_KERNEL);", which simply reuses the pointer when the string is already a constant and only duplicates it otherwise. This saves on having to allocate when not needed. All frees of the hist_field->type will need to use kfree_const().
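A hedged sketch of the const-aware pair this relies on (the two wrapper functions are invented for illustration; kstrdup_const() and kfree_const() are the real string.h helpers):

#include <linux/slab.h>
#include <linux/string.h>

static const char *save_type(const char *type)
{
	/* returns 'type' itself when it points into .rodata, else a copy */
	return kstrdup_const(type, GFP_KERNEL);
}

static void release_type(const char *type)
{
	/* frees only what kstrdup_const() actually allocated */
	kfree_const(type);
}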
Link: https://lkml.kernel.org/r/20210722142837.280718447@goodmis.org Suggested-by: Masami Hiramatsu Reviewed-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 8e87c4a429fd..bb466a82b938 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1616,7 +1616,9 @@ static void __destroy_hist_field(struct hist_field *hist_field) kfree(hist_field->var.name); kfree(hist_field->name); - kfree(hist_field->type); + + /* Can likely be a const */ + kfree_const(hist_field->type); kfree(hist_field->system); kfree(hist_field->event_name); @@ -1673,9 +1675,7 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, if (flags & HIST_FIELD_FL_HITCOUNT) { hist_field->fn = hist_field_counter; hist_field->size = sizeof(u64); - hist_field->type = kstrdup("u64", GFP_KERNEL); - if (!hist_field->type) - goto free; + hist_field->type = "u64"; goto out; } @@ -1690,7 +1690,7 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, hist_field_bucket; hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); hist_field->size = hist_field->operands[0]->size; - hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); + hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL); if (!hist_field->type) goto free; goto out; @@ -1699,18 +1699,14 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, if (flags & HIST_FIELD_FL_TIMESTAMP) { hist_field->fn = hist_field_timestamp; hist_field->size = sizeof(u64); - hist_field->type = kstrdup("u64", GFP_KERNEL); - if (!hist_field->type) - goto free; + hist_field->type = "u64"; goto out; } if (flags & HIST_FIELD_FL_CPU) { hist_field->fn = hist_field_cpu; hist_field->size = sizeof(int); - hist_field->type = kstrdup("unsigned int", GFP_KERNEL); - if (!hist_field->type) - goto free; + hist_field->type = "unsigned int"; goto out; } @@ -1723,7 +1719,7 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, flags |= HIST_FIELD_FL_STRING; hist_field->size = MAX_FILTER_STR_VAL; - hist_field->type = kstrdup(field->type, GFP_KERNEL); + hist_field->type = kstrdup_const(field->type, GFP_KERNEL); if (!hist_field->type) goto free; @@ -1736,7 +1732,7 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, } else { hist_field->size = field->size; hist_field->is_signed = field->is_signed; - hist_field->type = kstrdup(field->type, GFP_KERNEL); + hist_field->type = kstrdup_const(field->type, GFP_KERNEL); if (!hist_field->type) goto free; @@ -1822,7 +1818,7 @@ static int init_var_ref(struct hist_field *ref_field, } } - ref_field->type = kstrdup(var_field->type, GFP_KERNEL); + ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL); if (!ref_field->type) { err = -ENOMEM; goto free; @@ -2215,7 +2211,7 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, expr->operands[0] = operand1; expr->operator = FIELD_OP_UNARY_MINUS; expr->name = expr_str(expr, 0); - expr->type = kstrdup(operand1->type, GFP_KERNEL); + expr->type = kstrdup_const(operand1->type, GFP_KERNEL); if (!expr->type) { ret = -ENOMEM; goto free; @@ -2355,7 +2351,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, expr->operator = field_op; 
expr->name = expr_str(expr, 0); - expr->type = kstrdup(operand1->type, GFP_KERNEL); + expr->type = kstrdup_const(operand1->type, GFP_KERNEL); if (!expr->type) { ret = -ENOMEM; goto free; @@ -2743,10 +2739,10 @@ static struct hist_field *create_var(struct hist_trigger_data *hist_data, var->var.hist_data = var->hist_data = hist_data; var->size = size; var->var.name = kstrdup(name, GFP_KERNEL); - var->type = kstrdup(type, GFP_KERNEL); + var->type = kstrdup_const(type, GFP_KERNEL); if (!var->var.name || !var->type) { + kfree_const(var->type); kfree(var->var.name); - kfree(var->type); kfree(var); var = ERR_PTR(-ENOMEM); } -- cgit v1.2.3 From ed2cf90735daf40ab8d938b4b6d3ca43c0f84466 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 22 Jul 2021 10:27:07 -0400 Subject: tracing: Allow execnames to be passed as args for synthetic events Allow common_pid.execname to be saved in a variable in one histogram to be passed to another histogram that can pass it as a parameter to a synthetic event. ># echo 'hist:keys=pid:__arg__1=common_timestamp.usecs:arg2=common_pid.execname' \ > events/sched/sched_waking/trigger ># echo 'wakeup_lat s32 pid; u64 delta; char wake_comm[]' > synthetic_events ># echo 'hist:keys=next_pid:pid=next_pid,delta=common_timestamp.usecs-$__arg__1,exec=$arg2'\ ':onmatch(sched.sched_waking).trace(wakeup_lat,$pid,$delta,$exec)' \ > events/sched/sched_switch/trigger The above is a wake up latency synthetic event setup that passes the execname of the common_pid that woke the task to the scheduling of that task, which triggers a synthetic event that passes the original execname as a parameter to display it. ># echo 1 > events/synthetic/enable ># cat trace <idle>-0 [006] d..4 186.863801: wakeup_lat: pid=1306 delta=65 wake_comm=kworker/u16:3 <idle>-0 [000] d..4 186.863858: wakeup_lat: pid=163 delta=27 wake_comm=<idle> <idle>-0 [001] d..4 186.863903: wakeup_lat: pid=1307 delta=36 wake_comm=kworker/u16:4 <idle>-0 [000] d..4 186.863927: wakeup_lat: pid=163 delta=5 wake_comm=<idle> <idle>-0 [006] d..4 186.863957: wakeup_lat: pid=1306 delta=24 wake_comm=kworker/u16:3 sshd-1306 [006] d..4 186.864051: wakeup_lat: pid=61 delta=62 wake_comm=<idle> <idle>-0 [000] d..4 186.965030: wakeup_lat: pid=609 delta=18 wake_comm=<idle> <idle>-0 [006] d..4 186.987582: wakeup_lat: pid=1306 delta=65 wake_comm=kworker/u16:3 <idle>-0 [000] d..4 186.987639: wakeup_lat: pid=163 delta=27 wake_comm=<idle> Link: https://lkml.kernel.org/r/20210722142837.458596338@goodmis.org Reviewed-by: Tom Zanussi Reviewed-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 46 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index bb466a82b938..9d91b1c06957 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1420,17 +1420,17 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) struct hist_trigger_data *hist_data = elt->map->private_data; unsigned int size = TASK_COMM_LEN; struct hist_elt_data *elt_data; - struct hist_field *key_field; + struct hist_field *hist_field; unsigned int i, n_str; elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); if (!elt_data) return -ENOMEM; - for_each_hist_key_field(i, hist_data) { - key_field = hist_data->fields[i]; + for_each_hist_field(i, hist_data) { + hist_field = hist_data->fields[i]; - if (key_field->flags & HIST_FIELD_FL_EXECNAME) { + if (hist_field->flags & HIST_FIELD_FL_EXECNAME) { elt_data->comm = kzalloc(size,
GFP_KERNEL); if (!elt_data->comm) { kfree(elt_data); @@ -3771,6 +3771,41 @@ static int create_val_field(struct hist_trigger_data *hist_data, return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0); } +static const char *no_comm = "(no comm)"; + +static u64 hist_field_execname(struct hist_field *hist_field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + void *event) +{ + struct hist_elt_data *elt_data; + + if (WARN_ON_ONCE(!elt)) + return (u64)(unsigned long)no_comm; + + elt_data = elt->private_data; + + if (WARN_ON_ONCE(!elt_data->comm)) + return (u64)(unsigned long)no_comm; + + return (u64)(unsigned long)(elt_data->comm); +} + +/* Convert a var that points to common_pid.execname to a string */ +static void update_var_execname(struct hist_field *hist_field) +{ + hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR | + HIST_FIELD_FL_EXECNAME; + hist_field->size = MAX_FILTER_STR_VAL; + hist_field->is_signed = 0; + + kfree_const(hist_field->type); + hist_field->type = "char[]"; + + hist_field->fn = hist_field_execname; +} + static int create_var_field(struct hist_trigger_data *hist_data, unsigned int val_idx, struct trace_event_file *file, @@ -3795,6 +3830,9 @@ static int create_var_field(struct hist_trigger_data *hist_data, ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags); + if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME) + update_var_execname(hist_data->fields[val_idx]); + if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_STRING) hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++; -- cgit v1.2.3 From de32951b29be3d6cc7a92bfbf366f48a9f4c4407 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 31 Jul 2021 14:22:31 +0900 Subject: tracing: Simplify the Kconfig dependency of FTRACE The entire FTRACE block is surrounded by 'if TRACING_SUPPORT' ... 'endif'. Using 'depends on' is a simpler way to guard FTRACE. Link: https://lkml.kernel.org/r/20210731052233.4703-1-masahiroy@kernel.org Signed-off-by: Masahiro Yamada Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 3ee23f4d437f..420ff4bc67fd 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -135,10 +135,9 @@ config TRACING_SUPPORT depends on STACKTRACE_SUPPORT default y -if TRACING_SUPPORT - menuconfig FTRACE bool "Tracers" + depends on TRACING_SUPPORT default y if DEBUG_KERNEL help Enable the kernel tracing infrastructure. @@ -1037,6 +1036,3 @@ config HIST_TRIGGERS_DEBUG If unsure, say N. endif # FTRACE - -endif # TRACING_SUPPORT - -- cgit v1.2.3 From e66ed86ca6c52488249e95f7b3a6a3d7d6ab5e1e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 10 Aug 2021 11:07:21 +0900 Subject: tracing/boot: Add per-event histogram action options Add a hist-trigger action syntax support to boot-time tracing. Currently, boot-time tracing supports per-event actions as option strings. However, for the histogram action, it has a special syntax and usually needs a long action definition. To make it readable and fit to the bootconfig syntax, this introduces new options for the histogram action. Here are the histogram action options for boot-time tracing. ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist { keys = <KEY>[,...] values = <VAL>[,...] sort = <SORT-KEY>[,...] size = <ENTRIES> name = <HISTNAME> var { <VAR> = <EXPR> ... } pause|continue|clear onmax|onchange { var = <VAR>; <ACTION> [= <PARAM>] } onmatch { event = <EVENT>; <ACTION> [= <PARAM>] } filter = <FILTER> } Where <ACTION> is one of below; trace = <EVENT>, <ARG1>[, ...] save = <ARG1>[, ...] snapshot Link: https://lkml.kernel.org/r/162856124106.203126.10501871028479029087.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 231 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 231 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index d713714cba67..3d0e51368f51 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -171,6 +171,231 @@ trace_boot_add_synth_event(struct xbc_node *node, const char *event) } #endif +#ifdef CONFIG_HIST_TRIGGERS +static int __init __printf(3, 4) +append_printf(char **bufp, char *end, const char *fmt, ...) +{ + va_list args; + int ret; + + if (*bufp == end) + return -ENOSPC; + + va_start(args, fmt); + ret = vsnprintf(*bufp, end - *bufp, fmt, args); + if (ret < end - *bufp) { + *bufp += ret; + } else { + *bufp = end; + ret = -ERANGE; + } + va_end(args); + + return ret; +} + +static int __init +append_str_nospace(char **bufp, char *end, const char *str) +{ + char *p = *bufp; + int len; + + while (p < end - 1 && *str != '\0') { + if (!isspace(*str)) + *(p++) = *str; + str++; + } + *p = '\0'; + if (p == end - 1) { + *bufp = end; + return -ENOSPC; + } + len = p - *bufp; + *bufp = p; + return (int)len; +} + +static int __init +trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp, + char *end, const char *key) +{ + struct xbc_node *knode, *anode; + const char *p; + char sep; + + knode = xbc_node_find_child(hnode, key); + if (knode) { + anode = xbc_node_get_child(knode); + if (!anode) { + pr_err("hist.%s requires value(s).\n", key); + return -EINVAL; + } + + append_printf(bufp, end, ":%s", key); + sep = '='; + xbc_array_for_each_value(anode, p) { + append_printf(bufp, end, "%c%s", sep, p); + if (sep == '=') + sep = ','; + } + } else + return -ENOENT; + + return 0; +} + +static int __init +trace_boot_hist_add_handler(struct xbc_node *hnode, char **bufp, + char *end, const char *param) +{ + struct xbc_node *knode, *anode; + const char *p; + char sep; + + /* Compose 'handler' parameter */ + p = xbc_node_find_value(hnode, param, NULL); + if (!p) { + pr_err("hist.%s requires '%s' option.\n", + xbc_node_get_data(hnode), param); + return -EINVAL; + } + append_printf(bufp, end, ":%s(%s)", xbc_node_get_data(hnode), p); + + /* Compose 'action' parameter */ + knode = xbc_node_find_child(hnode, "trace"); + if (!knode) + knode = xbc_node_find_child(hnode, "save"); + + if (knode) { + anode = xbc_node_get_child(knode); + if (!anode || !xbc_node_is_value(anode)) { + pr_err("hist.%s.%s requires value(s).\n", + xbc_node_get_data(hnode), + xbc_node_get_data(knode)); + return -EINVAL; + } + + append_printf(bufp, end, ".%s", xbc_node_get_data(knode)); + sep = '('; + xbc_array_for_each_value(anode, p) { + append_printf(bufp, end, "%c%s", sep, p); + if (sep == '(') + sep = ','; + } + append_printf(bufp, end, ")"); + } else if (xbc_node_find_child(hnode, "snapshot")) { + append_printf(bufp, end, ".snapshot()"); + } else { + pr_err("hist.%s requires an action.\n", + xbc_node_get_data(hnode)); + return -EINVAL; + } + + return 0; +} + +/* + * Histogram boottime tracing syntax. + * + * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist { + * keys = <KEY>[,...] + * values = <VAL>[,...] + * sort = <SORT-KEY>[,...] + * size = <ENTRIES> + * name = <HISTNAME> + * var { <VAR> = <EXPR> ... } * pause|continue|clear * onmax|onchange { var = <VAR>; <ACTION> [= <PARAM>] } * onmatch { event = <EVENT>; <ACTION> [= <PARAM>] } * filter = <FILTER> * } * * Where <ACTION> are; * * trace = <EVENT>, <ARG1>[, ...] * save = <ARG1>[, ...] * snapshot */ static int __init trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) { struct xbc_node *node, *knode; char *end = buf + size; const char *p; int ret = 0; + append_printf(&buf, end, "hist"); + ret = trace_boot_hist_add_array(hnode, &buf, end, "keys"); + if (ret < 0) { + if (ret == -ENOENT) + pr_err("hist requires keys.\n"); + return -EINVAL; + } + ret = trace_boot_hist_add_array(hnode, &buf, end, "values"); + if (ret == -EINVAL) + return ret; + ret = trace_boot_hist_add_array(hnode, &buf, end, "sort"); + if (ret == -EINVAL) + return ret; + p = xbc_node_find_value(hnode, "size", NULL); + if (p) + append_printf(&buf, end, ":size=%s", p); + + p = xbc_node_find_value(hnode, "name", NULL); + if (p) + append_printf(&buf, end, ":name=%s", p); + + node = xbc_node_find_child(hnode, "var"); + if (node) { + xbc_node_for_each_key_value(node, knode, p) { + /* Expression must not include spaces. */ + append_printf(&buf, end, ":%s=", + xbc_node_get_data(knode)); + append_str_nospace(&buf, end, p); + } + } + + /* Histogram control attributes (mutual exclusive) */ + if (xbc_node_find_child(hnode, "pause")) + append_printf(&buf, end, ":pause"); + else if (xbc_node_find_child(hnode, "continue")) + append_printf(&buf, end, ":continue"); + else if (xbc_node_find_child(hnode, "clear")) + append_printf(&buf, end, ":clear"); + + /* Histogram handler and actions */ + node = xbc_node_find_child(hnode, "onmax"); + if (node && trace_boot_hist_add_handler(node, &buf, end, "var") < 0) + return -EINVAL; + node = xbc_node_find_child(hnode, "onchange"); + if (node && trace_boot_hist_add_handler(node, &buf, end, "var") < 0) + return -EINVAL; + node = xbc_node_find_child(hnode, "onmatch"); + if (node && trace_boot_hist_add_handler(node, &buf, end, "event") < 0) + return -EINVAL; + + p = xbc_node_find_value(hnode, "filter", NULL); + if (p) + append_printf(&buf, end, " if %s", p); + + if (buf == end) { + pr_err("hist exceeds the max command length.\n"); + return -E2BIG; + } + + return 0; +} +#else +static int __init +trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) +{ + return -EOPNOTSUPP; +} +#endif + static void __init trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, struct xbc_node *enode) @@ -212,6 +437,12 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, else if (trigger_process_regex(file, buf) < 0) pr_err("Failed to apply an action: %s\n", buf); } + anode = xbc_node_find_child(enode, "hist"); + if (anode && + trace_boot_compose_hist_cmd(anode, buf, ARRAY_SIZE(buf)) == 0) { + if (trigger_process_regex(file, buf) < 0) + pr_err("Failed to apply hist trigger: %s\n", buf); + } } else if (xbc_node_find_value(enode, "actions", NULL)) pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n"); -- cgit v1.2.3 From 8993665abcce793f00815b3504a486dce70cc2b9 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 10 Aug 2021 11:07:29 +0900 Subject: tracing/boot: Support multiple handlers for per-event histogram Support multiple handlers for per-event histogram in boot-time tracing. Since the histogram can register multiple instances of the same handler-action with different parameters, this expands the syntax to support such cases.
With this update, the 'onmax', 'onchange' and 'onmatch' handler subkeys under the per-event histogram option will optionally take a number subkey, as below (see [.N]). ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist { onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] } onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] } } The 'N' must be a digit (or a word starting with a digit). Thus the user can add several handler-actions to the histogram, for example, ftrace.event.SOMEGROUP.SOMEEVENT.hist { keys = SOME_ID; lat = common_timestamp.usecs-$ts0 onmatch.1 { event = GROUP1.STARTEVENT1 trace = latency_event, SOME_ID, $lat } onmatch.2 { event = GROUP2.STARTEVENT2 trace = latency_event, SOME_ID, $lat } } Then, it can trace the elapsed time from GROUP1.STARTEVENT1 to SOMEGROUP.SOMEEVENT, and from GROUP2.STARTEVENT2 to SOMEGROUP.SOMEEVENT with SOME_ID key. Link: https://lkml.kernel.org/r/162856124905.203126.14913731908137885922.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 3d0e51368f51..f024f27b3602 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -245,8 +245,9 @@ trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp, } static int __init -trace_boot_hist_add_handler(struct xbc_node *hnode, char **bufp, - char *end, const char *param) +trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp, + char *end, const char *handler, + const char *param) { struct xbc_node *knode, *anode; const char *p; @@ -259,7 +260,7 @@ trace_boot_hist_add_handler(struct xbc_node *hnode, char **bufp, xbc_node_get_data(hnode), param); return -EINVAL; } - append_printf(bufp, end, ":%s(%s)", xbc_node_get_data(hnode), p); + append_printf(bufp, end, ":%s(%s)", handler, p); /* Compose 'action' parameter */ knode = xbc_node_find_child(hnode, "trace"); @@ -294,6 +295,32 @@ trace_boot_hist_add_handler(struct xbc_node *hnode, char **bufp, return 0; } +static int __init +trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp, + char *end, const char *param) +{ + struct xbc_node *node; + const char *p, *handler; + int ret; + + handler = xbc_node_get_data(hnode); + + xbc_node_for_each_subkey(hnode, node) { + p = xbc_node_get_data(node); + if (!isdigit(p[0])) + continue; + /* All digit started node should be instances. */ + ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param); + if (ret < 0) + break; + } + + if (xbc_node_find_child(hnode, param)) + ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param); + + return ret; +} + /* * Histogram boottime tracing syntax. * @@ -305,8 +332,8 @@ trace_boot_hist_add_handler(struct xbc_node *hnode, char **bufp, * name = <HISTNAME> * var { <VAR> = <EXPR> ... } * pause|continue|clear - * onmax|onchange { var = <VAR>; <ACTION> [= <PARAM>] } - * onmatch { event = <EVENT>; <ACTION> [= <PARAM>] } + * onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] } + * onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] } * filter = <FILTER> * } * @@ -368,13 +395,13 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) /* Histogram handler and actions */ node = xbc_node_find_child(hnode, "onmax"); - if (node && trace_boot_hist_add_handler(node, &buf, end, "var") < 0) + if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0) return -EINVAL; node = xbc_node_find_child(hnode, "onchange"); - if (node && trace_boot_hist_add_handler(node, &buf, end, "var") < 0) + if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0) return -EINVAL; node = xbc_node_find_child(hnode, "onmatch"); - if (node && trace_boot_hist_add_handler(node, &buf, end, "event") < 0) + if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0) return -EINVAL; p = xbc_node_find_value(hnode, "filter", NULL); -- cgit v1.2.3 From 17abd7c36c77c393fa65cde462059395d6437dba Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 10 Aug 2021 11:07:36 +0900 Subject: tracing/boot: Support multiple histograms for each event Add multiple histograms support for each event. This allows the user to set multiple histograms on one event. ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] { ... } The 'N' is a string starting with a digit, and it can be omitted for the default histogram. For example, the multiple hist trigger example in Documentation/trace/histogram.rst can be written as below; ftrace.event.net.netif_receive_skb.hist { 1 { keys = skbaddr.hex values = len filter = len < 0 } 2 { keys = skbaddr.hex values = len filter = len > 4096 } 3 { keys = skbaddr.hex values = len filter = len == 256 } 4 { keys = skbaddr.hex values = len } 5 { keys = len values = common_preempt_count } } Link: https://lkml.kernel.org/r/162856125628.203126.15846930277378572120.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index f024f27b3602..1a2b270e9cda 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -324,7 +324,7 @@ trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp, /* * Histogram boottime tracing syntax. * - * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist { + * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] { * keys = <KEY>[,...] * values = <VAL>[,...] * sort = <SORT-KEY>[,...] @@ -415,11 +415,37 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) return 0; } + +static void __init +trace_boot_init_histograms(struct trace_event_file *file, + struct xbc_node *hnode, char *buf, size_t size) +{ + struct xbc_node *node; + const char *p; + + xbc_node_for_each_subkey(hnode, node) { + p = xbc_node_get_data(node); + if (!isdigit(p[0])) + continue; + /* All digit started node should be instances. 
*/ + if (trace_boot_compose_hist_cmd(node, buf, size) == 0) { + if (trigger_process_regex(file, buf) < 0) + pr_err("Failed to apply hist trigger: %s\n", buf); + } + } + + if (xbc_node_find_child(hnode, "keys")) { + if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) + if (trigger_process_regex(file, buf) < 0) + pr_err("Failed to apply hist trigger: %s\n", buf); + } +} #else -static int __init -trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) +static void __init +trace_boot_init_histograms(struct trace_event_file *file, + struct xbc_node *hnode, char *buf, size_t size) { - return -EOPNOTSUPP; + /* do nothing */ } #endif @@ -465,11 +491,8 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, pr_err("Failed to apply an action: %s\n", buf); } anode = xbc_node_find_child(enode, "hist"); - if (anode && - trace_boot_compose_hist_cmd(anode, buf, ARRAY_SIZE(buf)) == 0) { - if (trigger_process_regex(file, buf) < 0) - pr_err("Failed to apply hist trigger: %s\n", buf); - } + if (anode) + trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf)); } else if (xbc_node_find_value(enode, "actions", NULL)) pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n"); -- cgit v1.2.3 From 64dc7f6958ef56512137ed6ec228127cef7724e9 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 10 Aug 2021 11:07:44 +0900 Subject: tracing/boot: Show correct histogram error command Since trigger_process_regex() modifies given trigger actions while parsing, the error message couldn't show what command was passed to the trigger_process_regex() when it returns an error. To fix that, show the backed up trigger action command instead of parsed buffer. Link: https://lkml.kernel.org/r/162856126413.203126.9465564928450701424.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 1a2b270e9cda..1060b0446032 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -422,6 +422,7 @@ trace_boot_init_histograms(struct trace_event_file *file, { struct xbc_node *node; const char *p; + char *tmp; xbc_node_for_each_subkey(hnode, node) { p = xbc_node_get_data(node); @@ -429,15 +430,20 @@ trace_boot_init_histograms(struct trace_event_file *file, continue; /* All digit started node should be instances. 
*/ if (trace_boot_compose_hist_cmd(node, buf, size) == 0) { + tmp = kstrdup(buf, GFP_KERNEL); if (trigger_process_regex(file, buf) < 0) - pr_err("Failed to apply hist trigger: %s\n", buf); + pr_err("Failed to apply hist trigger: %s\n", tmp); + kfree(tmp); } } if (xbc_node_find_child(hnode, "keys")) { - if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) + if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) { + tmp = kstrdup(buf, GFP_KERNEL); if (trigger_process_regex(file, buf) < 0) - pr_err("Failed to apply hist trigger: %s\n", buf); + pr_err("Failed to apply hist trigger: %s\n", tmp); + kfree(tmp); + } } } #else @@ -488,7 +494,7 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) pr_err("action string is too long: %s\n", p); else if (trigger_process_regex(file, buf) < 0) - pr_err("Failed to apply an action: %s\n", buf); + pr_err("Failed to apply an action: %s\n", p); } anode = xbc_node_find_child(enode, "hist"); if (anode) -- cgit v1.2.3 From bd74095389b378f2292ae32397b23f4b97ff23c8 Mon Sep 17 00:00:00 2001 From: zhaoxiao Date: Mon, 16 Aug 2021 13:24:30 +0800 Subject: tracepoint: Fix kerneldoc comments Fix function name in tracepoint.c kernel-doc comment to remove a warning found by clang_w1. kernel/tracepoint.c:589: warning: expecting prototype for register_tracepoint_notifier(). Prototype was for register_tracepoint_module_notifier() instead kernel/tracepoint.c:613: warning: expecting prototype for unregister_tracepoint_notifier(). Prototype was for unregister_tracepoint_module_notifier() instead Link: https://lkml.kernel.org/r/20210816052430.16539-1-zhaoxiao@uniontech.com Acked-by: Mathieu Desnoyers Signed-off-by: zhaoxiao Signed-off-by: Steven Rostedt (VMware) --- kernel/tracepoint.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index efd14c79fab4..64ea283f2f86 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -577,7 +577,7 @@ bool trace_module_has_bad_taint(struct module *mod) static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list); /** - * register_tracepoint_notifier - register tracepoint coming/going notifier + * register_tracepoint_module_notifier - register tracepoint coming/going notifier * @nb: notifier block * * Notifiers registered with this function are called on module @@ -603,7 +603,7 @@ end: EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier); /** - * unregister_tracepoint_notifier - unregister tracepoint coming/going notifier + * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier * @nb: notifier block * * The notifier block callback should expect a "struct tp_module" data -- cgit v1.2.3 From 99c37d1a63eafcd3673302a7953df760b46d0f6f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Aug 2021 16:16:19 +0200 Subject: tracing: Replace deprecated CPU-hotplug functions. The functions get_online_cpus() and put_online_cpus() have been deprecated during the CPU hotplug rework. They map directly to cpus_read_lock() and cpus_read_unlock(). Replace deprecated CPU-hotplug functions with the official version. The behavior remains unchanged. 
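The conversion is mechanical; as a minimal sketch of the idiom (walk_online_cpus() is an invented helper, not part of the patch):

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static void walk_online_cpus(void (*fn)(unsigned int cpu))
	{
		unsigned int cpu;

		/*
		 * Same semantics as the old get_online_cpus(): take the CPU
		 * hotplug lock for reading so CPUs cannot come or go while
		 * the online mask is walked.
		 */
		cpus_read_lock();
		for_each_online_cpu(cpu)
			fn(cpu);
		cpus_read_unlock();
	}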
Link: https://lkml.kernel.org/r/20210803141621.780504-37-bigeasy@linutronix.de Cc: Peter Zijlstra Cc: Ingo Molnar Acked-by: Daniel Bristot de Oliveira Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ring_buffer.c | 8 ++++---- kernel/trace/trace_hwlat.c | 28 ++++++++++++++-------------- kernel/trace/trace_osnoise.c | 16 ++++++++-------- 3 files changed, 26 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index e592d1df6f88..c5a3fbf19617 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2111,7 +2111,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, } } - get_online_cpus(); + cpus_read_lock(); /* * Fire off all the required work handlers * We can't schedule on offline CPUs, but it's not necessary @@ -2143,7 +2143,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, cpu_buffer->nr_pages_to_update = 0; } - put_online_cpus(); + cpus_read_unlock(); } else { cpu_buffer = buffer->buffers[cpu_id]; @@ -2171,7 +2171,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, goto out_err; } - get_online_cpus(); + cpus_read_lock(); /* Can't run something on an offline CPU. */ if (!cpu_online(cpu_id)) @@ -2183,7 +2183,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, } cpu_buffer->nr_pages_to_update = 0; - put_online_cpus(); + cpus_read_unlock(); } out: diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 14f46aae1981..1b83d75eb103 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -325,10 +325,10 @@ static void move_to_next_cpu(void) if (!cpumask_equal(current_mask, current->cpus_ptr)) goto change_mode; - get_online_cpus(); + cpus_read_lock(); cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); next_cpu = cpumask_next(raw_smp_processor_id(), current_mask); - put_online_cpus(); + cpus_read_unlock(); if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(current_mask); @@ -398,7 +398,7 @@ static void stop_single_kthread(void) struct hwlat_kthread_data *kdata = get_cpu_data(); struct task_struct *kthread; - get_online_cpus(); + cpus_read_lock(); kthread = kdata->kthread; if (!kthread) @@ -408,7 +408,7 @@ static void stop_single_kthread(void) kdata->kthread = NULL; out_put_cpus: - put_online_cpus(); + cpus_read_unlock(); } @@ -425,14 +425,14 @@ static int start_single_kthread(struct trace_array *tr) struct task_struct *kthread; int next_cpu; - get_online_cpus(); + cpus_read_lock(); if (kdata->kthread) goto out_put_cpus; kthread = kthread_create(kthread_fn, NULL, "hwlatd"); if (IS_ERR(kthread)) { pr_err(BANNER "could not start sampling thread\n"); - put_online_cpus(); + cpus_read_unlock(); return -ENOMEM; } @@ -452,7 +452,7 @@ static int start_single_kthread(struct trace_array *tr) wake_up_process(kthread); out_put_cpus: - put_online_cpus(); + cpus_read_unlock(); return 0; } @@ -479,10 +479,10 @@ static void stop_per_cpu_kthreads(void) { unsigned int cpu; - get_online_cpus(); + cpus_read_lock(); for_each_online_cpu(cpu) stop_cpu_kthread(cpu); - put_online_cpus(); + cpus_read_unlock(); } /* @@ -515,7 +515,7 @@ static void hwlat_hotplug_workfn(struct work_struct *dummy) mutex_lock(&trace_types_lock); mutex_lock(&hwlat_data.lock); - get_online_cpus(); + cpus_read_lock(); if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU) goto out_unlock; @@ -526,7 +526,7 @@ static void hwlat_hotplug_workfn(struct 
work_struct *dummy) start_cpu_kthread(cpu); out_unlock: - put_online_cpus(); + cpus_read_unlock(); mutex_unlock(&hwlat_data.lock); mutex_unlock(&trace_types_lock); } @@ -582,7 +582,7 @@ static int start_per_cpu_kthreads(struct trace_array *tr) unsigned int cpu; int retval; - get_online_cpus(); + cpus_read_lock(); /* * Run only on CPUs in which hwlat is allowed to run. */ @@ -596,12 +596,12 @@ static int start_per_cpu_kthreads(struct trace_array *tr) if (retval) goto out_error; } - put_online_cpus(); + cpus_read_unlock(); return 0; out_error: - put_online_cpus(); + cpus_read_unlock(); stop_per_cpu_kthreads(); return retval; } diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index b61eefe5ccf5..65b08b8e5bf8 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -1498,12 +1498,12 @@ static void stop_per_cpu_kthreads(void) { int cpu; - get_online_cpus(); + cpus_read_lock(); for_each_online_cpu(cpu) stop_kthread(cpu); - put_online_cpus(); + cpus_read_unlock(); } /* @@ -1551,7 +1551,7 @@ static int start_per_cpu_kthreads(struct trace_array *tr) int retval; int cpu; - get_online_cpus(); + cpus_read_lock(); /* * Run only on CPUs in which trace and osnoise are allowed to run. */ @@ -1572,7 +1572,7 @@ static int start_per_cpu_kthreads(struct trace_array *tr) } } - put_online_cpus(); + cpus_read_unlock(); return 0; } @@ -1590,7 +1590,7 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy) goto out_unlock_trace; mutex_lock(&interface_lock); - get_online_cpus(); + cpus_read_lock(); if (!cpumask_test_cpu(cpu, &osnoise_cpumask)) goto out_unlock; @@ -1601,7 +1601,7 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy) start_kthread(cpu); out_unlock: - put_online_cpus(); + cpus_read_unlock(); mutex_unlock(&interface_lock); out_unlock_trace: mutex_unlock(&trace_types_lock); @@ -1743,11 +1743,11 @@ osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count, /* * osnoise_cpumask is read by CPU hotplug operations. */ - get_online_cpus(); + cpus_read_lock(); cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new); - put_online_cpus(); + cpus_read_unlock(); mutex_unlock(&interface_lock); if (running) -- cgit v1.2.3 From faf4ef823ac5f3b6a34a73b76c52895dee3dce55 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 23 Jun 2021 14:21:16 +0200 Subject: dma-direct: add support for dma_coherent_default_memory Add an option to allocate uncached memory for dma_alloc_coherent from the global dma_coherent_default_memory. This will allow to move arm-nommu (and eventually other platforms) to use generic code for allocating uncached memory from a pre-populated pool. Note that this is a different pool from the one that platforms that can remap at runtime use for GFP_ATOMIC allocations for now, although there might be opportunities to eventually end up with a common codebase for the two use cases. 
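From a driver's point of view the allocation interface does not change; a minimal sketch, assuming 'dev' is a valid, non-DMA-coherent device on a platform that selects DMA_GLOBAL_POOL:

	#include <linux/dma-mapping.h>

	static void *alloc_shared_buffer(struct device *dev, size_t size,
					 dma_addr_t *dma_handle)
	{
		/*
		 * With CONFIG_DMA_GLOBAL_POOL enabled, dma-direct satisfies
		 * this request from the global coherent pool instead of
		 * calling into arch_dma_alloc().
		 */
		return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	}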
Signed-off-by: Christoph Hellwig Tested-by: Dillon Min --- kernel/dma/Kconfig | 4 ++++ kernel/dma/direct.c | 15 +++++++++++++++ 2 files changed, 19 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 77b405508743..725cfd51762b 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -93,6 +93,10 @@ config DMA_COHERENT_POOL select GENERIC_ALLOCATOR bool +config DMA_GLOBAL_POOL + select DMA_DECLARE_COHERENT + bool + config DMA_REMAP bool depends on MMU diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index f33ceb68aef2..8dca4f97d12d 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -156,9 +156,14 @@ void *dma_direct_alloc(struct device *dev, size_t size, if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && + !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && !dev_is_dma_coherent(dev)) return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); + if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && + !dev_is_dma_coherent(dev)) + return dma_alloc_from_global_coherent(dev, size, dma_handle); + /* * Remapping or decrypting memory may block. If either is required and * we can't block, allocate the memory from the atomic pools. @@ -255,11 +260,19 @@ void dma_direct_free(struct device *dev, size_t size, if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && + !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && !dev_is_dma_coherent(dev)) { arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); return; } + if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && + !dev_is_dma_coherent(dev)) { + if (!dma_release_from_global_coherent(page_order, cpu_addr)) + WARN_ON_ONCE(1); + return; + } + /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) @@ -462,6 +475,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; + if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) + return ret; if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff) return -ENXIO; -- cgit v1.2.3 From 70d6aa0ecfed253a2b14659a6c77359af6d9b3ee Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Jun 2021 19:38:19 +0200 Subject: dma-mapping: allow using the global coherent pool for !ARM Switch an ifdef so that the global coherent pool is initialized for any architecture that selects the DMA_GLOBAL_POOL symbol instead of hardcoding ARM.
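As a sketch of how an architecture opts in, arm-nommu being the case this series targets; the exact select condition below is an assumption for illustration, not part of this patch:

	# arch/arm/Kconfig (illustrative sketch)
	config ARM
		...
		select DMA_GLOBAL_POOL if !MMU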
Signed-off-by: Christoph Hellwig Tested-by: Dillon Min --- kernel/dma/coherent.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 794e76b03b34..67b126afac5a 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -361,7 +361,9 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem) pr_err("Reserved memory: regions without no-map are not yet supported\n"); return -EINVAL; } +#endif +#ifdef CONFIG_DMA_GLOBAL_POOL if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { WARN(dma_reserved_default_memory, "Reserved memory: region for default DMA coherent area is redefined\n"); -- cgit v1.2.3 From a6933571f34a9aee843fff2aa4b96949c57d6274 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 23 Jun 2021 16:00:04 +0200 Subject: dma-mapping: simplify dma_init_coherent_memory Return the allocated dma_coherent_mem structure, set the use_dma_pfn_offset and print the failure warning inside of dma_init_coherent_memory instead of leaving that to the callers. Signed-off-by: Christoph Hellwig Tested-by: Dillon Min --- kernel/dma/coherent.c | 78 ++++++++++++++++++++++----------------------------- 1 file changed, 33 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 67b126afac5a..ab397ebfd5ad 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -37,51 +37,44 @@ static inline dma_addr_t dma_get_device_base(struct device *dev, return mem->device_base; } -static int dma_init_coherent_memory(phys_addr_t phys_addr, - dma_addr_t device_addr, size_t size, - struct dma_coherent_mem **mem) +static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset) { - struct dma_coherent_mem *dma_mem = NULL; - void *mem_base = NULL; + struct dma_coherent_mem *dma_mem; int pages = size >> PAGE_SHIFT; int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); - int ret; + void *mem_base; - if (!size) { - ret = -EINVAL; - goto out; - } + if (!size) + return ERR_PTR(-EINVAL); mem_base = memremap(phys_addr, size, MEMREMAP_WC); - if (!mem_base) { - ret = -EINVAL; - goto out; - } + if (!mem_base) + return ERR_PTR(-EINVAL); + dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); - if (!dma_mem) { - ret = -ENOMEM; - goto out; - } + if (!dma_mem) + goto out_unmap_membase; dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!dma_mem->bitmap) { - ret = -ENOMEM; - goto out; - } + if (!dma_mem->bitmap) + goto out_free_dma_mem; dma_mem->virt_base = mem_base; dma_mem->device_base = device_addr; dma_mem->pfn_base = PFN_DOWN(phys_addr); dma_mem->size = pages; + dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset; spin_lock_init(&dma_mem->spinlock); - *mem = dma_mem; - return 0; + return dma_mem; -out: +out_free_dma_mem: kfree(dma_mem); - if (mem_base) - memunmap(mem_base); - return ret; +out_unmap_membase: + memunmap(mem_base); + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n", + &phys_addr, size / SZ_1M); + return ERR_PTR(-ENOMEM); } static void dma_release_coherent_memory(struct dma_coherent_mem *mem) @@ -130,9 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, struct dma_coherent_mem *mem; int ret; - ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem); - if (ret) - return ret; + mem = dma_init_coherent_memory(phys_addr, device_addr, size, false); + if (IS_ERR(mem)) + return PTR_ERR(mem); ret 
= dma_assign_coherent_memory(dev, mem); if (ret) @@ -319,21 +312,16 @@ static struct reserved_mem *dma_reserved_default_memory __initdata; static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) { - struct dma_coherent_mem *mem = rmem->priv; - int ret; - - if (!mem) { - ret = dma_init_coherent_memory(rmem->base, rmem->base, - rmem->size, &mem); - if (ret) { - pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - return ret; - } + if (!rmem->priv) { + struct dma_coherent_mem *mem; + + mem = dma_init_coherent_memory(rmem->base, rmem->base, + rmem->size, true); + if (IS_ERR(mem)) + return PTR_ERR(mem); + rmem->priv = mem; } - mem->use_dev_dma_pfn_offset = true; - rmem->priv = mem; - dma_assign_coherent_memory(dev, mem); + dma_assign_coherent_memory(dev, rmem->priv); return 0; } -- cgit v1.2.3 From 39a2d3506b2d53c569a6db13d65b2f3728c4feec Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 23 Jun 2021 16:05:05 +0200 Subject: dma-mapping: add a dma_init_global_coherent helper Add a new helper to initialize the global coherent pool. This both cleans up the existing initialization which indirects through the reserved_mem_ops that are normally only used for struct device, and also allows using the global pool for non-devicetree architectures. Signed-off-by: Christoph Hellwig Tested-by: Dillon Min --- include/linux/dma-map-ops.h | 2 +- kernel/dma/coherent.c | 32 ++++++++++++++------------------ 2 files changed, 15 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 2f842498c448..068f1b11a6a4 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -177,7 +177,7 @@ void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, int dma_release_from_global_coherent(int order, void *vaddr); int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, size_t size, int *ret); - +int dma_init_global_coherent(phys_addr_t phys_addr, size_t size); #else static inline int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size) diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index ab397ebfd5ad..160d4e246ecb 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -300,6 +300,18 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, vaddr, size, ret); } +int dma_init_global_coherent(phys_addr_t phys_addr, size_t size) +{ + struct dma_coherent_mem *mem; + + mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true); + if (IS_ERR(mem)) + return PTR_ERR(mem); + dma_coherent_default_memory = mem; + pr_info("DMA: default coherent area is set\n"); + return 0; +} + /* * Support for reserved memory regions defined in device tree */ @@ -367,26 +379,10 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem) static int __init dma_init_reserved_memory(void) { - const struct reserved_mem_ops *ops; - int ret; - if (!dma_reserved_default_memory) return -ENOMEM; - - ops = dma_reserved_default_memory->ops; - - /* - * We rely on rmem_dma_device_init() does not propagate error of - * dma_assign_coherent_memory() for "NULL" device. 
- ret = ops->device_init(dma_reserved_default_memory, NULL); - - if (!ret) { - dma_coherent_default_memory = dma_reserved_default_memory->priv; - pr_info("DMA: default coherent area is set\n"); - } - - return ret; + return dma_init_global_coherent(dma_reserved_default_memory->base, + dma_reserved_default_memory->size); } core_initcall(dma_init_reserved_memory); -- cgit v1.2.3 From 8b0e6c744fef6462382041b30878c91f15069fc6 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 16 Aug 2021 23:42:56 -0400 Subject: tracing: Add DYNAMIC flag for dynamic events To differentiate between static and dynamic events, add a new flag DYNAMIC to the event flags that all dynamic events have set. This makes it possible to differentiate a dynamic event from a static event when attaching to one. Static events have a mod pointer that references the module they were created in (or NULL for core kernel). The module's reference count can be incremented when the event has something attached to it. But there exists no such mechanism for dynamic events. This is dangerous as the dynamic events may now disappear without the "attachment" knowing that it no longer exists. To enforce the dynamic flag, change dyn_event_add() to pass the event that is being created such that it can set the DYNAMIC flag of the event. This helps make sure that no location that creates a dynamic event misses setting this flag. Link: https://lore.kernel.org/linux-trace-devel/20210813004448.51c7de69ce432d338f4d226b@kernel.org/ Link: https://lkml.kernel.org/r/20210817035026.936958254@goodmis.org Suggested-by: Masami Hiramatsu Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 3 +++ kernel/trace/trace_dynevent.h | 4 +++- kernel/trace/trace_events_synth.c | 2 +- kernel/trace/trace_kprobe.c | 4 ++-- kernel/trace/trace_uprobe.c | 4 ++-- 5 files changed, 11 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ad413b382a3c..53c9dffd87fd 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -310,6 +310,7 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, + TRACE_EVENT_FL_DYNAMIC_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, }; @@ -321,6 +322,7 @@ enum { * NO_SET_FILTER - Set when filter has error and is to be ignored * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file * TRACEPOINT - Event is a tracepoint + * DYNAMIC - Event is a dynamic event (created at run time) * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe */ @@ -330,6 +332,7 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), + TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), }; diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h index 7754936b57ee..936477a111d3 100644 --- a/kernel/trace/trace_dynevent.h +++ b/kernel/trace/trace_dynevent.h @@ -76,13 +76,15 @@ int dyn_event_init(struct dyn_event *ev, struct dyn_event_operations *ops) return 0; } -static inline int dyn_event_add(struct dyn_event *ev) +static inline int dyn_event_add(struct dyn_event *ev, + struct trace_event_call *call) { lockdep_assert_held(&event_mutex); if (!ev || !ev->ops) return -EINVAL; + call->flags |= TRACE_EVENT_FL_DYNAMIC; list_add_tail(&ev->list, &dyn_event_list); return 0; } diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 9315fc03e303..f4f5489e1e28 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -1298,7 +1298,7 @@ static int __create_synth_event(const char *name, const char *raw_fields) } ret = register_synth_event(event); if (!ret) - dyn_event_add(&event->devent); + dyn_event_add(&event->devent, &event->call); else free_synth_event(event); out: diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index ea6178cb5e33..bfef43bfce37 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -618,7 +618,7 @@ static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to) if (ret) trace_probe_unlink(&tk->tp); else - dyn_event_add(&tk->devent); + dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp)); return ret; } @@ -661,7 +661,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk) if (ret < 0) unregister_kprobe_event(tk); else - dyn_event_add(&tk->devent); + dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp)); end: mutex_unlock(&event_mutex); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 9b50869a5ddb..50eca53b8d22 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -455,7 +455,7 @@ static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to) /* Append to existing event */ ret = trace_probe_append(&tu->tp, &to->tp); if (!ret) - dyn_event_add(&tu->devent); + dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); return ret; } @@ -518,7 +518,7 @@ static int register_trace_uprobe(struct trace_uprobe *tu) goto end; } - dyn_event_add(&tu->devent); + dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); end: mutex_unlock(&event_mutex); -- cgit v1.2.3 From 1d18538e6a09265003a0a94ca779d7a6127cb76c Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 16 Aug 2021 23:42:57 -0400 Subject: tracing: Have dynamic events have a ref counter As dynamic events are not created by modules, if something is attached to one, calling "try_module_get()" on its "mod" field is not going to keep the dynamic event from going away. Since dynamic events do not need the "mod" pointer of the event structure, make a union out of it in order to save memory (there's one structure for each of the thousand+ events in the kernel), and have any event with the DYNAMIC flag set use a ref counter instead.
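To make the new convention concrete, here is a sketch of how a consumer would pin an event while attaching to it, using the helpers this patch introduces (attach_to_event() itself is hypothetical):

	#include <linux/trace_events.h>

	static int attach_to_event(struct trace_event_call *call)
	{
		/*
		 * For static events this takes a reference on the owning
		 * module; for TRACE_EVENT_FL_DYNAMIC events it bumps the
		 * new per-event refcnt instead.
		 */
		if (!trace_event_try_get_ref(call))
			return -ENODEV;	/* event is being removed */

		/* ... attach the probe or trigger here ... */

		trace_event_put_ref(call);
		return 0;
	}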
Link: https://lore.kernel.org/linux-trace-devel/20210813004448.51c7de69ce432d338f4d226b@kernel.org/ Link: https://lkml.kernel.org/r/20210817035027.174869074@goodmis.org Suggested-by: Masami Hiramatsu Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 45 ++++++++++++++++++++++++++++++++++++- kernel/trace/trace.c | 4 ++-- kernel/trace/trace_dynevent.c | 38 +++++++++++++++++++++++++++++++ kernel/trace/trace_event_perf.c | 6 ++--- kernel/trace/trace_events.c | 22 +++++++++++------- kernel/trace/trace_events_synth.c | 19 ++++++++++------ kernel/trace/trace_events_trigger.c | 6 ++--- kernel/trace/trace_kprobe.c | 4 ++++ kernel/trace/trace_uprobe.c | 4 ++++ 9 files changed, 124 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 53c9dffd87fd..9564c4d9a3b6 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -350,7 +350,14 @@ struct trace_event_call { struct trace_event event; char *print_fmt; struct event_filter *filter; - void *mod; + /* + * Static events can disappear with modules, + * where as dynamic ones need their own ref count. + */ + union { + void *module; + atomic_t refcnt; + }; void *data; /* See the TRACE_EVENT_FL_* flags above */ @@ -366,6 +373,42 @@ struct trace_event_call { #endif }; +#ifdef CONFIG_DYNAMIC_EVENTS +bool trace_event_dyn_try_get_ref(struct trace_event_call *call); +void trace_event_dyn_put_ref(struct trace_event_call *call); +bool trace_event_dyn_busy(struct trace_event_call *call); +#else +static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call) +{ + /* Without DYNAMIC_EVENTS configured, nothing should be calling this */ + return false; +} +static inline void trace_event_dyn_put_ref(struct trace_event_call *call) +{ +} +static inline bool trace_event_dyn_busy(struct trace_event_call *call) +{ + /* Nothing should call this without DYNAIMIC_EVENTS configured. 
*/ + return true; +} +#endif + +static inline bool trace_event_try_get_ref(struct trace_event_call *call) +{ + if (call->flags & TRACE_EVENT_FL_DYNAMIC) + return trace_event_dyn_try_get_ref(call); + else + return try_module_get(call->module); +} + +static inline void trace_event_put_ref(struct trace_event_call *call) +{ + if (call->flags & TRACE_EVENT_FL_DYNAMIC) + trace_event_dyn_put_ref(call); + else + module_put(call->module); +} + #ifdef CONFIG_PERF_EVENTS static inline bool bpf_prog_array_valid(struct trace_event_call *call) { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index be0169594de5..8425c3d70895 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3697,11 +3697,11 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str) return false; event = container_of(trace_event, struct trace_event_call, event); - if (!event->mod) + if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module) return false; /* Would rather have rodata, but this will suffice */ - if (within_module_core(addr, event->mod)) + if (within_module_core(addr, event->module)) return true; return false; diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index e57cc0870892..1110112e55bd 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -13,11 +13,49 @@ #include #include "trace.h" +#include "trace_output.h" /* for trace_event_sem */ #include "trace_dynevent.h" static DEFINE_MUTEX(dyn_event_ops_mutex); static LIST_HEAD(dyn_event_ops_list); +bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call) +{ + struct trace_event_call *call; + bool ret = false; + + if (WARN_ON_ONCE(!(dyn_call->flags & TRACE_EVENT_FL_DYNAMIC))) + return false; + + down_read(&trace_event_sem); + list_for_each_entry(call, &ftrace_events, list) { + if (call == dyn_call) { + atomic_inc(&dyn_call->refcnt); + ret = true; + } + } + up_read(&trace_event_sem); + return ret; +} + +void trace_event_dyn_put_ref(struct trace_event_call *call) +{ + if (WARN_ON_ONCE(!(call->flags & TRACE_EVENT_FL_DYNAMIC))) + return; + + if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) { + atomic_set(&call->refcnt, 0); + return; + } + + atomic_dec(&call->refcnt); +} + +bool trace_event_dyn_busy(struct trace_event_call *call) +{ + return atomic_read(&call->refcnt) != 0; +} + int dyn_event_register(struct dyn_event_operations *ops) { if (!ops || !ops->create || !ops->show || !ops->is_busy || diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 03be4435d103..6aed10e2f7ce 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -177,7 +177,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event) } } out: - module_put(tp_event->mod); + trace_event_put_ref(tp_event); } static int perf_trace_event_open(struct perf_event *p_event) @@ -224,10 +224,10 @@ int perf_trace_init(struct perf_event *p_event) list_for_each_entry(tp_event, &ftrace_events, list) { if (tp_event->event.type == event_id && tp_event->class && tp_event->class->reg && - try_module_get(tp_event->mod)) { + trace_event_try_get_ref(tp_event)) { ret = perf_trace_event_init(tp_event, p_event); if (ret) - module_put(tp_event->mod); + trace_event_put_ref(tp_event); break; } } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 80e96989770e..1349b6de5eeb 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2525,7 +2525,10 @@ __register_event(struct trace_event_call *call, struct module *mod) return 
ret; list_add(&call->list, &ftrace_events); - call->mod = mod; + if (call->flags & TRACE_EVENT_FL_DYNAMIC) + atomic_set(&call->refcnt, 0); + else + call->module = mod; return 0; } @@ -2839,7 +2842,9 @@ static void trace_module_remove_events(struct module *mod) down_write(&trace_event_sem); list_for_each_entry_safe(call, p, &ftrace_events, list) { - if (call->mod == mod) + if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module) + continue; + if (call->module == mod) __trace_remove_event_call(call); } up_write(&trace_event_sem); @@ -2982,7 +2987,7 @@ struct trace_event_file *trace_get_event_file(const char *instance, } /* Don't let event modules unload while in use */ - ret = try_module_get(file->event_call->mod); + ret = trace_event_try_get_ref(file->event_call); if (!ret) { trace_array_put(tr); ret = -EBUSY; @@ -3012,7 +3017,7 @@ EXPORT_SYMBOL_GPL(trace_get_event_file); void trace_put_event_file(struct trace_event_file *file) { mutex_lock(&event_mutex); - module_put(file->event_call->mod); + trace_event_put_ref(file->event_call); mutex_unlock(&event_mutex); trace_array_put(file->tr); @@ -3147,7 +3152,7 @@ static int free_probe_data(void *data) if (!edata->ref) { /* Remove the SOFT_MODE flag */ __ftrace_event_enable_disable(edata->file, 0, 1); - module_put(edata->file->event_call->mod); + trace_event_put_ref(edata->file->event_call); kfree(edata); } return 0; @@ -3280,7 +3285,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, out_reg: /* Don't let event modules unload while probe registered */ - ret = try_module_get(file->event_call->mod); + ret = trace_event_try_get_ref(file->event_call); if (!ret) { ret = -EBUSY; goto out_free; @@ -3310,7 +3315,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, out_disable: __ftrace_event_enable_disable(file, 0, 1); out_put: - module_put(file->event_call->mod); + trace_event_put_ref(file->event_call); out_free: kfree(data); goto out; @@ -3376,7 +3381,8 @@ void __trace_early_add_events(struct trace_array *tr) list_for_each_entry(call, &ftrace_events, list) { /* Early boot up should not have any modules loaded */ - if (WARN_ON_ONCE(call->mod)) + if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) && + WARN_ON_ONCE(call->module)) continue; ret = __trace_early_add_new_event(call, tr); diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index f4f5489e1e28..d54094b7a9d7 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -1369,13 +1369,15 @@ static int destroy_synth_event(struct synth_event *se) int ret; if (se->ref) - ret = -EBUSY; - else { - ret = unregister_synth_event(se); - if (!ret) { - dyn_event_remove(&se->devent); - free_synth_event(se); - } + return -EBUSY; + + if (trace_event_dyn_busy(&se->call)) + return -EBUSY; + + ret = unregister_synth_event(se); + if (!ret) { + dyn_event_remove(&se->devent); + free_synth_event(se); } return ret; @@ -2102,6 +2104,9 @@ static int synth_event_release(struct dyn_event *ev) if (event->ref) return -EBUSY; + if (trace_event_dyn_busy(&event->call)) + return -EBUSY; + ret = unregister_synth_event(event); if (ret) return ret; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index cf84d0f6583a..6b11e335a62e 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -1334,7 +1334,7 @@ void event_enable_trigger_free(struct event_trigger_ops *ops, if (!data->ref) { /* Remove the SOFT_MODE flag */ trace_event_enable_disable(enable_data->file, 0, 
1); - module_put(enable_data->file->event_call->mod); + trace_event_put_ref(enable_data->file->event_call); trigger_data_free(data); kfree(enable_data); } @@ -1481,7 +1481,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, out_reg: /* Don't let event modules unload while probe registered */ - ret = try_module_get(event_enable_file->event_call->mod); + ret = trace_event_try_get_ref(event_enable_file->event_call); if (!ret) { ret = -EBUSY; goto out_free; @@ -1510,7 +1510,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, out_disable: trace_event_enable_disable(event_enable_file, 0, 1); out_put: - module_put(event_enable_file->event_call->mod); + trace_event_put_ref(event_enable_file->event_call); out_free: if (cmd_ops->set_filter) cmd_ops->set_filter(NULL, trigger_data, NULL); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index bfef43bfce37..82c3b86013b2 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -543,6 +543,10 @@ static int unregister_trace_kprobe(struct trace_kprobe *tk) if (trace_probe_is_enabled(&tk->tp)) return -EBUSY; + /* If there's a reference to the dynamic event */ + if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp))) + return -EBUSY; + /* Will fail if probe is being used by ftrace or perf */ if (unregister_kprobe_event(tk)) return -EBUSY; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 50eca53b8d22..1e2a92e7607d 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -393,6 +393,10 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu) if (trace_probe_has_sibling(&tu->tp)) goto unreg; + /* If there's a reference to the dynamic event */ + if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp))) + return -EBUSY; + ret = unregister_uprobe_event(tu); if (ret) return ret; -- cgit v1.2.3 From fcd9db51df8e219e3a61b14e9b8c5ee67d39d37c Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 16 Aug 2021 23:42:58 -0400 Subject: tracing/probe: Have traceprobe_parse_probe_arg() take a const arg The two places that call traceprobe_parse_probe_arg() allocate a temporary buffer to copy the argv[i] into, because argv[i] is constant and the traceprobe_parse_probe_arg() will modify it to do the parsing. These two places allocate this buffer and then free it right after calling this function, leaving the onus of this allocation to the caller. As there's about to be a third user of this function that will have to do the same thing, instead of having the caller allocate the temporary buffer, simply move that allocation into the traceprobe_parse_probe_arg() itself, which will simplify the code of the callers. 
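The resulting ownership pattern can be sketched outside the kernel sources as follows (parse_arg() and parse_body() here are illustrative names, not the actual trace_probe API): the callee duplicates the const input once, hands the writable copy to the parser, and frees it on every exit path, which is what lets callers such as __trace_kprobe_create() pass argv[i] directly.

	/* Sketch only: assumes a parse_body() that may modify its argument. */
	static int parse_arg(const char *argv)
	{
		char *arg;
		int ret;

		arg = kstrdup(argv, GFP_KERNEL);	/* callee owns the copy now */
		if (!arg)
			return -ENOMEM;

		ret = parse_body(arg);			/* parser may write into 'arg' */

		kfree(arg);				/* freed on all exit paths */
		return ret;
	}
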
Link: https://lkml.kernel.org/r/20210817035027.385422828@goodmis.org Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 9 +-------- kernel/trace/trace_probe.c | 47 +++++++++++++++++++++++++++------------------ kernel/trace/trace_probe.h | 2 +- kernel/trace/trace_uprobe.c | 9 +-------- 4 files changed, 31 insertions(+), 36 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 82c3b86013b2..ed1e3c2087ab 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -873,15 +873,8 @@ static int __trace_kprobe_create(int argc, const char *argv[]) /* parse arguments */ for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { - tmp = kstrdup(argv[i], GFP_KERNEL); - if (!tmp) { - ret = -ENOMEM; - goto error; - } - trace_probe_log_set_index(i + 2); - ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags); - kfree(tmp); + ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags); if (ret) goto error; /* This can be -ENOMEM */ } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 15413ad7cef2..ef717b373443 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -540,26 +540,34 @@ static int __parse_bitfield_probe_arg(const char *bf, } /* String length checking wrapper */ -static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, +static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, struct probe_arg *parg, unsigned int flags, int offset) { struct fetch_insn *code, *scode, *tmp = NULL; char *t, *t2, *t3; + char *arg; int ret, len; + arg = kstrdup(argv, GFP_KERNEL); + if (!arg) + return -ENOMEM; + + ret = -EINVAL; len = strlen(arg); if (len > MAX_ARGSTR_LEN) { trace_probe_log_err(offset, ARG_TOO_LONG); - return -EINVAL; + goto out; } else if (len == 0) { trace_probe_log_err(offset, NO_ARG_BODY); - return -EINVAL; + goto out; } + ret = -ENOMEM; parg->comm = kstrdup(arg, GFP_KERNEL); if (!parg->comm) - return -ENOMEM; + goto out; + ret = -EINVAL; t = strchr(arg, ':'); if (t) { *t = '\0'; @@ -571,22 +579,22 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, offset += t2 + strlen(t2) - arg; trace_probe_log_err(offset, ARRAY_NO_CLOSE); - return -EINVAL; + goto out; } else if (t3[1] != '\0') { trace_probe_log_err(offset + t3 + 1 - arg, BAD_ARRAY_SUFFIX); - return -EINVAL; + goto out; } *t3 = '\0'; if (kstrtouint(t2, 0, &parg->count) || !parg->count) { trace_probe_log_err(offset + t2 - arg, BAD_ARRAY_NUM); - return -EINVAL; + goto out; } if (parg->count > MAX_ARRAY_LEN) { trace_probe_log_err(offset + t2 - arg, ARRAY_TOO_BIG); - return -EINVAL; + goto out; } } } @@ -598,29 +606,30 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) { /* The type of $comm must be "string", and not an array. */ if (parg->count || (t && strcmp(t, "string"))) - return -EINVAL; + goto out; parg->type = find_fetch_type("string"); } else parg->type = find_fetch_type(t); if (!parg->type) { trace_probe_log_err(offset + (t ? 
(t - arg) : 0), BAD_TYPE); - return -EINVAL; + goto out; } parg->offset = *size; *size += parg->type->size * (parg->count ?: 1); + ret = -ENOMEM; if (parg->count) { len = strlen(parg->type->fmttype) + 6; parg->fmt = kmalloc(len, GFP_KERNEL); if (!parg->fmt) - return -ENOMEM; + goto out; snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype, parg->count); } code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL); if (!code) - return -ENOMEM; + goto out; code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1], @@ -628,6 +637,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, if (ret) goto fail; + ret = -EINVAL; /* Store operation */ if (!strcmp(parg->type->name, "string") || !strcmp(parg->type->name, "ustring")) { @@ -636,7 +646,6 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, code->op != FETCH_OP_DATA) { trace_probe_log_err(offset + (t ? (t - arg) : 0), BAD_STRING); - ret = -EINVAL; goto fail; } if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || @@ -650,7 +659,6 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, code++; if (code->op != FETCH_OP_NOP) { trace_probe_log_err(offset, TOO_MANY_OPS); - ret = -EINVAL; goto fail; } } @@ -672,7 +680,6 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, code++; if (code->op != FETCH_OP_NOP) { trace_probe_log_err(offset, TOO_MANY_OPS); - ret = -EINVAL; goto fail; } code->op = FETCH_OP_ST_RAW; @@ -687,6 +694,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, goto fail; } } + ret = -EINVAL; /* Loop(Array) operation */ if (parg->count) { if (scode->op != FETCH_OP_ST_MEM && @@ -694,13 +702,11 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, scode->op != FETCH_OP_ST_USTRING) { trace_probe_log_err(offset + (t ? 
(t - arg) : 0), BAD_STRING); - ret = -EINVAL; goto fail; } code++; if (code->op != FETCH_OP_NOP) { trace_probe_log_err(offset, TOO_MANY_OPS); - ret = -EINVAL; goto fail; } code->op = FETCH_OP_LP_ARRAY; @@ -709,6 +715,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, code++; code->op = FETCH_OP_END; + ret = 0; /* Shrink down the code buffer */ parg->code = kcalloc(code - tmp + 1, sizeof(*code), GFP_KERNEL); if (!parg->code) @@ -724,6 +731,8 @@ fail: kfree(code->data); } kfree(tmp); +out: + kfree(arg); return ret; } @@ -745,11 +754,11 @@ static int traceprobe_conflict_field_name(const char *name, return 0; } -int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg, +int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, const char *arg, unsigned int flags) { struct probe_arg *parg = &tp->args[i]; - char *body; + const char *body; /* Increment count for freeing args in error case */ tp->nr_args++; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 227d518e5ba5..42aa084902fa 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -354,7 +354,7 @@ int trace_probe_create(const char *raw_command, int (*createfn)(int, const char #define TPARG_FL_MASK GENMASK(2, 0) extern int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, - char *arg, unsigned int flags); + const char *argv, unsigned int flags); extern int traceprobe_update_arg(struct probe_arg *arg); extern void traceprobe_free_probe_arg(struct probe_arg *arg); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 1e2a92e7607d..93ff96541971 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -684,16 +684,9 @@ static int __trace_uprobe_create(int argc, const char **argv) /* parse arguments */ for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { - tmp = kstrdup(argv[i], GFP_KERNEL); - if (!tmp) { - ret = -ENOMEM; - goto error; - } - trace_probe_log_set_index(i + 2); - ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp, + ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], is_return ? TPARG_FL_RETURN : 0); - kfree(tmp); if (ret) goto error; } -- cgit v1.2.3 From bc1b973455fd5d84dac4a094da44202f2d8a98ef Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 16 Aug 2021 23:42:59 -0400 Subject: tracing/probes: Allow for dot delimiter as well as slash for system names Kprobe and uprobe events can add a "system" to the events that are created via the kprobe_events and uprobe_events files respectively. If they do not include a "system" in the name, then the default "kprobes" or "uprobes" is used. The current notation to specify a system for one of these probe events is to add a '/' delimiter in the name, where the content before the '/' will be the system to use, and the content after will be the event name. echo 'p:my_system/my_event' > kprobe_events But this is inconsistent with the way histogram triggers separate their system / event names. The histogram triggers use a '.' delimiter, which can be confusing. To allow this to be more consistent, as well as keep backward compatibility, allow the kprobe and uprobe events to denote a system name with either a '/' or a '.'. 
That is: echo 'p:my_system/my_event' > kprobe_events is equivalent to: echo 'p:my_system.my_event' > kprobe_events Link: https://lore.kernel.org/linux-trace-devel/20210813004448.51c7de69ce432d338f4d226b@kernel.org/ Link: https://lkml.kernel.org/r/20210817035027.580493202@goodmis.org Suggested-by: Masami Hiramatsu Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_probe.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index ef717b373443..0916a9964719 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -233,6 +233,9 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup, int len; slash = strchr(event, '/'); + if (!slash) + slash = strchr(event, '.'); + if (slash) { if (slash == event) { trace_probe_log_err(offset, NO_GROUP_NAME); -- cgit v1.2.3 From 845cbf3e11acc263ec7a46a89097d88e7e50a9ae Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 16 Aug 2021 23:43:00 -0400 Subject: tracing/probes: Use struct_size() instead of defining custom macros Remove SIZEOF_TRACE_KPROBE() and SIZEOF_TRACE_UPROBE() and use struct_size() as that's what it is made for. No need to have custom macros. Especially since struct_size() has some extra memory checks for correctness. Link: https://lkml.kernel.org/r/20210817035027.795000217@goodmis.org Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 6 +----- kernel/trace/trace_uprobe.c | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index ed1e3c2087ab..ca726c9d0859 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -80,10 +80,6 @@ static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev) for_each_dyn_event(dpos) \ if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos))) -#define SIZEOF_TRACE_KPROBE(n) \ - (offsetof(struct trace_kprobe, tp.args) + \ - (sizeof(struct probe_arg) * (n))) - static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk) { return tk->rp.handler != NULL; @@ -265,7 +261,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group, struct trace_kprobe *tk; int ret = -ENOMEM; - tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL); + tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL); if (!tk) return ERR_PTR(ret); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 93ff96541971..590bb9a02f8d 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -83,10 +83,6 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev) for_each_dyn_event(dpos) \ if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos))) -#define SIZEOF_TRACE_UPROBE(n) \ - (offsetof(struct trace_uprobe, tp.args) + \ - (sizeof(struct probe_arg) * (n))) - static int register_uprobe_event(struct trace_uprobe *tu); static int unregister_uprobe_event(struct trace_uprobe *tu); @@ -340,7 +336,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) struct trace_uprobe *tu; int ret; - tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL); + tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL); if (!tu) return ERR_PTR(-ENOMEM); -- cgit v1.2.3 From 39f75da7bcc829ddc4d40bb60d0e95520de7898b Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 2 Aug 2021 23:40:31 +0300 Subject: isystem: trim/fixup 
stdarg.h and other headers Delete/fixup few includes in anticipation of global -isystem compile option removal. Note: crypto/aegis128-neon-inner.c keeps due to redefinition of uintptr_t error (one definition comes from , another from ). Signed-off-by: Alexey Dobriyan Signed-off-by: Masahiro Yamada --- arch/arm/kernel/process.c | 2 -- arch/arm/mach-bcm/bcm_kona_smc.c | 2 -- arch/arm64/kernel/process.c | 3 --- arch/openrisc/kernel/process.c | 2 -- arch/parisc/kernel/process.c | 3 --- arch/powerpc/kernel/prom.c | 1 - arch/sparc/kernel/process_32.c | 3 --- arch/sparc/kernel/process_64.c | 3 --- arch/um/drivers/rtc_user.c | 1 + arch/um/drivers/vector_user.c | 1 + arch/um/include/shared/irq_user.h | 1 - arch/um/include/shared/os.h | 1 - arch/um/os-Linux/signal.c | 2 +- arch/um/os-Linux/util.c | 1 + drivers/block/xen-blkback/xenbus.c | 1 - drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 1 - drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 1 - drivers/macintosh/macio-adb.c | 1 - drivers/macintosh/via-macii.c | 2 -- drivers/net/wireless/intersil/orinoco/hermes.c | 1 - drivers/net/wwan/iosm/iosm_ipc_imem.h | 1 - drivers/pinctrl/aspeed/pinmux-aspeed.h | 1 - drivers/scsi/elx/efct/efct_driver.h | 1 - drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h | 2 -- drivers/xen/xen-scsiback.c | 2 -- include/linux/filter.h | 2 -- include/linux/mISDNif.h | 1 - kernel/debug/kdb/kdb_support.c | 1 - sound/aoa/codecs/onyx.h | 1 - sound/aoa/codecs/tas.c | 1 - sound/core/info.c | 1 - 31 files changed, 4 insertions(+), 43 deletions(-) (limited to 'kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index fc9e8b37eaa8..bb5ad8a6a4c3 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -5,8 +5,6 @@ * Copyright (C) 1996-2000 Russell King - Converted to ARM. * Original Copyright (C) 1995 Linus Torvalds */ -#include - #include #include #include diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c index 43a16f922b53..43829e49ad93 100644 --- a/arch/arm/mach-bcm/bcm_kona_smc.c +++ b/arch/arm/mach-bcm/bcm_kona_smc.c @@ -10,8 +10,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ - -#include #include #include #include diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index c8989b999250..5f7ac9a0f9a3 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -6,9 +6,6 @@ * Copyright (C) 1996-2000 Russell King - Converted to ARM. * Copyright (C) 2012 ARM Ltd. 
*/ - -#include - #include #include #include diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index eb62429681fc..b0698d9ce14f 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -14,8 +14,6 @@ */ #define __KERNEL_SYSCALLS__ -#include - #include #include #include diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 184ec3c1eae4..38ec4ae81239 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -17,9 +17,6 @@ * Copyright (C) 2001-2014 Helge Deller * Copyright (C) 2002 Randolph Chung */ - -#include - #include #include #include diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index f620e04dc9bf..a1e7ba0fad09 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -11,7 +11,6 @@ #undef DEBUG -#include #include #include #include diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 93983d6d431d..bbbe0cfef746 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -8,9 +8,6 @@ /* * This file handles the architecture-dependent parts of process handling.. */ - -#include - #include #include #include diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index d33c58a58d4f..0cabcdfb23fd 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -9,9 +9,6 @@ /* * This file handles the architecture-dependent parts of process handling.. */ - -#include - #include #include #include diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c index 4016bc1d577e..7c3cec4c68cf 100644 --- a/arch/um/drivers/rtc_user.c +++ b/arch/um/drivers/rtc_user.c @@ -3,6 +3,7 @@ * Copyright (C) 2020 Intel Corporation * Author: Johannes Berg */ +#include #include #include #include diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c index bae53220ce26..e4ffeb9a1fa4 100644 --- a/arch/um/drivers/vector_user.c +++ b/arch/um/drivers/vector_user.c @@ -3,6 +3,7 @@ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) */ +#include #include #include #include diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h index 065829f443ae..86a8a573b65c 100644 --- a/arch/um/include/shared/irq_user.h +++ b/arch/um/include/shared/irq_user.h @@ -7,7 +7,6 @@ #define __IRQ_USER_H__ #include -#include enum um_irq_type { IRQ_READ, diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 60b84edc8a68..96d400387c93 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -8,7 +8,6 @@ #ifndef __OS_H__ #define __OS_H__ -#include #include #include #include diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index 6de99bb16113..6cf098c23a39 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -67,7 +67,7 @@ int signals_enabled; #ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT static int signals_blocked; #else -#define signals_blocked false +#define signals_blocked 0 #endif static unsigned int signals_pending; static unsigned int signals_active = 0; diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c index 07327425d06e..41297ec404bf 100644 --- a/arch/um/os-Linux/util.c +++ b/arch/um/os-Linux/util.c @@ -3,6 +3,7 @@ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) */ +#include #include #include #include diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 125b22205d38..33eba3df4dd9 100644 
--- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -8,7 +8,6 @@ #define pr_fmt(fmt) "xen-blkback: " fmt -#include #include #include #include diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7c4734f905d9..68fd451aca23 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -39,7 +39,6 @@ #include #include #include -#include #include "atomfirmware.h" diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h index c92a9508c8d3..0f9a5364cd86 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h @@ -25,7 +25,6 @@ #include #include #include -#include #include "msm_kms.h" #define MSM_DISP_SNAPSHOT_MAX_BLKS 10 diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c index d4759db002c6..dc634c2932fd 100644 --- a/drivers/macintosh/macio-adb.c +++ b/drivers/macintosh/macio-adb.c @@ -2,7 +2,6 @@ /* * Driver for the ADB controller in the Mac I/O (Hydra) chip. */ -#include #include #include #include diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 060e03f2264b..db9270da5b8e 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -23,8 +23,6 @@ * Apple's "ADB Analyzer" bus sniffer is invaluable: * ftp://ftp.apple.com/developer/Tool_Chest/Devices_-_Hardware/Apple_Desktop_Bus/ */ - -#include #include #include #include diff --git a/drivers/net/wireless/intersil/orinoco/hermes.c b/drivers/net/wireless/intersil/orinoco/hermes.c index 6d4b7f64efcf..256946552742 100644 --- a/drivers/net/wireless/intersil/orinoco/hermes.c +++ b/drivers/net/wireless/intersil/orinoco/hermes.c @@ -79,7 +79,6 @@ #undef HERMES_DEBUG #ifdef HERMES_DEBUG -#include #define DEBUG(lvl, stuff...) 
if ((lvl) <= HERMES_DEBUG) DMSG(stuff) diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index 0d2f10e4cbc8..dc65b0712261 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -7,7 +7,6 @@ #define IOSM_IPC_IMEM_H #include -#include #include "iosm_ipc_mmio.h" #include "iosm_ipc_pcie.h" diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h index b69ba6b360a2..4d7548686f39 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.h +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h @@ -5,7 +5,6 @@ #define ASPEED_PINMUX_H #include -#include /* * The ASPEED SoCs provide typically more than 200 pins for GPIO and other diff --git a/drivers/scsi/elx/efct/efct_driver.h b/drivers/scsi/elx/efct/efct_driver.h index dab8eac4f243..0e3c931db7c2 100644 --- a/drivers/scsi/elx/efct/efct_driver.h +++ b/drivers/scsi/elx/efct/efct_driver.h @@ -10,7 +10,6 @@ /*************************************************************************** * OS specific includes */ -#include #include #include #include diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h index eceeb5d160ad..4dbec4063b3d 100644 --- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h +++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h @@ -16,8 +16,6 @@ #ifndef __ISP_LOCAL_H_INCLUDED__ #define __ISP_LOCAL_H_INCLUDED__ -#include - #include "isp_global.h" #include diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 61ce0d142eea..0c5e565aa8cf 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -33,8 +33,6 @@ #define pr_fmt(fmt) "xen-pvscsi: " fmt -#include - #include #include #include diff --git a/include/linux/filter.h b/include/linux/filter.h index 83b896044e79..c1711c9f9439 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -5,8 +5,6 @@ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ -#include - #include #include #include diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index a7330eb3ec64..7dd1f01ec4f9 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -18,7 +18,6 @@ #ifndef mISDNIF_H #define mISDNIF_H -#include #include #include #include diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 9f50d22d68e6..4f9950678e7b 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -10,7 +10,6 @@ * 03/02/13 added new 2.5 kallsyms */ -#include #include #include #include diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h index 8a32c3c3d716..6c31b7373b78 100644 --- a/sound/aoa/codecs/onyx.h +++ b/sound/aoa/codecs/onyx.h @@ -6,7 +6,6 @@ */ #ifndef __SND_AOA_CODEC_ONYX_H #define __SND_AOA_CODEC_ONYX_H -#include #include #include #include diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c index ac246dd3ab49..ab19a37e2a68 100644 --- a/sound/aoa/codecs/tas.c +++ b/sound/aoa/codecs/tas.c @@ -58,7 +58,6 @@ * and up to the hardware designer to not wire * them up in some weird unusable way. 
*/ -#include #include #include #include diff --git a/sound/core/info.c b/sound/core/info.c index 9fec3070f8ba..a451b24199c3 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -16,7 +16,6 @@ #include #include #include -#include int snd_info_check_reserved_words(const char *str) { -- cgit v1.2.3 From 22f9feb49950885cdb6e37513f134d154175e743 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Jun 2021 19:37:00 +0200 Subject: dma-mapping: make the global coherent pool conditional Only build the code to support the global coherent pool if support for it is enabled. Signed-off-by: Christoph Hellwig Tested-by: Dillon Min --- include/linux/dma-map-ops.h | 18 +++++++++-------- kernel/dma/coherent.c | 49 +++++++++++++++++++++++++-------------------- 2 files changed, 37 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 068f1b11a6a4..0d5b06b3a4a6 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -171,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, size_t size, int *ret); - -void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle); -int dma_release_from_global_coherent(int order, void *vaddr); -int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, - size_t size, int *ret); -int dma_init_global_coherent(phys_addr_t phys_addr, size_t size); #else static inline int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size) @@ -187,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev, #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) #define dma_release_from_dev_coherent(dev, order, vaddr) (0) #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) +#endif /* CONFIG_DMA_DECLARE_COHERENT */ +#ifdef CONFIG_DMA_GLOBAL_POOL +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle); +int dma_release_from_global_coherent(int order, void *vaddr); +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, + size_t size, int *ret); +int dma_init_global_coherent(phys_addr_t phys_addr, size_t size); +#else static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle) { @@ -202,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, { return 0; } -#endif /* CONFIG_DMA_DECLARE_COHERENT */ +#endif /* CONFIG_DMA_GLOBAL_POOL */ /* * This is the actual return value from the ->alloc_noncontiguous method. 
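For context before the coherent.c half of the patch: CONFIG_DMA_GLOBAL_POOL is a Kconfig symbol selected by the platforms that need the shared pool, so after this change the global-pool code is compiled out everywhere else. A hedged sketch of how a platform might pull it in (ARCH_FOO is purely illustrative):

	config ARCH_FOO
		bool "Foo platform support"
		# Illustrative: only platforms needing the shared coherent pool
		# select DMA_GLOBAL_POOL; for all others the static inline stubs
		# in dma-map-ops.h above are used and kernel/dma/coherent.c is
		# built without the global-pool paths.
		select DMA_GLOBAL_POOL
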
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 160d4e246ecb..25fc85a7aebe 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -20,8 +20,6 @@ struct dma_coherent_mem { bool use_dev_dma_pfn_offset; }; -static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; - static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) { if (dev && dev->dma_mem) @@ -191,16 +189,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, return 1; } -void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle) -{ - if (!dma_coherent_default_memory) - return NULL; - - return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size, - dma_handle); -} - static int __dma_release_from_coherent(struct dma_coherent_mem *mem, int order, void *vaddr) { @@ -236,15 +224,6 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) return __dma_release_from_coherent(mem, order, vaddr); } -int dma_release_from_global_coherent(int order, void *vaddr) -{ - if (!dma_coherent_default_memory) - return 0; - - return __dma_release_from_coherent(dma_coherent_default_memory, order, - vaddr); -} - static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) { @@ -290,6 +269,28 @@ int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); } +#ifdef CONFIG_DMA_GLOBAL_POOL +static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; + +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle) +{ + if (!dma_coherent_default_memory) + return NULL; + + return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size, + dma_handle); +} + +int dma_release_from_global_coherent(int order, void *vaddr) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_release_from_coherent(dma_coherent_default_memory, order, + vaddr); +} + int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) { @@ -311,6 +312,7 @@ int dma_init_global_coherent(phys_addr_t phys_addr, size_t size) pr_info("DMA: default coherent area is set\n"); return 0; } +#endif /* CONFIG_DMA_GLOBAL_POOL */ /* * Support for reserved memory regions defined in device tree @@ -320,7 +322,9 @@ int dma_init_global_coherent(phys_addr_t phys_addr, size_t size) #include #include +#ifdef CONFIG_DMA_GLOBAL_POOL static struct reserved_mem *dma_reserved_default_memory __initdata; +#endif static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) { @@ -377,6 +381,7 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem) return 0; } +#ifdef CONFIG_DMA_GLOBAL_POOL static int __init dma_init_reserved_memory(void) { if (!dma_reserved_default_memory) @@ -384,8 +389,8 @@ static int __init dma_init_reserved_memory(void) return dma_init_global_coherent(dma_reserved_default_memory->base, dma_reserved_default_memory->size); } - core_initcall(dma_init_reserved_memory); +#endif /* CONFIG_DMA_GLOBAL_POOL */ RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); #endif -- cgit v1.2.3 From 1daf08a066cfe500587affd3fa3be8c13b8ff007 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 3 Aug 2021 16:16:09 +0200 Subject: livepatch: Replace deprecated CPU-hotplug functions. 
The functions get_online_cpus() and put_online_cpus() have been deprecated during the CPU hotplug rework. They map directly to cpus_read_lock() and cpus_read_unlock(). Replace deprecated CPU-hotplug functions with the official version. The behavior remains unchanged. Cc: Josh Poimboeuf Cc: Jiri Kosina Cc: Miroslav Benes Cc: Petr Mladek Cc: Joe Lawrence Cc: live-patching@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Petr Mladek Signed-off-by: Jiri Kosina --- kernel/livepatch/transition.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index 3a4beb9395c4..291b857a6e20 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -411,7 +411,7 @@ void klp_try_complete_transition(void) /* * Ditto for the idle "swapper" tasks. */ - get_online_cpus(); + cpus_read_lock(); for_each_possible_cpu(cpu) { task = idle_task(cpu); if (cpu_online(cpu)) { @@ -423,7 +423,7 @@ void klp_try_complete_transition(void) task->patch_state = klp_target_state; } } - put_online_cpus(); + cpus_read_unlock(); if (!complete) { if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT)) -- cgit v1.2.3 From 007517a01995fb24f2f4effc9cf34814361a9d10 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 19 Aug 2021 00:13:27 -0400 Subject: tracing/probe: Change traceprobe_set_print_fmt() to take a type Instead of a boolean "is_return" have traceprobe_set_print_fmt() take a type (currently just PROBE_PRINT_NORMAL and PROBE_PRINT_RETURN). This will simplify adding different types. For example, the development of the event_probe, will need its own type as it prints an event, and not an IP. Link: https://lkml.kernel.org/r/20210819041842.104626301@goodmis.org Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 9 +++++++-- kernel/trace/trace_probe.c | 18 ++++++++++++------ kernel/trace/trace_probe.h | 7 ++++++- kernel/trace/trace_uprobe.c | 8 ++++++-- 4 files changed, 31 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index ca726c9d0859..c6fe7a6e3f35 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -742,6 +742,7 @@ static int __trace_kprobe_create(int argc, const char *argv[]) bool is_return = false; char *symbol = NULL, *tmp = NULL; const char *event = NULL, *group = KPROBE_EVENT_SYSTEM; + enum probe_print_type ptype; int maxactive = 0; long offset = 0; void *addr = NULL; @@ -875,7 +876,8 @@ static int __trace_kprobe_create(int argc, const char *argv[]) goto error; /* This can be -ENOMEM */ } - ret = traceprobe_set_print_fmt(&tk->tp, is_return); + ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL; + ret = traceprobe_set_print_fmt(&tk->tp, ptype); if (ret < 0) goto error; @@ -1799,6 +1801,7 @@ struct trace_event_call * create_local_trace_kprobe(char *func, void *addr, unsigned long offs, bool is_return) { + enum probe_print_type ptype; struct trace_kprobe *tk; int ret; char *event; @@ -1822,7 +1825,9 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, init_trace_event_call(tk); - if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) { + ptype = trace_kprobe_is_return(tk) ? 
+ PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL; + if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) { ret = -ENOMEM; goto error; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 0916a9964719..9c9c83a063b2 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -851,19 +851,25 @@ int traceprobe_update_arg(struct probe_arg *arg) /* When len=0, we just calculate the needed length */ #define LEN_OR_ZERO (len ? len - pos : 0) static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, - bool is_return) + enum probe_print_type ptype) { struct probe_arg *parg; int i, j; int pos = 0; const char *fmt, *arg; - if (!is_return) { + switch (ptype) { + case PROBE_PRINT_NORMAL: fmt = "(%lx)"; arg = "REC->" FIELD_STRING_IP; - } else { + break; + case PROBE_PRINT_RETURN: fmt = "(%lx <- %lx)"; arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; + break; + default: + WARN_ON_ONCE(1); + return 0; } pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); @@ -912,20 +918,20 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, } #undef LEN_OR_ZERO -int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return) +int traceprobe_set_print_fmt(struct trace_probe *tp, enum probe_print_type ptype) { struct trace_event_call *call = trace_probe_event_call(tp); int len; char *print_fmt; /* First: called with 0 length to calculate the needed length */ - len = __set_print_fmt(tp, NULL, 0, is_return); + len = __set_print_fmt(tp, NULL, 0, ptype); print_fmt = kmalloc(len + 1, GFP_KERNEL); if (!print_fmt) return -ENOMEM; /* Second: actually write the @print_fmt */ - __set_print_fmt(tp, print_fmt, len + 1, is_return); + __set_print_fmt(tp, print_fmt, len + 1, ptype); call->print_fmt = print_fmt; return 0; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 42aa084902fa..8adf5f3542a6 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -363,7 +363,12 @@ extern int traceprobe_split_symbol_offset(char *symbol, long *offset); int traceprobe_parse_event_name(const char **pevent, const char **pgroup, char *buf, int offset); -extern int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return); +enum probe_print_type { + PROBE_PRINT_NORMAL, + PROBE_PRINT_RETURN, +}; + +extern int traceprobe_set_print_fmt(struct trace_probe *tp, enum probe_print_type ptype); #ifdef CONFIG_PERF_EVENTS extern struct trace_event_call * diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 590bb9a02f8d..09f8ca7f7ba0 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -536,6 +536,7 @@ static int __trace_uprobe_create(int argc, const char **argv) const char *event = NULL, *group = UPROBE_EVENT_SYSTEM; char *arg, *filename, *rctr, *rctr_end, *tmp; char buf[MAX_EVENT_NAME_LEN]; + enum probe_print_type ptype; struct path path; unsigned long offset, ref_ctr_offset; bool is_return = false; @@ -687,7 +688,8 @@ static int __trace_uprobe_create(int argc, const char **argv) goto error; } - ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)); + ptype = is_ret_probe(tu) ? 
PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL; + ret = traceprobe_set_print_fmt(&tu->tp, ptype); if (ret < 0) goto error; @@ -1578,6 +1580,7 @@ struct trace_event_call * create_local_trace_uprobe(char *name, unsigned long offs, unsigned long ref_ctr_offset, bool is_return) { + enum probe_print_type ptype; struct trace_uprobe *tu; struct path path; int ret; @@ -1612,7 +1615,8 @@ create_local_trace_uprobe(char *name, unsigned long offs, tu->filename = kstrdup(name, GFP_KERNEL); init_trace_event_call(tu); - if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) { + ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL; + if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) { ret = -ENOMEM; goto error; } -- cgit v1.2.3 From 8565a45d0858078b63c7d84074a21a42ba9ebf01 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 19 Aug 2021 00:13:28 -0400 Subject: tracing/probes: Have process_fetch_insn() take a void * instead of pt_regs In preparation to allow event probes to use the process_fetch_insn() callback in trace_probe_tmpl.h, change the data passed to it from a pointer to pt_regs, as the event probe will not be using regs, and make it a void pointer instead. Update the process_fetch_insn() callers for kprobe and uprobe events to have the regs defined in the function and just typecast the void pointer parameter. Link: https://lkml.kernel.org/r/20210819041842.291622924@goodmis.org Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 3 ++- kernel/trace/trace_probe_tmpl.h | 6 +++--- kernel/trace/trace_uprobe.c | 3 ++- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index c6fe7a6e3f35..4b013d24f5a9 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1325,9 +1325,10 @@ probe_mem_read(void *dest, void *src, size_t size) /* Note that we don't verify it, since the code does not come from user space */ static int -process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, +process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, void *base) { + struct pt_regs *regs = rec; unsigned long val; retry: diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index f003c5d02a3a..b3bdb8ddb862 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -54,7 +54,7 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf) * If dest is NULL, don't store result and return required dynamic data size. 
*/ static int -process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, +process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, void *base); static nokprobe_inline int fetch_store_strlen(unsigned long addr); static nokprobe_inline int @@ -188,7 +188,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs) /* Store the value of each argument */ static nokprobe_inline void -store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, +store_trace_args(void *data, struct trace_probe *tp, void *rec, int header_size, int maxlen) { struct probe_arg *arg; @@ -203,7 +203,7 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, /* Point the dynamic data area if needed */ if (unlikely(arg->dynamic)) *dl = make_data_loc(maxlen, dyndata - base); - ret = process_fetch_insn(arg->code, regs, dl, base); + ret = process_fetch_insn(arg->code, rec, dl, base); if (unlikely(ret < 0 && arg->dynamic)) { *dl = make_data_loc(0, dyndata - base); } else { diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 09f8ca7f7ba0..d219ba50efbd 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -213,9 +213,10 @@ static unsigned long translate_user_vaddr(unsigned long file_offset) /* Note that we don't verify it, since the code does not come from user space */ static int -process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, +process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, void *base) { + struct pt_regs *regs = rec; unsigned long val; /* 1st stage: get value from context */ -- cgit v1.2.3 From 8e242060c6a4947e8ae7d29794af6a581db08841 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 19 Aug 2021 19:26:02 +0900 Subject: tracing/probes: Reject events which have the same name of existing one Since kprobe_events and uprobe_events only check whether the other same-type probe event has the same name or not, if the user gives the same name of the existing tracepoint event (or the other type of probe events), it silently fails to create the tracefs entry (but registered.) as below. /sys/kernel/tracing # ls events/task/task_rename enable filter format hist id trigger /sys/kernel/tracing # echo p:task/task_rename vfs_read >> kprobe_events [ 113.048508] Could not create tracefs 'task_rename' directory /sys/kernel/tracing # cat kprobe_events p:task/task_rename vfs_read To fix this issue, check whether the existing events have the same name or not in trace_probe_register_event_call(). If exists, it rejects to register the new event. 
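With the check in place the collision is reported to the user instead of leaving a half-registered event, roughly like this (shell and error_log output approximated; the error string is the EVENT_EXIST message added below, and the file paths follow the example above):

	/sys/kernel/tracing # echo 'p:task/task_rename vfs_read' >> kprobe_events
	sh: write error: File exists
	/sys/kernel/tracing # cat error_log
	[  113.048508] trace_kprobe: error: Given group/event name is already used by another event
	  Command: p:task/task_rename vfs_read
	           ^
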
Link: https://lkml.kernel.org/r/162936876189.187130.17558311387542061930.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 6 +++++- kernel/trace/trace_probe.c | 25 +++++++++++++++++++++++++ kernel/trace/trace_probe.h | 1 + kernel/trace/trace_uprobe.c | 6 +++++- 4 files changed, 36 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 4b013d24f5a9..882c27044029 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -647,7 +647,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk) /* Register new event */ ret = register_kprobe_event(tk); if (ret) { - pr_warn("Failed to register probe event(%d)\n", ret); + if (ret == -EEXIST) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, EVENT_EXIST); + } else + pr_warn("Failed to register probe event(%d)\n", ret); goto end; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 9c9c83a063b2..782c00eb6859 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -1047,11 +1047,36 @@ error: return ret; } +static struct trace_event_call * +find_trace_event_call(const char *system, const char *event_name) +{ + struct trace_event_call *tp_event; + const char *name; + + list_for_each_entry(tp_event, &ftrace_events, list) { + if (!tp_event->class->system || + strcmp(system, tp_event->class->system)) + continue; + name = trace_event_name(tp_event); + if (!name || strcmp(event_name, name)) + continue; + return tp_event; + } + + return NULL; +} + int trace_probe_register_event_call(struct trace_probe *tp) { struct trace_event_call *call = trace_probe_event_call(tp); int ret; + lockdep_assert_held(&event_mutex); + + if (find_trace_event_call(trace_probe_group_name(tp), + trace_probe_name(tp))) + return -EEXIST; + ret = register_trace_event(&call->event); if (!ret) return -ENODEV; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 8adf5f3542a6..66701a92d186 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -404,6 +404,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(NO_EVENT_NAME, "Event name is not specified"), \ C(EVENT_TOO_LONG, "Event name is too long"), \ C(BAD_EVENT_NAME, "Event name must follow the same rules as C identifiers"), \ + C(EVENT_EXIST, "Given group/event name is already used by another event"), \ C(RETVAL_ON_PROBE, "$retval is not available on probe"), \ C(BAD_STACK_NUM, "Invalid stack number"), \ C(BAD_ARG_NUM, "Invalid argument number"), \ diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d219ba50efbd..225ce569bf8f 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -515,7 +515,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu) ret = register_uprobe_event(tu); if (ret) { - pr_warn("Failed to register probe event(%d)\n", ret); + if (ret == -EEXIST) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, EVENT_EXIST); + } else + pr_warn("Failed to register probe event(%d)\n", ret); goto end; } -- cgit v1.2.3 From 7491e2c442781a1860181adb5ab472a52075f393 Mon Sep 17 00:00:00 2001 From: "Tzvetomir Stoyanov (VMware)" Date: Thu, 19 Aug 2021 11:26:06 -0400 Subject: tracing: Add a probe that attaches to trace events A new dynamic event is introduced: event probe. The event is attached to an existing tracepoint and uses its fields as arguments. 
The user can specify custom format string of the new event, select what tracepoint arguments will be printed and how to print them. An event probe is created by writing configuration string in 'dynamic_events' ftrace file: e[:[SNAME/]ENAME] SYSTEM/EVENT [FETCHARGS] - Set an event probe -:SNAME/ENAME - Delete an event probe Where: SNAME - System name, if omitted 'eprobes' is used. ENAME - Name of the new event in SNAME, if omitted the SYSTEM_EVENT is used. SYSTEM - Name of the system, where the tracepoint is defined, mandatory. EVENT - Name of the tracepoint event in SYSTEM, mandatory. FETCHARGS - Arguments: =$[:TYPE] - Fetch given filed of the tracepoint and print it as given TYPE with given name. Supported types are: (u8/u16/u32/u64/s8/s16/s32/s64), basic type (x8/x16/x32/x64), hexadecimal types "string", "ustring" and bitfield. Example, attach an event probe on openat system call and print name of the file that will be opened: echo "e:esys/eopen syscalls/sys_enter_openat file=\$filename:string" >> dynamic_events A new dynamic event is created in events/esys/eopen/ directory. It can be deleted with: echo "-:esys/eopen" >> dynamic_events Filters, triggers and histograms can be attached to the new event, it can be matched in synthetic events. There is one limitation - an event probe can not be attached to kprobe, uprobe or another event probe. Link: https://lkml.kernel.org/r/20210812145805.2292326-1-tz.stoyanov@gmail.com Link: https://lkml.kernel.org/r/20210819152825.142428383@goodmis.org Acked-by: Masami Hiramatsu Co-developed-by: Steven Rostedt (VMware) Signed-off-by: Tzvetomir Stoyanov (VMware) Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 4 + kernel/trace/Makefile | 1 + kernel/trace/trace.c | 5 +- kernel/trace/trace.h | 18 + kernel/trace/trace_eprobe.c | 903 ++++++++++++++++++++++++++++++++++++ kernel/trace/trace_events_trigger.c | 14 +- kernel/trace/trace_kprobe.c | 8 - kernel/trace/trace_probe.c | 16 +- kernel/trace/trace_probe.h | 6 +- 9 files changed, 962 insertions(+), 13 deletions(-) create mode 100644 kernel/trace/trace_eprobe.c (limited to 'kernel') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 9564c4d9a3b6..0a0144580bbd 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -313,6 +313,7 @@ enum { TRACE_EVENT_FL_DYNAMIC_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, + TRACE_EVENT_FL_EPROBE_BIT, }; /* @@ -325,6 +326,7 @@ enum { * DYNAMIC - Event is a dynamic event (created at run time) * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe + * EPROBE - Event is an event probe */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), @@ -335,6 +337,7 @@ enum { TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), + TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -680,6 +683,7 @@ enum event_trigger_type { ETT_EVENT_ENABLE = (1 << 3), ETT_EVENT_HIST = (1 << 4), ETT_HIST_ENABLE = (1 << 5), + ETT_EVENT_EPROBE = (1 << 6), }; extern int filter_match_preds(struct event_filter *filter, void *rec); diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index b1c47ccf4f73..6de5d4d63165 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -77,6 +77,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o endif obj-$(CONFIG_EVENT_TRACING) += 
trace_events_filter.o obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o +obj-$(CONFIG_PROBE_EVENTS) += trace_eprobe.o obj-$(CONFIG_TRACE_EVENT_INJECT) += trace_events_inject.o obj-$(CONFIG_SYNTH_EVENTS) += trace_events_synth.o obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8425c3d70895..489924cde4f8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5543,6 +5543,7 @@ static const char readme_msg[] = #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/] []\n" #endif + "\t e[:[/]] . []\n" "\t -:[/]\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [:][+]|\n" @@ -5552,7 +5553,7 @@ static const char readme_msg[] = " place (uprobe): :[%return][(ref_ctr_offset)]\n" #endif "\t args: =fetcharg[:type]\n" - "\t fetcharg: %, @
, @[+|-],\n" + "\t fetcharg: (%|$), @
, @[+|-],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack, $stack, $retval, $comm, $arg,\n" #else @@ -5567,6 +5568,8 @@ static const char readme_msg[] = "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif + "\t efield: For event probes ('e' types), the field is on of the fields\n" + "\t of the /.\n" #endif " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4a0e693000c6..b7c0f8e160fb 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -126,6 +126,11 @@ struct kprobe_trace_entry_head { unsigned long ip; }; +struct eprobe_trace_entry_head { + struct trace_entry ent; + unsigned int type; +}; + struct kretprobe_trace_entry_head { struct trace_entry ent; unsigned long func; @@ -1508,9 +1513,14 @@ static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; } extern int register_trigger_cmds(void); extern void clear_event_triggers(struct trace_array *tr); +enum { + EVENT_TRIGGER_FL_PROBE = BIT(0), +}; + struct event_trigger_data { unsigned long count; int ref; + int flags; struct event_trigger_ops *ops; struct event_command *cmd_ops; struct event_filter __rcu *filter; @@ -1918,6 +1928,14 @@ static inline bool is_good_name(const char *name) return true; } +/* Convert certain expected symbols into '_' when generating event names */ +static inline void sanitize_event_name(char *name) +{ + while (*name++ != '\0') + if (*name == ':' || *name == '.') + *name = '_'; +} + /* * This is a generic way to read and write a u64 value from a file in tracefs. * diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c new file mode 100644 index 000000000000..56a96e9750cf --- /dev/null +++ b/kernel/trace/trace_eprobe.c @@ -0,0 +1,903 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * event probes + * + * Part of this code was copied from kernel/trace/trace_kprobe.c written by + * Masami Hiramatsu + * + * Copyright (C) 2021, VMware Inc, Steven Rostedt + * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov tz.stoyanov@gmail.com> + * + */ +#include +#include +#include + +#include "trace_dynevent.h" +#include "trace_probe.h" +#include "trace_probe_tmpl.h" + +#define EPROBE_EVENT_SYSTEM "eprobes" + +struct trace_eprobe { + /* tracepoint system */ + const char *event_system; + + /* tracepoint event */ + const char *event_name; + + struct trace_event_call *event; + + struct dyn_event devent; + struct trace_probe tp; +}; + +struct eprobe_data { + struct trace_event_file *file; + struct trace_eprobe *ep; +}; + +static int __trace_eprobe_create(int argc, const char *argv[]); + +static void trace_event_probe_cleanup(struct trace_eprobe *ep) +{ + if (!ep) + return; + trace_probe_cleanup(&ep->tp); + kfree(ep->event_name); + kfree(ep->event_system); + if (ep->event) + trace_event_put_ref(ep->event); + kfree(ep); +} + +static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev) +{ + return container_of(ev, struct trace_eprobe, devent); +} + +static int eprobe_dyn_event_create(const char *raw_command) +{ + return trace_probe_create(raw_command, __trace_eprobe_create); +} + +static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev) +{ + struct trace_eprobe *ep = to_trace_eprobe(ev); + int i; + + seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp), + trace_probe_name(&ep->tp)); + seq_printf(m, " %s.%s", ep->event_system, ep->event_name); + + for (i = 0; i < ep->tp.nr_args; 
i++) + seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm); + seq_putc(m, '\n'); + + return 0; +} + +static int unregister_trace_eprobe(struct trace_eprobe *ep) +{ + /* If other probes are on the event, just unregister eprobe */ + if (trace_probe_has_sibling(&ep->tp)) + goto unreg; + + /* Enabled event can not be unregistered */ + if (trace_probe_is_enabled(&ep->tp)) + return -EBUSY; + + /* Will fail if probe is being used by ftrace or perf */ + if (trace_probe_unregister_event_call(&ep->tp)) + return -EBUSY; + +unreg: + dyn_event_remove(&ep->devent); + trace_probe_unlink(&ep->tp); + + return 0; +} + +static int eprobe_dyn_event_release(struct dyn_event *ev) +{ + struct trace_eprobe *ep = to_trace_eprobe(ev); + int ret = unregister_trace_eprobe(ep); + + if (!ret) + trace_event_probe_cleanup(ep); + return ret; +} + +static bool eprobe_dyn_event_is_busy(struct dyn_event *ev) +{ + struct trace_eprobe *ep = to_trace_eprobe(ev); + + return trace_probe_is_enabled(&ep->tp); +} + +static bool eprobe_dyn_event_match(const char *system, const char *event, + int argc, const char **argv, struct dyn_event *ev) +{ + struct trace_eprobe *ep = to_trace_eprobe(ev); + + return strcmp(trace_probe_name(&ep->tp), event) == 0 && + (!system || strcmp(trace_probe_group_name(&ep->tp), system) == 0) && + trace_probe_match_command_args(&ep->tp, argc, argv); +} + +static struct dyn_event_operations eprobe_dyn_event_ops = { + .create = eprobe_dyn_event_create, + .show = eprobe_dyn_event_show, + .is_busy = eprobe_dyn_event_is_busy, + .free = eprobe_dyn_event_release, + .match = eprobe_dyn_event_match, +}; + +static struct trace_eprobe *alloc_event_probe(const char *group, + const char *this_event, + struct trace_event_call *event, + int nargs) +{ + struct trace_eprobe *ep; + const char *event_name; + const char *sys_name; + int ret = -ENOMEM; + + if (!event) + return ERR_PTR(-ENODEV); + + sys_name = event->class->system; + event_name = trace_event_name(event); + + ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL); + if (!ep) { + trace_event_put_ref(ep->event); + goto error; + } + ep->event = event; + ep->event_name = kstrdup(event_name, GFP_KERNEL); + if (!ep->event_name) + goto error; + ep->event_system = kstrdup(sys_name, GFP_KERNEL); + if (!ep->event_system) + goto error; + + ret = trace_probe_init(&ep->tp, this_event, group, false); + if (ret < 0) + goto error; + + dyn_event_init(&ep->devent, &eprobe_dyn_event_ops); + return ep; +error: + trace_event_probe_cleanup(ep); + return ERR_PTR(ret); +} + +static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i) +{ + struct probe_arg *parg = &ep->tp.args[i]; + struct ftrace_event_field *field; + struct list_head *head; + + head = trace_get_fields(ep->event); + list_for_each_entry(field, head, link) { + if (!strcmp(parg->code->data, field->name)) { + kfree(parg->code->data); + parg->code->data = field; + return 0; + } + } + kfree(parg->code->data); + parg->code->data = NULL; + return -ENOENT; +} + +static int eprobe_event_define_fields(struct trace_event_call *event_call) +{ + int ret; + struct eprobe_trace_entry_head field; + struct trace_probe *tp; + + tp = trace_probe_primary_from_call(event_call); + if (WARN_ON_ONCE(!tp)) + return -ENOENT; + + DEFINE_FIELD(unsigned int, type, FIELD_STRING_TYPE, 0); + + return traceprobe_define_arg_fields(event_call, sizeof(field), tp); +} + +static struct trace_event_fields eprobe_fields_array[] = { + { .type = TRACE_FUNCTION_TYPE, + .define_fields = eprobe_event_define_fields }, + {} +}; + +/* Event 
entry printers */ +static enum print_line_t +print_eprobe_event(struct trace_iterator *iter, int flags, + struct trace_event *event) +{ + struct eprobe_trace_entry_head *field; + struct trace_event_call *pevent; + struct trace_event *probed_event; + struct trace_seq *s = &iter->seq; + struct trace_probe *tp; + + field = (struct eprobe_trace_entry_head *)iter->ent; + tp = trace_probe_primary_from_call( + container_of(event, struct trace_event_call, event)); + if (WARN_ON_ONCE(!tp)) + goto out; + + trace_seq_printf(s, "%s: (", trace_probe_name(tp)); + + probed_event = ftrace_find_event(field->type); + if (probed_event) { + pevent = container_of(probed_event, struct trace_event_call, event); + trace_seq_printf(s, "%s.%s", pevent->class->system, + trace_event_name(pevent)); + } else { + trace_seq_printf(s, "%u", field->type); + } + + trace_seq_putc(s, ')'); + + if (print_probe_args(s, tp->args, tp->nr_args, + (u8 *)&field[1], field) < 0) + goto out; + + trace_seq_putc(s, '\n'); + out: + return trace_handle_return(s); +} + +static unsigned long get_event_field(struct fetch_insn *code, void *rec) +{ + struct ftrace_event_field *field = code->data; + unsigned long val; + void *addr; + + addr = rec + field->offset; + + switch (field->size) { + case 1: + if (field->is_signed) + val = *(char *)addr; + else + val = *(unsigned char *)addr; + break; + case 2: + if (field->is_signed) + val = *(short *)addr; + else + val = *(unsigned short *)addr; + break; + case 4: + if (field->is_signed) + val = *(int *)addr; + else + val = *(unsigned int *)addr; + break; + default: + if (field->is_signed) + val = *(long *)addr; + else + val = *(unsigned long *)addr; + break; + } + return val; +} + +static int get_eprobe_size(struct trace_probe *tp, void *rec) +{ + struct probe_arg *arg; + int i, len, ret = 0; + + for (i = 0; i < tp->nr_args; i++) { + arg = tp->args + i; + if (unlikely(arg->dynamic)) { + unsigned long val; + + val = get_event_field(arg->code, rec); + len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL); + if (len > 0) + ret += len; + } + } + + return ret; +} + +/* Event probe specific fetch functions */ + +/* Note that we don't verify it, since the code does not come from user space */ +static int +process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, + void *base) +{ + unsigned long val; + + val = get_event_field(code, rec); + return process_fetch_insn_bottom(code + 1, val, dest, base); +} +NOKPROBE_SYMBOL(process_fetch_insn); + +/* Return the length of the string -- including the terminating null byte */ +static nokprobe_inline int +fetch_store_strlen_user(unsigned long addr) +{ + const void __user *uaddr = (__force const void __user *)addr; + + return strnlen_user_nofault(uaddr, MAX_STRING_SIZE); +} + +/* Return the length of the string -- including the terminating null byte */ +static nokprobe_inline int +fetch_store_strlen(unsigned long addr) +{ + int ret, len = 0; + u8 c; + +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + if (addr < TASK_SIZE) + return fetch_store_strlen_user(addr); +#endif + + do { + ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1); + len++; + } while (c && ret == 0 && len < MAX_STRING_SIZE); + + return (ret < 0) ? ret : len; +} + +/* + * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf + * with max length and relative data location.
+ */ +static nokprobe_inline int +fetch_store_string_user(unsigned long addr, void *dest, void *base) +{ + const void __user *uaddr = (__force const void __user *)addr; + int maxlen = get_loc_len(*(u32 *)dest); + void *__dest; + long ret; + + if (unlikely(!maxlen)) + return -ENOMEM; + + __dest = get_loc_data(dest, base); + + ret = strncpy_from_user_nofault(__dest, uaddr, maxlen); + if (ret >= 0) + *(u32 *)dest = make_data_loc(ret, __dest - base); + + return ret; +} + +/* + * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max + * length and relative data location. + */ +static nokprobe_inline int +fetch_store_string(unsigned long addr, void *dest, void *base) +{ + int maxlen = get_loc_len(*(u32 *)dest); + void *__dest; + long ret; + +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + if ((unsigned long)addr < TASK_SIZE) + return fetch_store_string_user(addr, dest, base); +#endif + + if (unlikely(!maxlen)) + return -ENOMEM; + + __dest = get_loc_data(dest, base); + + /* + * Try to get string again, since the string can be changed while + * probing. + */ + ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen); + if (ret >= 0) + *(u32 *)dest = make_data_loc(ret, __dest - base); + + return ret; +} + +static nokprobe_inline int +probe_mem_read_user(void *dest, void *src, size_t size) +{ + const void __user *uaddr = (__force const void __user *)src; + + return copy_from_user_nofault(dest, uaddr, size); +} + +static nokprobe_inline int +probe_mem_read(void *dest, void *src, size_t size) +{ +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + if ((unsigned long)src < TASK_SIZE) + return probe_mem_read_user(dest, src, size); +#endif + return copy_from_kernel_nofault(dest, src, size); +} + +/* eprobe handler */ +static inline void +__eprobe_trace_func(struct eprobe_data *edata, void *rec) +{ + struct eprobe_trace_entry_head *entry; + struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp); + struct trace_event_buffer fbuffer; + int dsize; + + if (WARN_ON_ONCE(call != edata->file->event_call)) + return; + + if (trace_trigger_soft_disabled(edata->file)) + return; + + fbuffer.trace_ctx = tracing_gen_ctx(); + fbuffer.trace_file = edata->file; + + dsize = get_eprobe_size(&edata->ep->tp, rec); + fbuffer.regs = NULL; + + fbuffer.event = + trace_event_buffer_lock_reserve(&fbuffer.buffer, edata->file, + call->event.type, + sizeof(*entry) + edata->ep->tp.size + dsize, + fbuffer.trace_ctx); + if (!fbuffer.event) + return; + + entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event); + if (edata->ep->event) + entry->type = edata->ep->event->event.type; + else + entry->type = 0; + store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize); + + trace_event_buffer_commit(&fbuffer); +} + +/* + * The event probe implementation uses event triggers to get access to + * the event it is attached to, but is not an actual trigger. The below + * functions are just stubs to fulfill what is needed to use the trigger + * infrastructure. 
+ */ +static int eprobe_trigger_init(struct event_trigger_ops *ops, + struct event_trigger_data *data) +{ + return 0; +} + +static void eprobe_trigger_free(struct event_trigger_ops *ops, + struct event_trigger_data *data) +{ + +} + +static int eprobe_trigger_print(struct seq_file *m, + struct event_trigger_ops *ops, + struct event_trigger_data *data) +{ + /* Do not print eprobe event triggers */ + return 0; +} + +static void eprobe_trigger_func(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, + struct ring_buffer_event *rbe) +{ + struct eprobe_data *edata = data->private_data; + + __eprobe_trace_func(edata, rec); +} + +static struct event_trigger_ops eprobe_trigger_ops = { + .func = eprobe_trigger_func, + .print = eprobe_trigger_print, + .init = eprobe_trigger_init, + .free = eprobe_trigger_free, +}; + +static int eprobe_trigger_cmd_func(struct event_command *cmd_ops, + struct trace_event_file *file, + char *glob, char *cmd, char *param) +{ + return -1; +} + +static int eprobe_trigger_reg_func(char *glob, struct event_trigger_ops *ops, + struct event_trigger_data *data, + struct trace_event_file *file) +{ + return -1; +} + +static void eprobe_trigger_unreg_func(char *glob, struct event_trigger_ops *ops, + struct event_trigger_data *data, + struct trace_event_file *file) +{ + +} + +static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd, + char *param) +{ + return &eprobe_trigger_ops; +} + +static struct event_command event_trigger_cmd = { + .name = "eprobe", + .trigger_type = ETT_EVENT_EPROBE, + .flags = EVENT_CMD_FL_NEEDS_REC, + .func = eprobe_trigger_cmd_func, + .reg = eprobe_trigger_reg_func, + .unreg = eprobe_trigger_unreg_func, + .unreg_all = NULL, + .get_trigger_ops = eprobe_trigger_get_ops, + .set_filter = NULL, +}; + +static struct event_trigger_data * +new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file) +{ + struct event_trigger_data *trigger; + struct eprobe_data *edata; + + edata = kzalloc(sizeof(*edata), GFP_KERNEL); + trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); + if (!trigger || !edata) { + kfree(edata); + kfree(trigger); + return ERR_PTR(-ENOMEM); + } + + trigger->flags = EVENT_TRIGGER_FL_PROBE; + trigger->count = -1; + trigger->ops = &eprobe_trigger_ops; + + /* + * EVENT PROBE triggers are not registered as commands with + * register_event_command(), as they are not controlled by the user + * from the trigger file + */ + trigger->cmd_ops = &event_trigger_cmd; + + INIT_LIST_HEAD(&trigger->list); + RCU_INIT_POINTER(trigger->filter, NULL); + + edata->file = file; + edata->ep = ep; + trigger->private_data = edata; + + return trigger; +} + +static int enable_eprobe(struct trace_eprobe *ep, + struct trace_event_file *eprobe_file) +{ + struct event_trigger_data *trigger; + struct trace_event_file *file; + struct trace_array *tr = eprobe_file->tr; + + file = find_event_file(tr, ep->event_system, ep->event_name); + if (!file) + return -ENOENT; + trigger = new_eprobe_trigger(ep, eprobe_file); + if (IS_ERR(trigger)) + return PTR_ERR(trigger); + + list_add_tail_rcu(&trigger->list, &file->triggers); + + trace_event_trigger_enable_disable(file, 1); + update_cond_flag(file); + + return 0; +} + +static struct trace_event_functions eprobe_funcs = { + .trace = print_eprobe_event +}; + +static int disable_eprobe(struct trace_eprobe *ep, + struct trace_array *tr) +{ + struct event_trigger_data *trigger; + struct trace_event_file *file; + struct eprobe_data *edata; + + file = find_event_file(tr, ep->event_system, ep->event_name); + 
if (!file) + return -ENOENT; + + list_for_each_entry(trigger, &file->triggers, list) { + if (!(trigger->flags & EVENT_TRIGGER_FL_PROBE)) + continue; + edata = trigger->private_data; + if (edata->ep == ep) + break; + } + if (list_entry_is_head(trigger, &file->triggers, list)) + return -ENODEV; + + list_del_rcu(&trigger->list); + + trace_event_trigger_enable_disable(file, 0); + update_cond_flag(file); + return 0; +} + +static int enable_trace_eprobe(struct trace_event_call *call, + struct trace_event_file *file) +{ + struct trace_probe *pos, *tp; + struct trace_eprobe *ep; + bool enabled; + int ret = 0; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return -ENODEV; + enabled = trace_probe_is_enabled(tp); + + /* This also changes "enabled" state */ + if (file) { + ret = trace_probe_add_file(tp, file); + if (ret) + return ret; + } else + trace_probe_set_flag(tp, TP_FLAG_PROFILE); + + if (enabled) + return 0; + + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + ep = container_of(pos, struct trace_eprobe, tp); + ret = enable_eprobe(ep, file); + if (ret) + break; + enabled = true; + } + + if (ret) { + /* Failed to enable one of them. Roll back all */ + if (enabled) + disable_eprobe(ep, file->tr); + if (file) + trace_probe_remove_file(tp, file); + else + trace_probe_clear_flag(tp, TP_FLAG_PROFILE); + } + + return ret; +} + +static int disable_trace_eprobe(struct trace_event_call *call, + struct trace_event_file *file) +{ + struct trace_probe *pos, *tp; + struct trace_eprobe *ep; + + tp = trace_probe_primary_from_call(call); + if (WARN_ON_ONCE(!tp)) + return -ENODEV; + + if (file) { + if (!trace_probe_get_file_link(tp, file)) + return -ENOENT; + if (!trace_probe_has_single_file(tp)) + goto out; + trace_probe_clear_flag(tp, TP_FLAG_TRACE); + } else + trace_probe_clear_flag(tp, TP_FLAG_PROFILE); + + if (!trace_probe_is_enabled(tp)) { + list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + ep = container_of(pos, struct trace_eprobe, tp); + disable_eprobe(ep, file->tr); + } + } + + out: + if (file) + /* + * Synchronization is done in the function below. For perf events, + * file == NULL and perf_trace_event_unreg() calls + * tracepoint_synchronize_unregister() to ensure the event is + * synchronized, so we don't need to care about it here.
+ */ + trace_probe_remove_file(tp, file); + + return 0; +} + +static int eprobe_register(struct trace_event_call *event, + enum trace_reg type, void *data) +{ + struct trace_event_file *file = data; + + switch (type) { + case TRACE_REG_REGISTER: + return enable_trace_eprobe(event, file); + case TRACE_REG_UNREGISTER: + return disable_trace_eprobe(event, file); +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: + case TRACE_REG_PERF_UNREGISTER: + case TRACE_REG_PERF_OPEN: + case TRACE_REG_PERF_CLOSE: + case TRACE_REG_PERF_ADD: + case TRACE_REG_PERF_DEL: + return 0; +#endif + } + return 0; +} + +static inline void init_trace_eprobe_call(struct trace_eprobe *ep) +{ + struct trace_event_call *call = trace_probe_event_call(&ep->tp); + + call->flags = TRACE_EVENT_FL_EPROBE; + call->event.funcs = &eprobe_funcs; + call->class->fields_array = eprobe_fields_array; + call->class->reg = eprobe_register; +} + +static struct trace_event_call * +find_and_get_event(const char *system, const char *event_name) +{ + struct trace_event_call *tp_event; + const char *name; + + list_for_each_entry(tp_event, &ftrace_events, list) { + /* Skip other probes and ftrace events */ + if (tp_event->flags & + (TRACE_EVENT_FL_IGNORE_ENABLE | + TRACE_EVENT_FL_KPROBE | + TRACE_EVENT_FL_UPROBE | + TRACE_EVENT_FL_EPROBE)) + continue; + if (!tp_event->class->system || + strcmp(system, tp_event->class->system)) + continue; + name = trace_event_name(tp_event); + if (!name || strcmp(event_name, name)) + continue; + if (!trace_event_try_get_ref(tp_event)) + return NULL; + return tp_event; + } + return NULL; +} + +static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[], int i) +{ + unsigned int flags = TPARG_FL_KERNEL | TPARG_FL_TPOINT; + int ret; + + ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], flags); + if (ret) + return ret; + + if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG) + ret = trace_eprobe_tp_arg_update(ep, i); + + return ret; +} + +static int __trace_eprobe_create(int argc, const char *argv[]) +{ + /* + * Argument syntax: + * e[:[GRP/]ENAME] SYSTEM.EVENT [FETCHARGS] + * Fetch args: + * <name>=$<field>[:TYPE]
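+ * + * An example (illustrative only, not part of this patch; the event and + * field names are just an example): + * e:myprobe sched.sched_switch nextpid=$next_pid:s32 + * would create an event probe "myprobe" attached to the + * sched/sched_switch trace event, recording its next_pid field.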
+ */ + const char *event = NULL, *group = EPROBE_EVENT_SYSTEM; + const char *sys_event = NULL, *sys_name = NULL; + struct trace_event_call *event_call; + struct trace_eprobe *ep = NULL; + char buf1[MAX_EVENT_NAME_LEN]; + char buf2[MAX_EVENT_NAME_LEN]; + int ret = 0; + int i; + + if (argc < 2 || argv[0][0] != 'e') + return -ECANCELED; + + trace_probe_log_init("event_probe", argc, argv); + + event = strchr(&argv[0][1], ':'); + if (event) { + event++; + ret = traceprobe_parse_event_name(&event, &group, buf1, + event - argv[0]); + if (ret) + goto parse_error; + } else { + strscpy(buf1, argv[1], MAX_EVENT_NAME_LEN); + sanitize_event_name(buf1); + event = buf1; + } + if (!is_good_name(event) || !is_good_name(group)) + goto parse_error; + + sys_event = argv[1]; + ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2, + sys_event - argv[1]); + if (ret || !sys_name) + goto parse_error; + if (!is_good_name(sys_event) || !is_good_name(sys_name)) + goto parse_error; + + mutex_lock(&event_mutex); + event_call = find_and_get_event(sys_name, sys_event); + ep = alloc_event_probe(group, event, event_call, argc - 2); + mutex_unlock(&event_mutex); + + if (IS_ERR(ep)) { + ret = PTR_ERR(ep); + /* This must return -ENOMEM, else there is a bug */ + WARN_ON_ONCE(ret != -ENOMEM); + goto error; /* We know ep is not allocated */ + } + + argc -= 2; argv += 2; + /* parse arguments */ + for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { + trace_probe_log_set_index(i + 2); + ret = trace_eprobe_tp_update_arg(ep, argv, i); + if (ret) + goto error; + } + ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT); + if (ret < 0) + goto error; + init_trace_eprobe_call(ep); + mutex_lock(&event_mutex); + ret = trace_probe_register_event_call(&ep->tp); + if (ret) { + if (ret == -EEXIST) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, EVENT_EXIST); + } + mutex_unlock(&event_mutex); + goto error; + } + ret = dyn_event_add(&ep->devent, &ep->tp.event->call); + mutex_unlock(&event_mutex); + return ret; +parse_error: + ret = -EINVAL; +error: + trace_event_probe_cleanup(ep); + return ret; +} + +/* + * Register dynevent at core_initcall. This allows the kernel to set up eprobe + * events in postcore_initcall without tracefs. + */ +static __init int trace_events_eprobe_init_early(void) +{ + int err = 0; + + err = dyn_event_register(&eprobe_dyn_event_ops); + if (err) + pr_warn("Could not register eprobe_dyn_event_ops\n"); + + return err; +} +core_initcall(trace_events_eprobe_init_early); diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 6b11e335a62e..3d5c07239a2a 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -124,6 +124,18 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) return seq_list_next(t, &event_file->triggers, pos); } +static bool check_user_trigger(struct trace_event_file *file) +{ + struct event_trigger_data *data; + + list_for_each_entry_rcu(data, &file->triggers, list) { + if (data->flags & EVENT_TRIGGER_FL_PROBE) + continue; + return true; + } + return false; +} + static void *trigger_start(struct seq_file *m, loff_t *pos) { struct trace_event_file *event_file; @@ -134,7 +146,7 @@ static void *trigger_start(struct seq_file *m, loff_t *pos) if (unlikely(!event_file)) return ERR_PTR(-ENODEV); - if (list_empty(&event_file->triggers)) + if (list_empty(&event_file->triggers) || !check_user_trigger(event_file)) return *pos == 0 ?
SHOW_AVAILABLE_TRIGGERS : NULL; return seq_list_start(&event_file->triggers, *pos); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 882c27044029..3a64ba4bbad6 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -707,14 +707,6 @@ static struct notifier_block trace_kprobe_module_nb = { .priority = 1 /* Invoked after kprobe module callback */ }; -/* Convert certain expected symbols into '_' when generating event names */ -static inline void sanitize_event_name(char *name) -{ - while (*name++ != '\0') - if (*name == ':' || *name == '.') - *name = '_'; -} - static int __trace_kprobe_create(int argc, const char *argv[]) { /* diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 782c00eb6859..3ed2a3f37297 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -319,6 +319,13 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, code->op = FETCH_OP_ARG; code->param = (unsigned int)param - 1; #endif + } else if (flags & TPARG_FL_TPOINT) { + if (code->data) + return -EFAULT; + code->data = kstrdup(arg, GFP_KERNEL); + if (!code->data) + return -ENOMEM; + code->op = FETCH_OP_TP_ARG; } else goto inval_var; @@ -646,13 +653,14 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, !strcmp(parg->type->name, "ustring")) { if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF && code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM && - code->op != FETCH_OP_DATA) { + code->op != FETCH_OP_DATA && code->op != FETCH_OP_TP_ARG) { trace_probe_log_err(offset + (t ? (t - arg) : 0), BAD_STRING); goto fail; } if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || - code->op == FETCH_OP_DATA) || parg->count) { + code->op == FETCH_OP_DATA) || code->op == FETCH_OP_TP_ARG || + parg->count) { /* * IMM, DATA and COMM is pointing actual address, those * must be kept, and if parg->count != 0, this is an @@ -867,6 +875,10 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, fmt = "(%lx <- %lx)"; arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; break; + case PROBE_PRINT_EVENT: + fmt = "(%u)"; + arg = "REC->" FIELD_STRING_TYPE; + break; default: WARN_ON_ONCE(1); return 0; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 66701a92d186..99e7a5df025e 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -38,6 +38,7 @@ #define FIELD_STRING_IP "__probe_ip" #define FIELD_STRING_RETIP "__probe_ret_ip" #define FIELD_STRING_FUNC "__probe_func" +#define FIELD_STRING_TYPE "__probe_type" #undef DEFINE_FIELD #define DEFINE_FIELD(type, item, name, is_signed) \ @@ -102,6 +103,7 @@ enum fetch_op { FETCH_OP_MOD_BF, /* Bitfield: .basesize, .lshift, .rshift */ // Stage 5 (loop) op FETCH_OP_LP_ARRAY, /* Array: .param = loop count */ + FETCH_OP_TP_ARG, /* Trace Point argument */ FETCH_OP_END, FETCH_NOP_SYMBOL, /* Unresolved Symbol holder */ }; @@ -351,7 +353,8 @@ int trace_probe_create(const char *raw_command, int (*createfn)(int, const char #define TPARG_FL_RETURN BIT(0) #define TPARG_FL_KERNEL BIT(1) #define TPARG_FL_FENTRY BIT(2) -#define TPARG_FL_MASK GENMASK(2, 0) +#define TPARG_FL_TPOINT BIT(3) +#define TPARG_FL_MASK GENMASK(3, 0) extern int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, const char *argv, unsigned int flags); @@ -366,6 +369,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup, enum probe_print_type { PROBE_PRINT_NORMAL, PROBE_PRINT_RETURN, + PROBE_PRINT_EVENT, 
}; extern int traceprobe_set_print_fmt(struct trace_probe *tp, enum probe_print_type ptype); -- cgit v1.2.3 From f3c4b1341e8320e63f197a554fc5a25686a11d22 Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Fri, 27 Aug 2021 11:48:02 +0800 Subject: swiotlb: use depends on for DMA_RESTRICTED_POOL Use depends on instead of select for DMA_RESTRICTED_POOL; otherwise it will make SWIOTLB user configurable and cause compile errors for some arch (e.g. mips). Fixes: 0b84e4f8b793 ("swiotlb: Add restricted DMA pool initialization") Acked-by: Will Deacon Reported-by: Guenter Roeck Signed-off-by: Claire Chang Signed-off-by: Konrad Rzeszutek Wilk --- kernel/dma/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 3e961dc39634..d19f9fef4ab9 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -82,8 +82,7 @@ config SWIOTLB config DMA_RESTRICTED_POOL bool "DMA Restricted Pool" - depends on OF && OF_RESERVED_MEM - select SWIOTLB + depends on OF && OF_RESERVED_MEM && SWIOTLB help This enables support for restricted DMA pools which provide a level of DMA memory protection on systems with limited hardware protection -- cgit v1.2.3 From 15eb7c888e749fbd1cc0370f3d38de08ad903700 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 31 Aug 2021 08:38:19 +0200 Subject: locking/rwsem: Add missing __init_rwsem() for PREEMPT_RT 730633f0b7f95 became the first direct caller of __init_rwsem() vs the usual init_rwsem(), exposing PREEMPT_RT's lack thereof. Add it. [ tglx: Move it out of line ] Signed-off-by: Mike Galbraith Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/50a936b7d8f12277d6ec7ed2ef0421a381056909.camel@gmx.de --- include/linux/rwsem.h | 12 ++---------- kernel/locking/rwsem.c | 10 ++++++---- 2 files changed, 8 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 426e98e0b675..352c6127cb90 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -142,22 +142,14 @@ struct rw_semaphore { #define DECLARE_RWSEM(lockname) \ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, +extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name, struct lock_class_key *key); -#else -static inline void __rwsem_init(struct rw_semaphore *rwsem, const char *name, - struct lock_class_key *key) -{ -} -#endif #define init_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - init_rwbase_rt(&(sem)->rwbase); \ - __rwsem_init((sem), #sem, &__key); \ + __init_rwsem((sem), #sem, &__key); \ } while (0) static __always_inline int rwsem_is_locked(struct rw_semaphore *sem) diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 9215b4d6a9de..000e8d5a2884 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1376,15 +1376,17 @@ static inline void __downgrade_write(struct rw_semaphore *sem) #include "rwbase_rt.c" -#ifdef CONFIG_DEBUG_LOCK_ALLOC -void __rwsem_init(struct rw_semaphore *sem, const char *name, +void __init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key) { + init_rwbase_rt(&(sem)->rwbase); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC debug_check_no_locks_freed((void *)sem, sizeof(*sem)); lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); -} -EXPORT_SYMBOL(__rwsem_init); #endif +} +EXPORT_SYMBOL(__init_rwsem); static inline void __down_read(struct rw_semaphore 
*sem) { -- cgit v1.2.3 From a974b54036f79dd5e395e9f6c80c3decb4661a14 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 18 Aug 2021 14:18:40 +0100 Subject: futex: Return error code instead of assigning it without effect MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The check on the rt_waiter and top_waiter->pi_state is assigning an error return code to ret, but this later gets re-assigned, hence the check is ineffective. Return -EINVAL rather than assigning it to ret, which was the original intent. Fixes: dc7109aaa233 ("futex: Validate waiter correctly in futex_proxy_trylock_atomic()") Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Thomas Gleixner Reviewed-by: André Almeida Link: https://lore.kernel.org/r/20210818131840.34262-1-colin.king@canonical.com --- kernel/futex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index e7b4c6121da4..30e7daebaec8 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2025,7 +2025,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, * and waiting on the 'waitqueue' futex which is always !PI. */ if (!top_waiter->rt_waiter || top_waiter->pi_state) - ret = -EINVAL; + return -EINVAL; /* Ensure we requeue to the expected futex. */ if (!match_futex(top_waiter->requeue_pi_key, key2)) -- cgit v1.2.3 From 4f07ec0d76f242d4ca0f0c0c6f7293c28254a554 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 2 Sep 2021 11:48:48 +0200 Subject: futex: Prevent inconsistent state and exit race The recent rework of the requeue PI code introduced a possibility for going back to user space in inconsistent state:

    CPU 0                               CPU 1

    requeue_futex()
      if (lock_pifutex_user()) {
          dequeue_waiter();
          wake_waiter(task);
                                        sched_in(task);
                                        return_from_futex_syscall();

      ---> Inconsistent state because PI state is not established

It becomes worse if the woken up task immediately exits:

                                        sys_exit();

      attach_pistate(vpid);             <--- FAIL

Attach the pi state before dequeuing and waking the waiter. If the waiter gets a spurious wakeup before the dequeue operation it will wait in futex_requeue_pi_wakeup_sync() and therefore cannot return and exit. Fixes: 07d91ef510fb ("futex: Prevent requeue_pi() lock nesting issue on RT") Reported-by: syzbot+4d1bd0725ef09168e1a0@syzkaller.appspotmail.com Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210902094414.558914045@linutronix.de --- kernel/futex.c | 98 ++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 43 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 30e7daebaec8..04164d440d98 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1454,8 +1454,23 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, newval |= FUTEX_WAITERS; ret = lock_pi_update_atomic(uaddr, uval, newval); - /* If the take over worked, return 1 */ - return ret < 0 ? ret : 1; + if (ret) + return ret; + + /* + * If the waiter bit was requested the caller also needs PI + * state attached to the new owner of the user space futex. + * + * @task is guaranteed to be alive and it cannot be exiting + * because it is either sleeping or waiting in + * futex_requeue_pi_wakeup_sync().
+ */ + if (set_waiters) { + ret = attach_to_pi_owner(uaddr, newval, key, ps, + exiting); + WARN_ON(ret); + } + return 1; } /* @@ -2036,17 +2051,24 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, return -EAGAIN; /* - * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in - * the contended case or if set_waiters is 1. The pi_state is returned - * in ps in contended cases. + * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit + * in the contended case or if @set_waiters is true. + * + * In the contended case PI state is attached to the lock owner. If + * the user space lock can be acquired then PI state is attached to + * the new owner (@top_waiter->task) when @set_waiters is true. */ vpid = task_pid_vnr(top_waiter->task); ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, exiting, set_waiters); if (ret == 1) { - /* Dequeue, wake up and update top_waiter::requeue_state */ + /* + * Lock was acquired in user space and PI state was + * attached to @top_waiter->task. That means state is fully + * consistent and the waiter can return to user space + * immediately after the wakeup. + */ requeue_pi_wake_futex(top_waiter, key2, hb2); - return vpid; } else if (ret < 0) { /* Rewind top_waiter::requeue_state */ futex_requeue_pi_complete(top_waiter, ret); @@ -2208,19 +2230,26 @@ retry_private: &exiting, nr_requeue); /* - * At this point the top_waiter has either taken uaddr2 or is - * waiting on it. If the former, then the pi_state will not - * exist yet, look it up one more time to ensure we have a - * reference to it. If the lock was taken, @ret contains the - * VPID of the top waiter task. - * If the lock was not taken, we have pi_state and an initial - * refcount on it. In case of an error we have nothing. + * At this point the top_waiter has either taken uaddr2 or + * is waiting on it. In both cases pi_state has been + * established and an initial refcount on it. In case of an + * error there's nothing. * * The top waiter's requeue_state is up to date: * - * - If the lock was acquired atomically (ret > 0), then + * - If the lock was acquired atomically (ret == 1), then * the state is Q_REQUEUE_PI_LOCKED. * + * The top waiter has been dequeued and woken up and can + * return to user space immediately. The kernel/user + * space state is consistent. In case that there must be + * more waiters requeued the WAITERS bit in the user + * space futex is set so the top waiter task has to go + * into the syscall slowpath to unlock the futex. This + * will block until this requeue operation has been + * completed and the hash bucket locks have been + * dropped. + * * - If the trylock failed with an error (ret < 0) then * the state is either Q_REQUEUE_PI_NONE, i.e. "nothing * happened", or Q_REQUEUE_PI_IGNORE when there was an @@ -2234,36 +2263,20 @@ retry_private: * the same sanity checks for requeue_pi as the loop * below does. */ - if (ret > 0) { - WARN_ON(pi_state); - task_count++; - /* - * If futex_proxy_trylock_atomic() acquired the - * user space futex, then the user space value - * @uaddr2 has been set to the @hb1's top waiter - * task VPID. This task is guaranteed to be alive - * and cannot be exiting because it is either - * sleeping or blocked on @hb2 lock. - * - * The @uaddr2 futex cannot have waiters either as - * otherwise futex_proxy_trylock_atomic() would not - * have succeeded. - * - * In order to requeue waiters to @hb2, pi state is - * required. 
Hand in the VPID value (@ret) and + * allocate PI state with an initial refcount on + * it. + */ + ret = attach_to_pi_owner(uaddr2, ret, &key2, &pi_state, + &exiting); + WARN_ON(ret); + } + switch (ret) { case 0: /* We hold a reference on the pi state. */ break; + case 1: + /* + * futex_proxy_trylock_atomic() acquired the user space + * futex. Adjust task_count. + */ + task_count++; + ret = 0; + break; + /* * If the above failed, then pi_state is NULL and * waiter::requeue_state is correct. @@ -2395,9 +2408,8 @@ retry_private: } /* - * We took an extra initial reference to the pi_state either in - * futex_proxy_trylock_atomic() or in attach_to_pi_owner(). We need - * to drop it here again. + * We took an extra initial reference to the pi_state in + * futex_proxy_trylock_atomic(). We need to drop it here again. */ put_pi_state(pi_state); -- cgit v1.2.3 From 249955e51c8136189b3c66f54e212981a1350a0f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 2 Sep 2021 11:48:50 +0200 Subject: futex: Clarify comment for requeue_pi_wake_futex() It's slightly confusing. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210902094414.618613025@linutronix.de --- kernel/futex.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 04164d440d98..82cd270f25a0 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1954,12 +1954,26 @@ static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q) * @hb: the hash_bucket of the requeue target futex * * During futex_requeue, with requeue_pi=1, it is possible to acquire the - * target futex if it is uncontended or via a lock steal. Set the futex_q key - * to the requeue target futex so the waiter can detect the wakeup on the right - * futex, but remove it from the hb and NULL the rt_waiter so it can detect - * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock - * to protect access to the pi_state to fixup the owner later. Must be called - * with both q->lock_ptr and hb->lock held. + * target futex if it is uncontended or via a lock steal. + * + * 1) Set @q::key to the requeue target futex key so the waiter can detect + * the wakeup on the right futex. + * + * 2) Dequeue @q from the hash bucket. + * + * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock + * acquisition. + * + * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that + * the waiter has to fixup the pi state. + * + * 5) Complete the requeue state so the waiter can make progress. After + * this point the waiter task can return from the syscall immediately in + * case that the pi state does not have to be fixed up. + * + * 6) Wake the waiter task. + * + * Must be called with both q->lock_ptr and hb->lock held. */ static inline void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, -- cgit v1.2.3 From 340576590dac4bb58d532a8ad5bfa806d8ab473c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 2 Sep 2021 11:48:51 +0200 Subject: futex: Avoid redundant task lookup No need to do the full VPID based task lookup and validation of the top waiter when the user space futex was acquired on its behalf during the requeue_pi operation. The task is known already and it cannot go away before requeue_pi_wake_futex() has been invoked. Split out the actual attach code from attach_to_pi_owner() and use that instead of the full-blown variant.
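The resulting call pattern can be sketched as follows (an illustrative outline only; the actual changes are in the diff below):

    /* Slow path: the owner is only known by the TID in the futex value. */
    attach_to_pi_owner()
        -> find_get_task_by_vpid() + sanity checks
        -> __attach_to_pi_owner(p, key, ps)

    /* requeue_pi path: @task is the already validated top waiter. */
    raw_spin_lock_irq(&task->pi_lock);
    __attach_to_pi_owner(task, key, ps);
    raw_spin_unlock_irq(&task->pi_lock);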
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210902094414.676104881@linutronix.de --- kernel/futex.c | 67 ++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 82cd270f25a0..a316dce74c3d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1263,6 +1263,36 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval, return -ESRCH; } +static void __attach_to_pi_owner(struct task_struct *p, union futex_key *key, + struct futex_pi_state **ps) +{ + /* + * No existing pi state. First waiter. [2] + * + * This creates pi_state, we have hb->lock held, this means nothing can + * observe this state, wait_lock is irrelevant. + */ + struct futex_pi_state *pi_state = alloc_pi_state(); + + /* + * Initialize the pi_mutex in locked state and make @p + * the owner of it: + */ + rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); + + /* Store the key for possible exit cleanups: */ + pi_state->key = *key; + + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &p->pi_state_list); + /* + * Assignment without holding pi_state->pi_mutex.wait_lock is safe + * because there is no concurrency as the object is not published yet. + */ + pi_state->owner = p; + + *ps = pi_state; +} /* * Lookup the task for the TID provided from user space and attach to * it after doing proper sanity checks. @@ -1272,7 +1302,6 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, struct task_struct **exiting) { pid_t pid = uval & FUTEX_TID_MASK; - struct futex_pi_state *pi_state; struct task_struct *p; /* @@ -1324,36 +1353,11 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, return ret; } - /* - * No existing pi state. First waiter. [2] - * - * This creates pi_state, we have hb->lock held, this means nothing can - * observe this state, wait_lock is irrelevant. - */ - pi_state = alloc_pi_state(); - - /* - * Initialize the pi_mutex in locked state and make @p - * the owner of it: - */ - rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); - - /* Store the key for possible exit cleanups: */ - pi_state->key = *key; - - WARN_ON(!list_empty(&pi_state->list)); - list_add(&pi_state->list, &p->pi_state_list); - /* - * Assignment without holding pi_state->pi_mutex.wait_lock is safe - * because there is no concurrency as the object is not published yet. - */ - pi_state->owner = p; + __attach_to_pi_owner(p, key, ps); raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); - *ps = pi_state; - return 0; } @@ -1464,11 +1468,14 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, * @task is guaranteed to be alive and it cannot be exiting * because it is either sleeping or waiting in * futex_requeue_pi_wakeup_sync(). + * + * No need to do the full attach_to_pi_owner() exercise + * because @task is known and valid. 
*/ if (set_waiters) { - ret = attach_to_pi_owner(uaddr, newval, key, ps, - exiting); - WARN_ON(ret); + raw_spin_lock_irq(&task->pi_lock); + __attach_to_pi_owner(task, key, ps); + raw_spin_unlock_irq(&task->pi_lock); } return 1; } -- cgit v1.2.3 From 35d7bdc86031a2c1ae05ac27dfa93b2acdcbaecc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 23 Apr 2021 10:20:25 +0200 Subject: kernel/fork: factor out replacing the current MM exe_file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's factor the main logic out into replace_mm_exe_file(), such that all mm->exe_file logic is contained in kernel/fork.c. While at it, perform some simple cleanups that are possible now that we're simplifying the individual functions. Acked-by: Christian Brauner Acked-by: "Eric W. Biederman" Acked-by: Christian König Signed-off-by: David Hildenbrand --- include/linux/mm.h | 1 + kernel/fork.c | 44 +++++++++++++++++++++++++++++++++++++++++--- kernel/sys.c | 33 +-------------------------------- 3 files changed, 43 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/include/linux/mm.h b/include/linux/mm.h index 7ca22e6e694a..48c6fa9ab792 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2581,6 +2581,7 @@ extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); extern struct file *get_task_exe_file(struct task_struct *task); diff --git a/kernel/fork.c b/kernel/fork.c index 44f4c2d83763..f4ac883c4a1e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1148,9 +1148,7 @@ void mmput_async(struct mm_struct *mm) * * Main users are mmput() and sys_execve(). Callers prevent concurrent * invocations: in mmput() nobody alive left, in execve task is single - * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the - * mm->exe_file, but does so without using set_mm_exe_file() in order - * to avoid the need for any locks. + * threaded. */ void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) { @@ -1170,6 +1168,46 @@ void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) fput(old_exe_file); } +/** + * replace_mm_exe_file - replace a reference to the mm's executable file + * + * This changes mm's executable file (shown as symlink /proc/[pid]/exe), + * dealing with concurrent invocation and without grabbing the mmap lock in + * write mode. + * + * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE). + */ +int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) +{ + struct vm_area_struct *vma; + struct file *old_exe_file; + int ret = 0; + + /* Forbid mm->exe_file change if old file still mapped. 
*/ + old_exe_file = get_mm_exe_file(mm); + if (old_exe_file) { + mmap_read_lock(mm); + for (vma = mm->mmap; vma && !ret; vma = vma->vm_next) { + if (!vma->vm_file) + continue; + if (path_equal(&vma->vm_file->f_path, + &old_exe_file->f_path)) + ret = -EBUSY; + } + mmap_read_unlock(mm); + fput(old_exe_file); + if (ret) + return ret; + } + + /* set the new file, lockless */ + get_file(new_exe_file); + old_exe_file = xchg(&mm->exe_file, new_exe_file); + if (old_exe_file) + fput(old_exe_file); + return 0; +} + /** * get_mm_exe_file - acquire a reference to the mm's executable file * diff --git a/kernel/sys.c b/kernel/sys.c index ef1a78f5d71c..30c12e54585a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1846,7 +1846,6 @@ SYSCALL_DEFINE1(umask, int, mask) static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) { struct fd exe; - struct file *old_exe, *exe_file; struct inode *inode; int err; @@ -1869,40 +1868,10 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) if (err) goto exit; - /* - * Forbid mm->exe_file change if old file still mapped. - */ - exe_file = get_mm_exe_file(mm); - err = -EBUSY; - if (exe_file) { - struct vm_area_struct *vma; - - mmap_read_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (!vma->vm_file) - continue; - if (path_equal(&vma->vm_file->f_path, - &exe_file->f_path)) - goto exit_err; - } - - mmap_read_unlock(mm); - fput(exe_file); - } - - err = 0; - /* set the new file, lockless */ - get_file(exe.file); - old_exe = xchg(&mm->exe_file, exe.file); - if (old_exe) - fput(old_exe); + err = replace_mm_exe_file(mm, exe.file); exit: fdput(exe); return err; -exit_err: - mmap_read_unlock(mm); - fput(exe_file); - goto exit; } /* -- cgit v1.2.3 From fe69d560b5bd9ec77b5d5749bd7027344daef47e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 23 Apr 2021 10:29:59 +0200 Subject: kernel/fork: always deny write access to current MM exe_file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to remove VM_DENYWRITE, which is currently only used when mapping the executable during exec. During exec, we already deny_write_access() the executable; however, after exec completes, the VMAs mapped with VM_DENYWRITE effectively keep write access denied via deny_write_access(). Let's deny write access when setting or replacing the MM exe_file. With this change, we can remove VM_DENYWRITE for mapping executables. Make set_mm_exe_file() return an error in case deny_write_access() fails; note that this should never happen, because exec code does a deny_write_access() early and keeps write access denied when calling set_mm_exe_file. However, it makes the code easier to read and makes set_mm_exe_file() and replace_mm_exe_file() look more similar. This represents a minor user space visible change: sys_prctl(PR_SET_MM_MAP/EXE_FILE) can now fail if the file is already opened writable. Also, after sys_prctl(PR_SET_MM_MAP/EXE_FILE) the file cannot be opened writable. Note that we can already fail with -EACCES if the file doesn't have execute permissions.
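For illustration, a user space sequence that is now expected to fail (a hypothetical snippet, not part of this patch; the path is made up):

    #include <fcntl.h>
    #include <sys/prctl.h>

    int fd = open("/tmp/new-exe", O_RDONLY);  /* hypothetical executable */
    int wfd = open("/tmp/new-exe", O_RDWR);   /* keeps a writable reference */

    /* Needs CAP_SYS_RESOURCE; now fails with EACCES because of wfd. */
    prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, fd, 0, 0);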
Biederman" Acked-by: Christian König Signed-off-by: David Hildenbrand --- fs/exec.c | 4 +++- include/linux/mm.h | 2 +- kernel/fork.c | 50 ++++++++++++++++++++++++++++++++++++++++++++------ 3 files changed, 48 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/fs/exec.c b/fs/exec.c index 38f63451b928..9294049f5487 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1270,7 +1270,9 @@ int begin_new_exec(struct linux_binprm * bprm) * not visibile until then. This also enables the update * to be lockless. */ - set_mm_exe_file(bprm->mm, bprm->file); + retval = set_mm_exe_file(bprm->mm, bprm->file); + if (retval) + goto out; /* If the binary is not readable then enforce mm->dumpable=0 */ would_dump(bprm, bprm->file); diff --git a/include/linux/mm.h b/include/linux/mm.h index 48c6fa9ab792..56b1cd41db61 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2580,7 +2580,7 @@ static inline int check_data_rlimit(unsigned long rlim, extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); -extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); extern struct file *get_task_exe_file(struct task_struct *task); diff --git a/kernel/fork.c b/kernel/fork.c index f4ac883c4a1e..7677f897ecb6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -470,6 +470,20 @@ void free_task(struct task_struct *tsk) } EXPORT_SYMBOL(free_task); +static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm) +{ + struct file *exe_file; + + exe_file = get_mm_exe_file(oldmm); + RCU_INIT_POINTER(mm->exe_file, exe_file); + /* + * We depend on the oldmm having properly denied write access to the + * exe_file already. + */ + if (exe_file && deny_write_access(exe_file)) + pr_warn_once("deny_write_access() failed in %s\n", __func__); +} + #ifdef CONFIG_MMU static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) @@ -493,7 +507,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); /* No ordering required: file already has been exposed. */ - RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); + dup_mm_exe_file(mm, oldmm); mm->total_vm = oldmm->total_vm; mm->data_vm = oldmm->data_vm; @@ -639,7 +653,7 @@ static inline void mm_free_pgd(struct mm_struct *mm) static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { mmap_write_lock(oldmm); - RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); + dup_mm_exe_file(mm, oldmm); mmap_write_unlock(oldmm); return 0; } @@ -1149,8 +1163,10 @@ void mmput_async(struct mm_struct *mm) * Main users are mmput() and sys_execve(). Callers prevent concurrent * invocations: in mmput() nobody alive left, in execve task is single * threaded. + * + * Can only fail if new_exe_file != NULL. */ -void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) +int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) { struct file *old_exe_file; @@ -1161,11 +1177,21 @@ void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) */ old_exe_file = rcu_dereference_raw(mm->exe_file); - if (new_exe_file) + if (new_exe_file) { + /* + * We expect the caller (i.e., sys_execve) to already denied + * write access, so this is unlikely to fail. 
+ */ + if (unlikely(deny_write_access(new_exe_file))) + return -EACCES; get_file(new_exe_file); + } rcu_assign_pointer(mm->exe_file, new_exe_file); - if (old_exe_file) + if (old_exe_file) { + allow_write_access(old_exe_file); fput(old_exe_file); + } + return 0; } /** @@ -1201,10 +1227,22 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) } /* set the new file, lockless */ + ret = deny_write_access(new_exe_file); + if (ret) + return -EACCES; get_file(new_exe_file); + old_exe_file = xchg(&mm->exe_file, new_exe_file); + if (old_exe_file) { + /* + * Don't race with dup_mmap() getting the file and disallowing + * write access while someone might open the file writable. + */ + mmap_read_lock(mm); + allow_write_access(old_exe_file); fput(old_exe_file); + mmap_read_unlock(mm); + } return 0; } -- cgit v1.2.3 From 8d0920bde5eb8ec7e567939b85e65a0596c8580d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 22 Apr 2021 12:08:20 +0200 Subject: mm: remove VM_DENYWRITE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All in-tree users of MAP_DENYWRITE are gone, and MAP_DENYWRITE cannot be set from user space; let's remove it. Acked-by: "Eric W. Biederman" Acked-by: Christian König Signed-off-by: David Hildenbrand --- fs/proc/task_mmu.c | 1 - include/linux/mm.h | 1 - include/linux/mman.h | 1 - include/trace/events/mmflags.h | 1 - kernel/events/core.c | 2 -- kernel/fork.c | 3 --- lib/test_printf.c | 5 ++--- mm/mmap.c | 27 +++------------------------ 8 files changed, 5 insertions(+), 36 deletions(-) (limited to 'kernel') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index eb97468dfe4c..cf25be3e0321 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -619,7 +619,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) [ilog2(VM_MAYSHARE)] = "ms", [ilog2(VM_GROWSDOWN)] = "gd", [ilog2(VM_PFNMAP)] = "pf", - [ilog2(VM_DENYWRITE)] = "dw", [ilog2(VM_LOCKED)] = "lo", [ilog2(VM_IO)] = "io", [ilog2(VM_SEQ_READ)] = "sr", diff --git a/include/linux/mm.h b/include/linux/mm.h index 56b1cd41db61..257995f62e83 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -281,7 +281,6 @@ extern unsigned int kobjsize(const void *objp); #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ -#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts..
*/ #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ #define VM_LOCKED 0x00002000 diff --git a/include/linux/mman.h b/include/linux/mman.h index ebb09a964272..bd9aadda047b 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -153,7 +153,6 @@ static inline unsigned long calc_vm_flag_bits(unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | - _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | arch_calc_vm_flag_bits(flags); diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index f160484afc5c..0b53e855c4ac 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -165,7 +165,6 @@ IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison") {VM_UFFD_MISSING, "uffd_missing" }, \ IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ {VM_PFNMAP, "pfnmap" }, \ - {VM_DENYWRITE, "denywrite" }, \ {VM_UFFD_WP, "uffd_wp" }, \ {VM_LOCKED, "locked" }, \ {VM_IO, "io" }, \ diff --git a/kernel/events/core.c b/kernel/events/core.c index 1cb1f9b8392e..19767bb9933c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8307,8 +8307,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) else flags = MAP_PRIVATE; - if (vma->vm_flags & VM_DENYWRITE) - flags |= MAP_DENYWRITE; if (vma->vm_flags & VM_LOCKED) flags |= MAP_LOCKED; if (is_vm_hugetlb_page(vma)) diff --git a/kernel/fork.c b/kernel/fork.c index 7677f897ecb6..feef1057081d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -570,12 +570,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); file = tmp->vm_file; if (file) { - struct inode *inode = file_inode(file); struct address_space *mapping = file->f_mapping; get_file(file); - if (tmp->vm_flags & VM_DENYWRITE) - put_write_access(inode); i_mmap_lock_write(mapping); if (tmp->vm_flags & VM_SHARED) mapping_allow_writable(mapping); diff --git a/lib/test_printf.c b/lib/test_printf.c index 8ac71aee46af..8a48b61c3763 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -675,9 +675,8 @@ flags(void) "uptodate|dirty|lru|active|swapbacked", cmp_buffer); - flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC - | VM_DENYWRITE; - test("read|exec|mayread|maywrite|mayexec|denywrite", "%pGv", &flags); + flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags); gfp = GFP_TRANSHUGE; test("GFP_TRANSHUGE", "%pGg", &gfp); diff --git a/mm/mmap.c b/mm/mmap.c index ca54d36d203a..589dc1dc13db 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -148,8 +148,6 @@ void vma_set_page_prot(struct vm_area_struct *vma) static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { - if (vma->vm_flags & VM_DENYWRITE) - allow_write_access(file); if (vma->vm_flags & VM_SHARED) mapping_unmap_writable(mapping); @@ -666,8 +664,6 @@ static void __vma_link_file(struct vm_area_struct *vma) if (file) { struct address_space *mapping = file->f_mapping; - if (vma->vm_flags & VM_DENYWRITE) - put_write_access(file_inode(file)); if (vma->vm_flags & VM_SHARED) mapping_allow_writable(mapping); @@ -1788,22 +1784,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma->vm_pgoff = pgoff; if (file) { - if (vm_flags & VM_DENYWRITE) { - error = deny_write_access(file); - if (error) - goto free_vma; - } if (vm_flags & 
VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) - goto allow_write_and_free_vma; + goto free_vma; } - /* ->mmap() can change vma->vm_file, but must guarantee that * vma_link() below can deny write-access if VM_DENYWRITE is set * and map writably if VM_SHARED is set. This usually means the * new file must not have been exposed to user-space, yet. - */ vma->vm_file = get_file(file); error = call_mmap(file, vma); if (error) @@ -1860,13 +1846,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma_link(mm, vma, prev, rb_link, rb_parent); /* Once vma denies write, undo our temporary denial count */ - if (file) { unmap_writable: - if (vm_flags & VM_SHARED) - mapping_unmap_writable(file->f_mapping); - if (vm_flags & VM_DENYWRITE) - allow_write_access(file); - } + if (file && vm_flags & VM_SHARED) + mapping_unmap_writable(file->f_mapping); file = vma->vm_file; out: perf_event_mmap(vma); @@ -1906,9 +1888,6 @@ unmap_and_free_vma: charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); -allow_write_and_free_vma: - if (vm_flags & VM_DENYWRITE) - allow_write_access(file); free_vma: vm_area_free(vma); unacct_error: -- cgit v1.2.3 From fab827dbee8c2e06ca4ba000fa6c48bcf9054aba Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 2 Sep 2021 14:54:57 -0700 Subject: memcg: enable accounting for pids in nested pid namespaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 5d097056c9a0 ("kmemcg: account certain kmem allocations to memcg") enabled memcg accounting for pids allocated from init_pid_ns.pid_cachep, but forgot to adjust the setting for nested pid namespaces. As a result, pid memory is not accounted exactly where it is really needed, inside memcg-limited containers with their own pid namespaces. Pid was one of the first kernel objects enabled for memcg accounting. init_pid_ns.pid_cachep is marked with SLAB_ACCOUNT, so we could expect that any new pids in the system are memcg-accounted. Though recently I've noticed that this is wrong. Nested pid namespaces create their own slab caches for pid objects; nested pids have an increased size because they contain an id for their own pid namespace and for every parent one. The problem is that these slab caches are _NOT_ marked with SLAB_ACCOUNT; as a result, any pids allocated in nested pid namespaces are not memcg-accounted. A pid struct in a nested pid namespace consumes up to 500 bytes of memory, and 100000 such objects give us up to ~50MB of unaccounted memory, which allows a container to exceed its assigned memcg limits.
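For illustration, a minimal reproducer sketch (hypothetical, not from this patch): each fork+unshare level below allocates pid objects from a deeper, per-level pid slab cache, none of which were SLAB_ACCOUNT-marked before this fix:

    #include <sched.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)	/* needs CAP_SYS_ADMIN in its user namespace */
    {
    	for (int level = 0; level < 32; level++) {	/* 32 == MAX_PID_NS_LEVEL */
    		pid_t pid;

    		if (unshare(CLONE_NEWPID) != 0)
    			exit(1);
    		pid = fork();
    		if (pid < 0)
    			exit(1);
    		if (pid > 0) {		/* parent keeps the namespace chain alive */
    			pause();
    			exit(0);
    		}
    		/* child is init of the new pid ns; next iteration nests deeper */
    	}
    	pause();
    	return 0;
    }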
*/ if (!*pkc) - *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0); + *pkc = kmem_cache_create(name, len, 0, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0); mutex_unlock(&pid_caches_mutex); /* current can fail, but someone else can succeed. */ return READ_ONCE(*pkc); -- cgit v1.2.3 From 30acd0bdfb86548172168a0cc71d455944de0683 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 2 Sep 2021 14:55:27 -0700 Subject: memcg: enable accounting for new namespaces and struct nsproxy A container admin can create new namespaces and force the kernel to allocate up to several pages of memory for the namespaces and their associated structures. Net and uts namespaces have enabled accounting for such allocations. It makes sense to account for the rest of them as well, to restrict the host's memory consumption from inside the memcg-limited container. Link: https://lkml.kernel.org/r/5525bcbf-533e-da27-79b7-158686c64e13@virtuozzo.com Signed-off-by: Vasily Averin Acked-by: Serge Hallyn Acked-by: Christian Brauner Acked-by: Kirill Tkhai Reviewed-by: Shakeel Butt Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrei Vagin Cc: Borislav Petkov Cc: Borislav Petkov Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: "Eric W. Biederman" Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: "J. Bruce Fields" Cc: Jeff Layton Cc: Jens Axboe Cc: Jiri Slaby Cc: Johannes Weiner Cc: Michal Hocko Cc: Oleg Nesterov Cc: Roman Gushchin Cc: Tejun Heo Cc: Thomas Gleixner Cc: Vladimir Davydov Cc: Yutian Yang Cc: Zefan Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namespace.c | 2 +- ipc/namespace.c | 2 +- kernel/cgroup/namespace.c | 2 +- kernel/nsproxy.c | 2 +- kernel/pid_namespace.c | 2 +- kernel/time/namespace.c | 4 ++-- kernel/user_namespace.c | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/fs/namespace.c b/fs/namespace.c index e51b63ae233b..94a9817851cc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3307,7 +3307,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a if (!ucounts) return ERR_PTR(-ENOSPC); - new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL); + new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT); if (!new_ns) { dec_mnt_namespaces(ucounts); return ERR_PTR(-ENOMEM); } diff --git a/ipc/namespace.c b/ipc/namespace.c index 7bd0766ddc3b..ae83f0f2651b 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -42,7 +42,7 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns, goto fail; err = -ENOMEM; - ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL); + ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT); if (ns == NULL) goto fail_dec; diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c index f5e8828c109c..0d5c29879a50 100644 --- a/kernel/cgroup/namespace.c +++ b/kernel/cgroup/namespace.c @@ -24,7 +24,7 @@ static struct cgroup_namespace *alloc_cgroup_ns(void) struct cgroup_namespace *new_ns; int ret; - new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL); + new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL_ACCOUNT); if (!new_ns) return ERR_PTR(-ENOMEM); ret = ns_alloc_inum(&new_ns->ns); diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index abc01fcad8c7..eec72ca962e2 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -568,6 +568,6 @@ out: int __init nsproxy_cache_init(void) { - nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC); + nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC|SLAB_ACCOUNT); return 0; } diff --git a/kernel/pid_namespace.c
b/kernel/pid_namespace.c index cb5a25a8a0cc..a46a3723bc66 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -450,7 +450,7 @@ const struct proc_ns_operations pidns_for_children_operations = { static __init int pid_namespaces_init(void) { - pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC); + pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC | SLAB_ACCOUNT); #ifdef CONFIG_CHECKPOINT_RESTORE register_sysctl_paths(kern_path, pid_ns_ctl_table); diff --git a/kernel/time/namespace.c b/kernel/time/namespace.c index 12eab0d2ae28..aec832801c26 100644 --- a/kernel/time/namespace.c +++ b/kernel/time/namespace.c @@ -88,13 +88,13 @@ static struct time_namespace *clone_time_ns(struct user_namespace *user_ns, goto fail; err = -ENOMEM; - ns = kmalloc(sizeof(*ns), GFP_KERNEL); + ns = kmalloc(sizeof(*ns), GFP_KERNEL_ACCOUNT); if (!ns) goto fail_dec; refcount_set(&ns->ns.count, 1); - ns->vvar_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + ns->vvar_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); if (!ns->vvar_page) goto fail_free; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index ef82d401dde8..6b2e3ca7ee99 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -1385,7 +1385,7 @@ const struct proc_ns_operations userns_operations = { static __init int user_namespaces_init(void) { - user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); + user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC | SLAB_ACCOUNT); return 0; } subsys_initcall(user_namespaces_init); -- cgit v1.2.3 From 5f58c39819ff78ca5ddbba2b3cd8ff4779b19bb5 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 2 Sep 2021 14:55:35 -0700 Subject: memcg: enable accounting for signals When a user sends a signal to another process, it forces the kernel to allocate memory for 'struct sigqueue' objects. The number of signals is limited by the RLIMIT_SIGPENDING resource limit, but even the default settings allow each user to consume up to several megabytes of memory. It makes sense to account for these allocations to restrict the host's memory consumption from inside the memcg-limited container. Link: https://lkml.kernel.org/r/e34e958c-e785-712e-a62a-2c7b66c646c7@virtuozzo.com Signed-off-by: Vasily Averin Reviewed-by: Shakeel Butt Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrei Vagin Cc: Borislav Petkov Cc: Borislav Petkov Cc: Christian Brauner Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: "Eric W. Biederman" Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: "J.
Bruce Fields" Cc: Jeff Layton Cc: Jens Axboe Cc: Jiri Slaby Cc: Johannes Weiner Cc: Kirill Tkhai Cc: Michal Hocko Cc: Oleg Nesterov Cc: Roman Gushchin Cc: Serge Hallyn Cc: Tejun Heo Cc: Thomas Gleixner Cc: Vladimir Davydov Cc: Yutian Yang Cc: Zefan Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index a3229add4455..8921c4a8419f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -4663,7 +4663,7 @@ void __init signals_init(void) { siginfo_buildtime_checks(); - sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); + sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT); } #ifdef CONFIG_KGDB_KDB -- cgit v1.2.3 From c509723ec27e925bb91a20682c448e95d4bc8c9f Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 2 Sep 2021 14:55:39 -0700 Subject: memcg: enable accounting for posix_timers_cache slab A program may create multiple interval timers using timer_create(). For each timer the kernel preallocates a "queued real-time signal", Consequently, the number of timers is limited by the RLIMIT_SIGPENDING resource limit. The allocated object is quite small, ~250 bytes, but even the default signal limits allow to consume up to 100 megabytes per user. It makes sense to account for them to limit the host's memory consumption from inside the memcg-limited container. Link: https://lkml.kernel.org/r/57795560-025c-267c-6b1a-dea852d95530@virtuozzo.com Signed-off-by: Vasily Averin Reviewed-by: Thomas Gleixner Reviewed-by: Shakeel Butt Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrei Vagin Cc: Borislav Petkov Cc: Borislav Petkov Cc: Christian Brauner Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: "Eric W. Biederman" Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: "J. Bruce Fields" Cc: Jeff Layton Cc: Jens Axboe Cc: Jiri Slaby Cc: Johannes Weiner Cc: Kirill Tkhai Cc: Michal Hocko Cc: Oleg Nesterov Cc: Roman Gushchin Cc: Serge Hallyn Cc: Tejun Heo Cc: Vladimir Davydov Cc: Yutian Yang Cc: Zefan Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/posix-timers.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index dd5697d7347b..7363f81dc31a 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -273,8 +273,8 @@ static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp) static __init int init_posix_timers(void) { posix_timers_cache = kmem_cache_create("posix_timers_cache", - sizeof (struct k_itimer), 0, SLAB_PANIC, - NULL); + sizeof(struct k_itimer), 0, + SLAB_PANIC | SLAB_ACCOUNT, NULL); return 0; } __initcall(init_posix_timers); -- cgit v1.2.3 From 65d759c8f9f57b96c199f3fe5cfb93ac7da095e9 Mon Sep 17 00:00:00 2001 From: Charan Teja Reddy Date: Thu, 2 Sep 2021 14:59:59 -0700 Subject: mm: compaction: support triggering of proactive compaction by user The proactive compaction[1] gets triggered for every 500msec and run compaction on the node for COMPACTION_HPAGE_ORDER (usually order-9) pages based on the value set to sysctl.compaction_proactiveness. Triggering the compaction for every 500msec in search of COMPACTION_HPAGE_ORDER pages is not needed for all applications, especially on the embedded system usecases which may have few MB's of RAM. Enabling the proactive compaction in its state will endup in running almost always on such systems. 
On the other hand, proactive compaction can still be very useful for getting a set of higher-order pages in a controllable manner (controlled via sysctl.compaction_proactiveness). So, on systems where always-on proactive compaction may prove unnecessary, it can instead be triggered from user space by a write to its sysctl interface. As an example, an app launcher deciding to launch a memory-heavy application, which can be launched faster if it gets more higher-order pages, can prepare the system in advance by triggering proactive compaction from userspace. This triggering of proactive compaction is done on a write to sysctl.compaction_proactiveness by the user. [1]https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit?id=facdaa917c4d5a376d09d25865f5a863f906234a [akpm@linux-foundation.org: tweak vm.rst, per Mike] Link: https://lkml.kernel.org/r/1627653207-12317-1-git-send-email-charante@codeaurora.org Signed-off-by: Charan Teja Reddy Acked-by: Vlastimil Babka Acked-by: Rafael Aquini Cc: Mike Rapoport Cc: Luis Chamberlain Cc: Kees Cook Cc: Iurii Zaikin Cc: Dave Hansen Cc: Mel Gorman Cc: Nitin Gupta Cc: Jonathan Corbet Cc: Khalid Aziz Cc: David Rientjes Cc: Vinayak Menon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/sysctl/vm.rst | 3 ++- include/linux/compaction.h | 2 ++ include/linux/mmzone.h | 1 + kernel/sysctl.c | 2 +- mm/compaction.c | 38 +++++++++++++++++++++++++++++++-- 5 files changed, 42 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 003d5cc3751b..5e795202111f 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -118,7 +118,8 @@ compaction_proactiveness This tunable takes a value in the range [0, 100] with a default value of 20. This tunable determines how aggressively compaction is done in the -background. Setting it to 0 disables proactive compaction. +background. Write of a non zero value to this tunable will immediately +trigger the proactive compaction. Setting it to 0 disables proactive compaction.
Note that compaction has a non-trivial system-wide impact as pages belonging to different processes are moved around, which could also lead diff --git a/include/linux/compaction.h b/include/linux/compaction.h index c24098c7acca..34bce35c808d 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -84,6 +84,8 @@ static inline unsigned long compact_gap(unsigned int order) extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); +extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table, + int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 59bad25ce78e..1bd5f5955f9a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -846,6 +846,7 @@ typedef struct pglist_data { enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; + bool proactive_compact_trigger; #endif /* * This is a per-node reserve of pages that are not available diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 272f4a272f8c..297f0b3966bd 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2871,7 +2871,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_compaction_proactiveness, .maxlen = sizeof(sysctl_compaction_proactiveness), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = compaction_proactiveness_sysctl_handler, .extra1 = SYSCTL_ZERO, .extra2 = &one_hundred, }, diff --git a/mm/compaction.c b/mm/compaction.c index 4ee0d40d93f2..fa9b2b598eab 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2706,6 +2706,30 @@ static void compact_nodes(void) */ unsigned int __read_mostly sysctl_compaction_proactiveness = 20; +int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, + void *buffer, size_t *length, loff_t *ppos) +{ + int rc, nid; + + rc = proc_dointvec_minmax(table, write, buffer, length, ppos); + if (rc) + return rc; + + if (write && sysctl_compaction_proactiveness) { + for_each_online_node(nid) { + pg_data_t *pgdat = NODE_DATA(nid); + + if (pgdat->proactive_compact_trigger) + continue; + + pgdat->proactive_compact_trigger = true; + wake_up_interruptible(&pgdat->kcompactd_wait); + } + } + + return 0; +} + /* * This is the entry point for compacting all nodes via * /proc/sys/vm/compact_memory @@ -2750,7 +2774,8 @@ void compaction_unregister_node(struct node *node) static inline bool kcompactd_work_requested(pg_data_t *pgdat) { - return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); + return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || + pgdat->proactive_compact_trigger; } static bool kcompactd_node_suitable(pg_data_t *pgdat) @@ -2901,9 +2926,16 @@ static int kcompactd(void *p) while (!kthread_should_stop()) { unsigned long pflags; + /* + * Avoid the unnecessary wakeup for proactive compaction + * when it is disabled. 
+ */ + if (!sysctl_compaction_proactiveness) + timeout = MAX_SCHEDULE_TIMEOUT; trace_mm_compaction_kcompactd_sleep(pgdat->node_id); if (wait_event_freezable_timeout(pgdat->kcompactd_wait, - kcompactd_work_requested(pgdat), timeout)) { + kcompactd_work_requested(pgdat), timeout) && + !pgdat->proactive_compact_trigger) { psi_memstall_enter(&pflags); kcompactd_do_work(pgdat); @@ -2938,6 +2970,8 @@ static int kcompactd(void *p) timeout = default_timeout << COMPACT_MAX_DEFER_SHIFT; } + if (unlikely(pgdat->proactive_compact_trigger)) + pgdat->proactive_compact_trigger = false; } return 0; -- cgit v1.2.3 From dce49103962840dd61423d7627748d6c558d58c5 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 2 Sep 2021 15:00:33 -0700 Subject: mm: wire up syscall process_mrelease Split off from prev patch in the series that implements the syscall. Link: https://lkml.kernel.org/r/20210809185259.405936-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Geert Uytterhoeven Cc: Andy Lutomirski Cc: Christian Brauner Cc: Christoph Hellwig Cc: David Hildenbrand Cc: David Rientjes Cc: Florian Weimer Cc: Jan Engelhardt Cc: Jann Horn Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Rik van Riel Cc: Roman Gushchin Cc: Shakeel Butt Cc: Tim Murray Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/syscalls/syscall.tbl | 2 ++ arch/arm/tools/syscall.tbl | 2 ++ arch/arm64/include/asm/unistd.h | 2 +- arch/arm64/include/asm/unistd32.h | 2 ++ arch/ia64/kernel/syscalls/syscall.tbl | 2 ++ arch/m68k/kernel/syscalls/syscall.tbl | 2 ++ arch/microblaze/kernel/syscalls/syscall.tbl | 2 ++ arch/mips/kernel/syscalls/syscall_n32.tbl | 2 ++ arch/mips/kernel/syscalls/syscall_n64.tbl | 2 ++ arch/mips/kernel/syscalls/syscall_o32.tbl | 2 ++ arch/parisc/kernel/syscalls/syscall.tbl | 2 ++ arch/powerpc/kernel/syscalls/syscall.tbl | 2 ++ arch/s390/kernel/syscalls/syscall.tbl | 2 ++ arch/sh/kernel/syscalls/syscall.tbl | 2 ++ arch/sparc/kernel/syscalls/syscall.tbl | 2 ++ arch/x86/entry/syscalls/syscall_32.tbl | 1 + arch/x86/entry/syscalls/syscall_64.tbl | 1 + arch/xtensa/kernel/syscalls/syscall.tbl | 2 ++ include/linux/syscalls.h | 1 + include/uapi/asm-generic/unistd.h | 4 +++- kernel/sys_ni.c | 1 + 21 files changed, 38 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index a17687ed4b51..605645eae04c 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -486,3 +486,5 @@ 554 common landlock_create_ruleset sys_landlock_create_ruleset 555 common landlock_add_rule sys_landlock_add_rule 556 common landlock_restrict_self sys_landlock_restrict_self +# 557 reserved for memfd_secret +558 common process_mrelease sys_process_mrelease diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index c5df1179fc5d..2f32eb8beca8 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -460,3 +460,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 727bfc3be99b..3cb206aea3db 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -38,7 +38,7 @@ #define 
__ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 447 +#define __NR_compat_syscalls 449 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 99ffcafc736c..0f49cdb180dd 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -901,6 +901,8 @@ __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset) __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) #define __NR_landlock_restrict_self 446 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) +#define __NR_process_mrelease 448 +__SYSCALL(__NR_process_mrelease, sys_process_mrelease) /* * Please add new compat syscalls above this comment and update diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index 6d07742c57b8..9bf45f2be966 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -367,3 +367,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 541bc1b3a8f9..f1f98ee6c82d 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -446,3 +446,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index a176faca2927..da49ddd4bb54 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -452,3 +452,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index c2d2e19abea8..56c8d3cf42ed 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -385,3 +385,5 @@ 444 n32 landlock_create_ruleset sys_landlock_create_ruleset 445 n32 landlock_add_rule sys_landlock_add_rule 446 n32 landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 n32 process_mrelease sys_process_mrelease diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index ac653d08b1ea..1ca7bc337932 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -361,3 +361,5 @@ 444 n64 landlock_create_ruleset sys_landlock_create_ruleset 445 n64 landlock_add_rule sys_landlock_add_rule 446 n64 landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 n64 process_mrelease sys_process_mrelease diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 253f2cd70b6b..fd3a9df60ec2 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ 
b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -434,3 +434,5 @@ 444 o32 landlock_create_ruleset sys_landlock_create_ruleset 445 o32 landlock_add_rule sys_landlock_add_rule 446 o32 landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 o32 process_mrelease sys_process_mrelease diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index e26187b9ab87..040df1b7a589 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -444,3 +444,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index aef2a290e71a..d8ebd7d37c0f 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -526,3 +526,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 64d51ab5a8b4..57233ace30cb 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -449,3 +449,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease sys_process_mrelease diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index e0a70be77d84..2f6e95eb4690 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -449,3 +449,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 603f5a821502..42fc2906215d 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -492,3 +492,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index ce763a12311c..661a03bcfbd1 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -452,3 +452,4 @@ 445 i386 landlock_add_rule sys_landlock_add_rule 446 i386 landlock_restrict_self sys_landlock_restrict_self 447 i386 memfd_secret sys_memfd_secret +448 i386 process_mrelease sys_process_mrelease diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index f6b57799c1ea..807b6a1de8e8 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ 
b/arch/x86/entry/syscalls/syscall_64.tbl @@ -369,6 +369,7 @@ 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self 447 common memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease # # Due to a historical design error, certain syscalls are numbered differently diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 235d67d6ceb4..f4384951f393 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -417,3 +417,5 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 69c9a7010081..00bc170a50f0 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -915,6 +915,7 @@ asmlinkage long sys_mincore(unsigned long start, size_t len, asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, size_t vlen, int behavior, unsigned int flags); +asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags); diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index a9d6fcd95f42..14c8fe863c6d 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -877,9 +877,11 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) #define __NR_memfd_secret 447 __SYSCALL(__NR_memfd_secret, sys_memfd_secret) #endif +#define __NR_process_mrelease 448 +__SYSCALL(__NR_process_mrelease, sys_process_mrelease) #undef __NR_syscalls -#define __NR_syscalls 448 +#define __NR_syscalls 449 /* * 32 bit systems traditionally used different diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 30971b1dd4a9..18a9c2cde767 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -289,6 +289,7 @@ COND_SYSCALL(munlockall); COND_SYSCALL(mincore); COND_SYSCALL(madvise); COND_SYSCALL(process_madvise); +COND_SYSCALL(process_mrelease); COND_SYSCALL(remap_file_pages); COND_SYSCALL(mbind); COND_SYSCALL_COMPAT(mbind); -- cgit v1.2.3 From d66e3edee7af87fe212df611ab9846b987a5070f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Sep 2021 22:47:06 +0200 Subject: futex: Remove unused variable 'vpid' in futex_proxy_trylock_atomic() The recent bug fix left the variable 'vpid' and an assignment to it around, but the variable is otherwise unused. clang does not complain even with W=1, but gcc exposed this.
Fixes: 4f07ec0d76f2 ("futex: Prevent inconsistent state and exit race") Signed-off-by: Thomas Gleixner --- kernel/futex.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index a316dce74c3d..c15ad276fd15 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2034,7 +2034,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, { struct futex_q *top_waiter = NULL; u32 curval; - int ret, vpid; + int ret; if (get_futex_value_locked(&curval, pifutex)) return -EFAULT; @@ -2079,7 +2079,6 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, * the user space lock can be acquired then PI state is attached to * the new owner (@top_waiter->task) when @set_waiters is true. */ - vpid = task_pid_vnr(top_waiter->task); ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, exiting, set_waiters); if (ret == 1) { -- cgit v1.2.3 From 54357f0c9149c871e5e4b83ad385a6f2ad3a749f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 10 Aug 2021 15:26:25 +0200 Subject: tracing: Add migrate-disabled counter to tracing output. migrate_disable() forbids task migration to another CPU. It is available since v5.11 and already has users such as highmem or BPF. It is useful to observe this task state in tracing, which already has other states like the preemption counter. Instead of adding the migrate disable counter as a new entry to struct trace_entry, which would extend the whole struct by four bytes, it is squashed into the preempt-disable counter. The lower four bits represent the preemption counter, the upper four bits represent the migrate disable counter. Neither counter should exceed 15, but if they do, there is a safety net which caps the value at 15. Add the migrate-disable counter to the trace entry so it shows up in the trace. Due to the users mentioned above, it is already possible to observe it: | bash-1108 [000] ...21 73.950578: rss_stat: mm_id=2213312838 curr=0 type=MM_ANONPAGES size=8192B | bash-1108 [000] d..31 73.951222: irq_disable: caller=flush_tlb_mm_range+0x115/0x130 parent=ptep_clear_flush+0x42/0x50 | bash-1108 [000] d..31 73.951222: tlb_flush: pages:1 reason:local mm shootdown (3) The last value is the migrate-disable counter. Things that popped up: - trace_print_lat_context() does not print the migrate counter. Not sure if it should. It is used in "verbose" mode and uses 8 digits, and I'm not sure there is something processing the value. - trace_define_common_fields() now defines a different variable. This probably breaks things. No idea what to do in order to preserve the old behaviour. Since this is used as a filter it should be split somehow to be able to match both nibbles here. Link: https://lkml.kernel.org/r/20210810132625.ylssabmsrkygokuv@linutronix.de Signed-off-by: Thomas Gleixner [bigeasy: patch description.]
Signed-off-by: Sebastian Andrzej Siewior [ SDR: Removed change to common_preempt_count field name ] Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 26 +++++++++++++++++++------- kernel/trace/trace_events.c | 1 + kernel/trace/trace_output.c | 11 ++++++++--- 3 files changed, 28 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 489924cde4f8..b554e18924f8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2603,6 +2603,15 @@ enum print_line_t trace_handle_return(struct trace_seq *s) } EXPORT_SYMBOL_GPL(trace_handle_return); +static unsigned short migration_disable_value(void) +{ +#if defined(CONFIG_SMP) + return current->migration_disabled; +#else + return 0; +#endif +} + unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) { unsigned int trace_flags = irqs_status; @@ -2621,7 +2630,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) trace_flags |= TRACE_FLAG_NEED_RESCHED; if (test_preempt_need_resched()) trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; - return (trace_flags << 16) | (pc & 0xff); + return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | + (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; } struct ring_buffer_event * @@ -4189,9 +4199,10 @@ static void print_lat_help_header(struct seq_file *m) "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" - "# |||| / delay \n" - "# cmd pid ||||| time | caller \n" - "# \\ / ||||| \\ | / \n"); + "# |||| / _-=> migrate-disable \n" + "# ||||| / delay \n" + "# cmd pid |||||| time | caller \n" + "# \\ / |||||| \\ | / \n"); } static void print_event_info(struct array_buffer *buf, struct seq_file *m) @@ -4229,9 +4240,10 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); - seq_printf(m, "# %.*s||| / delay\n", prec, space); - seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); - seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); + seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); + seq_printf(m, "# %.*s|||| / delay\n", prec, space); + seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); + seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); } void diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 1349b6de5eeb..830b3b9940f4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -181,6 +181,7 @@ static int trace_define_common_fields(void) __common_field(unsigned short, type); __common_field(unsigned char, flags); + /* Holds both preempt_count and migrate_disable */ __common_field(unsigned char, preempt_count); __common_field(int, pid); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index a0bf446bb034..c2ca40e8595b 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -492,8 +492,13 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) trace_seq_printf(s, "%c%c%c", irqs_off, need_resched, hardsoft_irq); - if (entry->preempt_count) - trace_seq_printf(s, "%x", entry->preempt_count); + if (entry->preempt_count & 0xf) + trace_seq_printf(s, "%x", entry->preempt_count & 0xf); + else + trace_seq_putc(s, '.'); + + if (entry->preempt_count & 0xf0) + 
trace_seq_printf(s, "%x", entry->preempt_count >> 4); else trace_seq_putc(s, '.'); @@ -656,7 +661,7 @@ int trace_print_lat_context(struct trace_iterator *iter) trace_seq_printf( s, "%16s %7d %3d %d %08x %08lx ", comm, entry->pid, iter->cpu, entry->flags, - entry->preempt_count, iter->idx); + entry->preempt_count & 0xf, iter->idx); } else { lat_print_generic(s, entry, iter->cpu); } -- cgit v1.2.3 From f8416aa29185468e0d914ba4b2a330fd53ee263f Mon Sep 17 00:00:00 2001 From: Cai Huoqing Date: Mon, 6 Sep 2021 19:23:02 +0800 Subject: kernel: debug: Convert to SPDX identifier use SPDX-License-Identifier instead of a verbose license text Signed-off-by: Cai Huoqing Link: https://lore.kernel.org/r/20210906112302.937-1-caihuoqing@baidu.com Signed-off-by: Daniel Thompson --- kernel/debug/debug_core.c | 5 +---- kernel/debug/gdbstub.c | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index b4aa6bb6b2bd..da06a5553835 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Kernel Debug Core * @@ -22,10 +23,6 @@ * * Original KGDB stub: David Grothe , * Tigran Aivazian - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #define pr_fmt(fmt) "KGDB: " fmt diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index b6f28fad4307..9d34d2364b5a 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Kernel Debug Core * @@ -22,10 +23,6 @@ * * Original KGDB stub: David Grothe , * Tigran Aivazian - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include -- cgit v1.2.3 From 5615e088b43d564aec08ccfaecb21ba484a1b4d6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 24 Aug 2021 14:51:50 +0300 Subject: tracing: Fix some alloc_event_probe() error handling bugs There are two bugs in this code. First, if the kzalloc() fails it leads to a NULL dereference of "ep" on the next line. Second, if the alloc_event_probe() function returns an error then it leads to an error pointer dereference in the caller. 
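Both bugs reduce to one pattern, illustrated below in a minimal sketch (hypothetical names, not the patch itself): on allocation failure the object must not be dereferenced, and an error return must leave the caller's pointer in a state its cleanup path can handle.

	#include <linux/slab.h>
	#include <linux/err.h>

	struct probe {
		void *event;
	};

	static struct probe *alloc_probe(void *event)
	{
		struct probe *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

		if (!ep) {
			/* ep is NULL here: releasing the reference must go
			 * through 'event', never through ep->event. */
			return ERR_PTR(-ENOMEM);
		}
		ep->event = event;
		return ep;
	}

	/* Caller side of the second bug: when an ERR_PTR comes back, reset
	 * the local pointer to NULL before jumping to a shared error path
	 * that would otherwise kfree() an error pointer. */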
Link: https://lkml.kernel.org/r/20210824115150.GI31143@kili Fixes: 7491e2c44278 ("tracing: Add a probe that attaches to trace events") Signed-off-by: Dan Carpenter Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_eprobe.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index 56a96e9750cf..3044b762cbd7 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -151,7 +151,7 @@ static struct trace_eprobe *alloc_event_probe(const char *group, ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL); if (!ep) { - trace_event_put_ref(ep->event); + trace_event_put_ref(event); goto error; } ep->event = event; @@ -851,7 +851,8 @@ static int __trace_eprobe_create(int argc, const char *argv[]) ret = PTR_ERR(ep); /* This must return -ENOMEM, else there is a bug */ WARN_ON_ONCE(ret != -ENOMEM); - goto error; /* We know ep is not allocated */ + ep = NULL; + goto error; } argc -= 2; argv += 2; -- cgit v1.2.3 From 3c91dda97eea704ac257ddb138d1154adab8db62 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 7 Sep 2021 19:58:18 -0700 Subject: kernel/acct.c: use dedicated helper to access rlimit values Use the rlimit() helper instead of manually writing the whole chain from task to rlimit value. See patch "posix-cpu-timers: Use dedicated helper to access rlimit values". Link: https://lkml.kernel.org/r/20210728030822.524789-1-yang.yang29@zte.com.cn Signed-off-by: Yang Yang Reported-by: Zeal Robot Cc: Randy Dunlap Cc: sh_def@163.com Cc: Yang Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/acct.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/acct.c b/kernel/acct.c index a64102be2bb0..23a7ab8e6cbc 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -478,7 +478,7 @@ static void do_acct_process(struct bsd_acct_struct *acct) /* * Accounting records are not subject to resource limits. */ - flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + flim = rlimit(RLIMIT_FSIZE); current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; /* Perform file operations on behalf of whoever enabled accounting */ orig_cred = override_creds(file->f_cred); -- cgit v1.2.3 From 2d186afd04d669fe9c48b994c41a7405a3c9f16d Mon Sep 17 00:00:00 2001 From: Pavel Skripkin Date: Tue, 7 Sep 2021 19:58:21 -0700 Subject: profiling: fix shift-out-of-bounds bugs Syzbot reported a shift-out-of-bounds bug in profile_init(). The problem was an incorrect prof_shift. Since the prof_shift value comes from userspace, we need to clamp it into [0, BITS_PER_LONG - 1] boundaries. A second possible shift-out-of-bounds was found by Tetsuo: the sample_step local variable in read_profile() had the "unsigned int" type, but prof_shift allows a shift of up to BITS_PER_LONG bits. So, to prevent a possible shift-out-of-bounds, the sample_step type was changed to "unsigned long". Also, "unsigned short int" is sufficient for storing a value in [0, BITS_PER_LONG], so there is no need for an "unsigned long" prof_shift.
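The invariant the fix enforces can be shown standalone (a minimal sketch assuming the usual kernel headers; sanitize_shift() is illustrative, not the patch):

	#include <linux/kernel.h>

	static unsigned short int sanitize_shift(int par)
	{
		/* A shift count >= BITS_PER_LONG is undefined behaviour in C,
		 * so a user-supplied count must be clamped before use. */
		return clamp(par, 0, BITS_PER_LONG - 1);
	}

	/* The sampling step then stays a well-defined power of two:
	 *
	 *	unsigned long sample_step = 1UL << sanitize_shift(par);
	 */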
Link: https://lkml.kernel.org/r/20210813140022.5011-1-paskripkin@gmail.com Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-and-tested-by: syzbot+e68c89a9510c159d9684@syzkaller.appspotmail.com Suggested-by: Tetsuo Handa Signed-off-by: Pavel Skripkin Cc: Thomas Gleixner Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/profile.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/profile.c b/kernel/profile.c index c2ebddb5e974..eb9c7f0f5ac5 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -41,7 +41,8 @@ struct profile_hit { #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ) static atomic_t *prof_buffer; -static unsigned long prof_len, prof_shift; +static unsigned long prof_len; +static unsigned short int prof_shift; int prof_on __read_mostly; EXPORT_SYMBOL_GPL(prof_on); @@ -67,8 +68,8 @@ int profile_setup(char *str) if (str[strlen(sleepstr)] == ',') str += strlen(sleepstr) + 1; if (get_option(&str, &par)) - prof_shift = par; - pr_info("kernel sleep profiling enabled (shift: %ld)\n", + prof_shift = clamp(par, 0, BITS_PER_LONG - 1); + pr_info("kernel sleep profiling enabled (shift: %u)\n", prof_shift); #else pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); @@ -78,21 +79,21 @@ int profile_setup(char *str) if (str[strlen(schedstr)] == ',') str += strlen(schedstr) + 1; if (get_option(&str, &par)) - prof_shift = par; - pr_info("kernel schedule profiling enabled (shift: %ld)\n", + prof_shift = clamp(par, 0, BITS_PER_LONG - 1); + pr_info("kernel schedule profiling enabled (shift: %u)\n", prof_shift); } else if (!strncmp(str, kvmstr, strlen(kvmstr))) { prof_on = KVM_PROFILING; if (str[strlen(kvmstr)] == ',') str += strlen(kvmstr) + 1; if (get_option(&str, &par)) - prof_shift = par; - pr_info("kernel KVM profiling enabled (shift: %ld)\n", + prof_shift = clamp(par, 0, BITS_PER_LONG - 1); + pr_info("kernel KVM profiling enabled (shift: %u)\n", prof_shift); } else if (get_option(&str, &par)) { - prof_shift = par; + prof_shift = clamp(par, 0, BITS_PER_LONG - 1); prof_on = CPU_PROFILING; - pr_info("kernel profiling enabled (shift: %ld)\n", + pr_info("kernel profiling enabled (shift: %u)\n", prof_shift); } return 1; @@ -468,7 +469,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) unsigned long p = *ppos; ssize_t read; char *pnt; - unsigned int sample_step = 1 << prof_shift; + unsigned long sample_step = 1UL << prof_shift; profile_flip_buffers(); if (p >= (prof_len+1)*sizeof(unsigned int)) -- cgit v1.2.3 From 1e1c15839df084f4011825fee922aa976c9159dc Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 7 Sep 2021 20:00:00 -0700 Subject: fs/epoll: use a per-cpu counter for user's watches count This counter tracks the number of watches a user has, to compare against the 'max_user_watches' limit. This causes a scalability bottleneck on SPECjbb2015 on large systems as there is only one user. Changing to a per-cpu counter increases throughput of the benchmark by about 30% on a 16-socket, > 1000 thread system. 
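For reference, the percpu_counter API adopted here has roughly this shape (a minimal usage sketch with illustrative names; the real patch embeds the counter in struct user_struct):

	#include <linux/percpu_counter.h>

	static struct percpu_counter watches;

	static int watches_init(void)
	{
		/* Spreads the count across CPUs; can fail with -ENOMEM. */
		return percpu_counter_init(&watches, 0, GFP_KERNEL);
	}

	static int watches_charge(s64 limit)
	{
		/* Only the compare sums across CPUs; the increment stays a
		 * cheap per-cpu operation, which removes the bottleneck. */
		if (percpu_counter_compare(&watches, limit) >= 0)
			return -ENOSPC;
		percpu_counter_inc(&watches);
		return 0;
	}

	static void watches_exit(void)
	{
		percpu_counter_destroy(&watches);
	}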
[rdunlap@infradead.org: fix build errors in kernel/user.c when CONFIG_EPOLL=n] [npiggin@gmail.com: move ifdefs into wrapper functions, slightly improve panic message] Link: https://lkml.kernel.org/r/1628051945.fens3r99ox.astroid@bobo.none [akpm@linux-foundation.org: tweak user_epoll_alloc(), per Guenter] Link: https://lkml.kernel.org/r/20210804191421.GA1900577@roeck-us.net Link: https://lkml.kernel.org/r/20210802032013.2751916-1-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reported-by: Anton Blanchard Cc: Alexander Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 18 ++++++++++-------- include/linux/sched/user.h | 3 ++- kernel/user.c | 25 +++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1e596e1d0bba..648ed77f4164 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -723,7 +723,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) */ call_rcu(&epi->rcu, epi_rcu_free); - atomic_long_dec(&ep->user->epoll_watches); + percpu_counter_dec(&ep->user->epoll_watches); return 0; } @@ -1439,7 +1439,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, { int error, pwake = 0; __poll_t revents; - long user_watches; struct epitem *epi; struct ep_pqueue epq; struct eventpoll *tep = NULL; @@ -1449,11 +1448,15 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, lockdep_assert_irqs_enabled(); - user_watches = atomic_long_read(&ep->user->epoll_watches); - if (unlikely(user_watches >= max_user_watches)) + if (unlikely(percpu_counter_compare(&ep->user->epoll_watches, + max_user_watches) >= 0)) return -ENOSPC; - if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) + percpu_counter_inc(&ep->user->epoll_watches); + + if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) { + percpu_counter_dec(&ep->user->epoll_watches); return -ENOMEM; + } /* Item initialization follow here ... */ INIT_LIST_HEAD(&epi->rdllink); @@ -1466,17 +1469,16 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, mutex_lock_nested(&tep->mtx, 1); /* Add the current item to the list of active epoll hook for this file */ if (unlikely(attach_epitem(tfile, epi) < 0)) { - kmem_cache_free(epi_cache, epi); if (tep) mutex_unlock(&tep->mtx); + kmem_cache_free(epi_cache, epi); + percpu_counter_dec(&ep->user->epoll_watches); return -ENOMEM; } if (full_check && !tep) list_file(tfile); - atomic_long_inc(&ep->user->epoll_watches); - /* * Add the current item to the RB tree. All RB tree operations are * protected by "mtx", and ep_insert() is called with "mtx" held. 
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 2462f7d07695..00ed419dd464 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -13,7 +14,7 @@ struct user_struct { refcount_t __count; /* reference count */ #ifdef CONFIG_EPOLL - atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ + struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */ #endif unsigned long unix_inflight; /* How many files in flight in unix sockets */ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ diff --git a/kernel/user.c b/kernel/user.c index c82399c1618a..e2cf8c22b539 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -129,6 +129,22 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent) return NULL; } +static int user_epoll_alloc(struct user_struct *up) +{ +#ifdef CONFIG_EPOLL + return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL); +#else + return 0; +#endif +} + +static void user_epoll_free(struct user_struct *up) +{ +#ifdef CONFIG_EPOLL + percpu_counter_destroy(&up->epoll_watches); +#endif +} + /* IRQs are disabled and uidhash_lock is held upon function entry. * IRQ state (as stored in flags) is restored and uidhash_lock released * upon function exit. @@ -138,6 +154,7 @@ static void free_user(struct user_struct *up, unsigned long flags) { uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); + user_epoll_free(up); kmem_cache_free(uid_cachep, up); } @@ -185,6 +202,10 @@ struct user_struct *alloc_uid(kuid_t uid) new->uid = uid; refcount_set(&new->__count, 1); + if (user_epoll_alloc(new)) { + kmem_cache_free(uid_cachep, new); + return NULL; + } ratelimit_state_init(&new->ratelimit, HZ, 100); ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE); @@ -195,6 +216,7 @@ struct user_struct *alloc_uid(kuid_t uid) spin_lock_irq(&uidhash_lock); up = uid_hash_find(uid, hashent); if (up) { + user_epoll_free(new); kmem_cache_free(uid_cachep, new); } else { uid_hash_insert(new, hashent); @@ -216,6 +238,9 @@ static int __init uid_cache_init(void) for(n = 0; n < UIDHASH_SZ; ++n) INIT_HLIST_HEAD(uidhash_table + n); + if (user_epoll_alloc(&root_user)) + panic("root_user epoll percpu counter alloc failed"); + /* Insert the root user immediately (init already runs as root) */ spin_lock_irq(&uidhash_lock); uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID)); -- cgit v1.2.3 From 05da8113c9ba63a8913e6c73dc06ed01cae55f68 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 7 Sep 2021 20:00:35 -0700 Subject: kernel/fork.c: unexport get_{mm,task}_exe_file Only used by core code and the tomoyo which can't be a module either. 
Link: https://lkml.kernel.org/r/20210820095430.445242-1-hch@lst.de Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 44f4c2d83763..df7296474ecd 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1187,7 +1187,6 @@ struct file *get_mm_exe_file(struct mm_struct *mm) rcu_read_unlock(); return exe_file; } -EXPORT_SYMBOL(get_mm_exe_file); /** * get_task_exe_file - acquire a reference to the task's executable file @@ -1210,7 +1209,6 @@ struct file *get_task_exe_file(struct task_struct *task) task_unlock(task); return exe_file; } -EXPORT_SYMBOL(get_task_exe_file); /** * get_task_mm - acquire a reference to the task's mm -- cgit v1.2.3 From e1fbbd073137a9d63279f6bf363151a938347640 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Tue, 7 Sep 2021 20:00:41 -0700 Subject: prctl: allow to setup brk for et_dyn executables Keno Fischer reported that when a binary is loaded via ld-linux-x the prctl(PR_SET_MM_MAP) doesn't allow setting up the brk value because it lies before mm:end_data. For example a test program shows | # ~/t | | start_code 401000 | end_code 401a15 | start_stack 7ffce4577dd0 | start_data 403e10 | end_data 40408c | start_brk b5b000 | sbrk(0) b5b000 and when executed via ld-linux | # /lib64/ld-linux-x86-64.so.2 ~/t | | start_code 7fc25b0a4000 | end_code 7fc25b0c4524 | start_stack 7fffcc6b2400 | start_data 7fc25b0ce4c0 | end_data 7fc25b0cff98 | start_brk 55555710c000 | sbrk(0) 55555710c000 This of course prevents criu from restoring such programs. Looking into how the kernel operates with brk/start_brk inside the brk() syscall, I don't see any problem if we allow setting up brk/start_brk without checking for end_data. Even if someone passes some weird address here on purpose, the worst possible result will be an unexpected unmapping of an existing vma (their own vma, since prctl works with the caller's memory), but the test for RLIMIT_DATA is still valid and a user won't be able to gain more memory by expanding VMAs via new values shipped with the prctl call. Link: https://lkml.kernel.org/r/20210121221207.GB2174@grain Fixes: bbdc6076d2e5 ("binfmt_elf: move brk out of mmap when doing direct loader exec") Signed-off-by: Cyrill Gorcunov Reported-by: Keno Fischer Acked-by: Andrey Vagin Tested-by: Andrey Vagin Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Kirill Tkhai Cc: Eric W. Biederman Cc: Pavel Tikhomirov Cc: Alexander Mikhalitsyn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index ef1a78f5d71c..6ec50924b517 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1959,13 +1959,6 @@ static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map) error = -EINVAL; - /* - * @brk should be after @end_data in traditional maps. - */ - if (prctl_map->start_brk <= prctl_map->end_data || - prctl_map->brk <= prctl_map->end_data) - goto out; - /* * Neither we should allow to override limits if they set. */ -- cgit v1.2.3 From 4b6b08f2e45edda4c067ac40833e3c1f84383c0b Mon Sep 17 00:00:00 2001 From: "Qiang.Zhang" Date: Tue, 31 Aug 2021 10:29:19 +0800 Subject: tracing/osnoise: Fix missed cpus_read_unlock() in start_per_cpu_kthreads() When start_kthread() returns an error, cpus_read_unlock() needs to be called.
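The error-path idiom the fix restores, sketched out of context (start_one() is a hypothetical stand-in for start_kthread()):

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static int start_one(unsigned int cpu)
	{
		return 0;	/* stub for illustration */
	}

	static int start_all(void)
	{
		int retval = 0;
		unsigned int cpu;

		cpus_read_lock();
		for_each_online_cpu(cpu) {
			retval = start_one(cpu);
			if (retval)
				break;	/* never return with the lock held */
		}
		cpus_read_unlock();	/* reached on success and on error */

		return retval;
	}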
Link: https://lkml.kernel.org/r/20210831022919.27630-1-qiang.zhang@windriver.com Cc: Fixes: c8895e271f79 ("trace/osnoise: Support hotplug operations") Acked-by: Daniel Bristot de Oliveira Signed-off-by: Qiang.Zhang Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_osnoise.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 65b08b8e5bf8..ce053619f289 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -1548,7 +1548,7 @@ static int start_kthread(unsigned int cpu) static int start_per_cpu_kthreads(struct trace_array *tr) { struct cpumask *current_mask = &save_cpumask; - int retval; + int retval = 0; int cpu; cpus_read_lock(); @@ -1568,13 +1568,13 @@ static int start_per_cpu_kthreads(struct trace_array *tr) retval = start_kthread(cpu); if (retval) { stop_per_cpu_kthreads(); - return retval; + break; } } cpus_read_unlock(); - return 0; + return retval; } #ifdef CONFIG_HOTPLUG_CPU -- cgit v1.2.3 From 0be083cee42ec78ba6f9148f5b12a00aa2fb8bd3 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 1 Sep 2021 16:55:13 +0300 Subject: tracing: synth events: increase max fields count Sometimes it is useful to construct larger synthetic trace events. Increase 'SYNTH_FIELDS_MAX' (maximum number of fields in a synthetic event) from 32 to 64. Link: https://lkml.kernel.org/r/20210901135513.3087062-1-dedekind1@gmail.com Signed-off-by: Artem Bityutskiy Acked-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_synth.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h index 4007fe95cf42..b29595fe3ac5 100644 --- a/kernel/trace/trace_synth.h +++ b/kernel/trace/trace_synth.h @@ -5,7 +5,7 @@ #include "trace_dynevent.h" #define SYNTH_SYSTEM "synthetic" -#define SYNTH_FIELDS_MAX 32 +#define SYNTH_FIELDS_MAX 64 #define STR_VAR_LEN_MAX MAX_FILTER_STR_VAL /* must be multiple of sizeof(u64) */ -- cgit v1.2.3 From c910db943d35d4ac4b77570ece76e0799af24233 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Thu, 2 Sep 2021 15:57:12 -0500 Subject: tracing: Dynamically allocate the per-elt hist_elt_data array Setting the hist_elt_data.field_var_str[] array unconditionally to a size of SYNTH_FIELDS_MAX elements wastes space unnecessarily. The actual number of elements needed can be calculated at run-time instead. In most cases, this will save a lot of space since it's a per-elt array which isn't normally close to being full. It also allows us to increase SYNTH_FIELDS_MAX without worrying about even more wastage when we do that.
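The before/after allocation pattern, reduced to a sketch (hypothetical names; the real code computes the count from the trigger's field variables and frees through its existing teardown path):

	#include <linux/slab.h>

	struct elt_data {
		char **strs;		/* was: char *strs[SYNTH_FIELDS_MAX]; */
		int n_strs;		/* bound for the free loop */
	};

	static int elt_data_alloc_strs(struct elt_data *d, int n_str)
	{
		d->strs = kcalloc(n_str, sizeof(char *), GFP_KERNEL);
		if (!d->strs)
			return -ENOMEM;
		d->n_strs = n_str;
		return 0;
	}

	static void elt_data_free_strs(struct elt_data *d)
	{
		int i;

		for (i = 0; i < d->n_strs; i++)
			kfree(d->strs[i]);
		kfree(d->strs);
	}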
Link: https://lkml.kernel.org/r/d52ae0ad5e1b59af7c4f54faf3fc098461fd82b3.camel@kernel.org Signed-off-by: Tom Zanussi Tested-by: Artem Bityutskiy Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 9d91b1c06957..a6061a69aa84 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -508,7 +508,8 @@ struct track_data { struct hist_elt_data { char *comm; u64 *var_ref_vals; - char *field_var_str[SYNTH_FIELDS_MAX]; + char **field_var_str; + int n_field_var_str; }; struct snapshot_context { @@ -1401,9 +1402,11 @@ static void hist_elt_data_free(struct hist_elt_data *elt_data) { unsigned int i; - for (i = 0; i < SYNTH_FIELDS_MAX; i++) + for (i = 0; i < elt_data->n_field_var_str; i++) kfree(elt_data->field_var_str[i]); + kfree(elt_data->field_var_str); + kfree(elt_data->comm); kfree(elt_data); } @@ -1451,6 +1454,13 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) size = STR_VAR_LEN_MAX; + elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL); + if (!elt_data->field_var_str) { + hist_elt_data_free(elt_data); + return -EINVAL; + } + elt_data->n_field_var_str = n_str; + for (i = 0; i < n_str; i++) { elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL); if (!elt_data->field_var_str[i]) { -- cgit v1.2.3 From cfd799837dbc48499abb05d1891b3d9992354d3a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 9 Sep 2021 04:38:03 +0900 Subject: tracing/boot: Fix to loop on only subkeys Since the commit e5efaeb8a8f5 ("bootconfig: Support mixing a value and subkeys under a key") allows a value node and key nodes to co-exist under a node, xbc_node_for_each_child() returns not only key nodes but also value nodes. Boot-time tracing uses xbc_node_for_each_child() to iterate over the events, groups and instances, but those must be key nodes. Thus it must use xbc_node_for_each_subkey().
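The distinction between the two iterators, in sketch form (illustrative only; assumes <linux/bootconfig.h>):

	#include <linux/bootconfig.h>
	#include <linux/printk.h>

	static void list_groups(struct xbc_node *events)
	{
		struct xbc_node *gnode;

		/* With mixed value/subkey support, xbc_node_for_each_child()
		 * may also yield a value node; xbc_node_for_each_subkey()
		 * yields key nodes only, which is what group/event iteration
		 * requires. */
		xbc_node_for_each_subkey(events, gnode)
			pr_info("group: %s\n", xbc_node_get_data(gnode));
	}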
Link: https://lkml.kernel.org/r/163112988361.74896.2267026262061819145.stgit@devnote2 Fixes: e5efaeb8a8f5 ("bootconfig: Support mixing a value and subkeys under a key") Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 1060b0446032..388e65d05978 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -522,14 +522,14 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) if (!node) return; /* per-event key starts with "event.GROUP.EVENT" */ - xbc_node_for_each_child(node, gnode) { + xbc_node_for_each_subkey(node, gnode) { data = xbc_node_get_data(gnode); if (!strcmp(data, "enable")) { enable_all = true; continue; } enable = false; - xbc_node_for_each_child(gnode, enode) { + xbc_node_for_each_subkey(gnode, enode) { data = xbc_node_get_data(enode); if (!strcmp(data, "enable")) { enable = true; @@ -625,7 +625,7 @@ trace_boot_init_instances(struct xbc_node *node) if (!node) return; - xbc_node_for_each_child(node, inode) { + xbc_node_for_each_subkey(node, inode) { p = xbc_node_get_data(inode); if (!p || *p == '\0') continue; -- cgit v1.2.3 From 4b692e861619353ce069e547a67c8d0e32d9ef3d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Sep 2021 15:18:10 -0700 Subject: kexec: move locking into do_kexec_load Patch series "compat: remove compat_alloc_user_space", v5. Going through compat_alloc_user_space() to convert indirect system call arguments tends to add complexity compared to handling the native and compat logic in the same code. This patch (of 6): The locking is the same between the native and compat version of sys_kexec_load(), so it can be done in the common implementation to reduce duplication. Link: https://lkml.kernel.org/r/20210727144859.4150043-1-arnd@kernel.org Link: https://lkml.kernel.org/r/20210727144859.4150043-2-arnd@kernel.org Signed-off-by: Arnd Bergmann Co-developed-by: Eric Biederman Co-developed-by: Christoph Hellwig Acked-by: "Eric W. Biederman" Cc: Catalin Marinas Cc: Will Deacon Cc: Thomas Bogendoerfer Cc: "James E.J. Bottomley" Cc: Helge Deller Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Christian Borntraeger Cc: "David S. Miller" Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Al Viro Cc: Feng Tang Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 44 ++++++++++++++++---------------------------- 1 file changed, 16 insertions(+), 28 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index c82c6c06f051..9c7aef8f4bb6 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -110,6 +110,17 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments, unsigned long i; int ret; + /* + * Because we write directly to the reserved memory region when loading + * crash kernels we need a mutex here to prevent multiple crash kernels + * from attempting to load simultaneously, and to prevent a crash kernel + * from loading over the top of a in use crash kernel. + * + * KISS: always take the mutex. 
+ */ + if (!mutex_trylock(&kexec_mutex)) + return -EBUSY; + if (flags & KEXEC_ON_CRASH) { dest_image = &kexec_crash_image; if (kexec_crash_image) @@ -121,7 +132,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments, if (nr_segments == 0) { /* Uninstall image */ kimage_free(xchg(dest_image, NULL)); - return 0; + ret = 0; + goto out_unlock; } if (flags & KEXEC_ON_CRASH) { /* @@ -134,7 +146,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments, ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags); if (ret) - return ret; + goto out_unlock; if (flags & KEXEC_PRESERVE_CONTEXT) image->preserve_context = 1; @@ -171,6 +183,8 @@ out: arch_kexec_protect_crashkres(); kimage_free(image); +out_unlock: + mutex_unlock(&kexec_mutex); return ret; } @@ -247,21 +261,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) return -EINVAL; - /* Because we write directly to the reserved memory - * region when loading crash kernels we need a mutex here to - * prevent multiple crash kernels from attempting to load - * simultaneously, and to prevent a crash kernel from loading - * over the top of a in use crash kernel. - * - * KISS: always take the mutex. - */ - if (!mutex_trylock(&kexec_mutex)) - return -EBUSY; - result = do_kexec_load(entry, nr_segments, segments, flags); - mutex_unlock(&kexec_mutex); - return result; } @@ -301,21 +302,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, return -EFAULT; } - /* Because we write directly to the reserved memory - * region when loading crash kernels we need a mutex here to - * prevent multiple crash kernels from attempting to load - * simultaneously, and to prevent a crash kernel from loading - * over the top of a in use crash kernel. - * - * KISS: always take the mutex. - */ - if (!mutex_trylock(&kexec_mutex)) - return -EBUSY; - result = do_kexec_load(entry, nr_segments, ksegments, flags); - mutex_unlock(&kexec_mutex); - return result; } #endif -- cgit v1.2.3 From 5d700a0fd71ded7096b97f01d276efc1a6579613 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Sep 2021 15:18:13 -0700 Subject: kexec: avoid compat_alloc_user_space kimage_alloc_init() expects a __user pointer, so compat_sys_kexec_load() uses compat_alloc_user_space() to convert the layout and put it back onto the user space caller stack. Moving the user space access into the syscall handler directly actually makes the code simpler, as the conversion for compat mode can now be done on kernel memory. Link: https://lkml.kernel.org/r/20210727144859.4150043-3-arnd@kernel.org Link: https://lore.kernel.org/lkml/YPbtsU4GX6PL7%2F42@infradead.org/ Link: https://lore.kernel.org/lkml/m1y2cbzmnw.fsf@fess.ebiederm.org/ Signed-off-by: Arnd Bergmann Co-developed-by: Eric Biederman Co-developed-by: Christoph Hellwig Acked-by: "Eric W. Biederman" Cc: Al Viro Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: "David S. Miller" Cc: Feng Tang Cc: Heiko Carstens Cc: Helge Deller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Michael Ellerman Cc: Paul Mackerras Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 61 ++++++++++++++++++++++++---------------------------------- 1 file changed, 25 insertions(+), 36 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index 9c7aef8f4bb6..b5e40f069768 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -19,26 +19,9 @@ #include "kexec_internal.h" -static int copy_user_segment_list(struct kimage *image, - unsigned long nr_segments, - struct kexec_segment __user *segments) -{ - int ret; - size_t segment_bytes; - - /* Read in the segments */ - image->nr_segments = nr_segments; - segment_bytes = nr_segments * sizeof(*segments); - ret = copy_from_user(image->segment, segments, segment_bytes); - if (ret) - ret = -EFAULT; - - return ret; -} - static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, unsigned long nr_segments, - struct kexec_segment __user *segments, + struct kexec_segment *segments, unsigned long flags) { int ret; @@ -58,10 +41,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, return -ENOMEM; image->start = entry; - - ret = copy_user_segment_list(image, nr_segments, segments); - if (ret) - goto out_free_image; + image->nr_segments = nr_segments; + memcpy(image->segment, segments, nr_segments * sizeof(*segments)); if (kexec_on_panic) { /* Enable special crash kernel control page alloc policy. */ @@ -104,7 +85,7 @@ out_free_image: } static int do_kexec_load(unsigned long entry, unsigned long nr_segments, - struct kexec_segment __user *segments, unsigned long flags) + struct kexec_segment *segments, unsigned long flags) { struct kimage **dest_image, *image; unsigned long i; @@ -250,7 +231,8 @@ static inline int kexec_load_check(unsigned long nr_segments, SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, struct kexec_segment __user *, segments, unsigned long, flags) { - int result; + struct kexec_segment *ksegments; + unsigned long result; result = kexec_load_check(nr_segments, flags); if (result) @@ -261,7 +243,12 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) return -EINVAL; - result = do_kexec_load(entry, nr_segments, segments, flags); + ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0])); + if (IS_ERR(ksegments)) + return PTR_ERR(ksegments); + + result = do_kexec_load(entry, nr_segments, ksegments, flags); + kfree(ksegments); return result; } @@ -273,7 +260,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, compat_ulong_t, flags) { struct compat_kexec_segment in; - struct kexec_segment out, __user *ksegments; + struct kexec_segment *ksegments; unsigned long i, result; result = kexec_load_check(nr_segments, flags); @@ -286,24 +273,26 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) return -EINVAL; - ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); + ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]), + GFP_KERNEL); + if (!ksegments) + return -ENOMEM; + for (i = 0; i < nr_segments; i++) { result = copy_from_user(&in, &segments[i], sizeof(in)); if (result) - return -EFAULT; + goto fail; - out.buf = compat_ptr(in.buf); - out.bufsz = in.bufsz; - out.mem = in.mem; - out.memsz = in.memsz; - - result = copy_to_user(&ksegments[i], &out, sizeof(out)); - 
if (result) - return -EFAULT; + ksegments[i].buf = compat_ptr(in.buf); + ksegments[i].bufsz = in.bufsz; + ksegments[i].mem = in.mem; + ksegments[i].memsz = in.memsz; } result = do_kexec_load(entry, nr_segments, ksegments, flags); +fail: + kfree(ksegments); return result; } #endif -- cgit v1.2.3 From 59ab844eed9c6b01d32dcb27b57accc23771b324 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Sep 2021 15:18:25 -0700 Subject: compat: remove some compat entry points These are all handled correctly when calling the native system call entry point, so remove the special cases. Link: https://lkml.kernel.org/r/20210727144859.4150043-6-arnd@kernel.org Signed-off-by: Arnd Bergmann Reviewed-by: Christoph Hellwig Cc: Al Viro Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: "David S. Miller" Cc: Eric Biederman Cc: Feng Tang Cc: Heiko Carstens Cc: Helge Deller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Michael Ellerman Cc: Paul Mackerras Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/unistd32.h | 10 ++++----- arch/mips/kernel/syscalls/syscall_n32.tbl | 10 ++++----- arch/mips/kernel/syscalls/syscall_o32.tbl | 10 ++++----- arch/parisc/kernel/syscalls/syscall.tbl | 8 +++---- arch/powerpc/kernel/syscalls/syscall.tbl | 10 ++++----- arch/s390/kernel/syscalls/syscall.tbl | 10 ++++----- arch/sparc/kernel/syscalls/syscall.tbl | 10 ++++----- arch/x86/entry/syscalls/syscall_32.tbl | 4 ++-- arch/x86/entry/syscalls/syscall_64.tbl | 2 +- include/linux/compat.h | 20 ----------------- include/uapi/asm-generic/unistd.h | 10 ++++----- kernel/sys_ni.c | 5 ----- mm/mempolicy.c | 37 ------------------------------- mm/migrate.c | 13 ----------- 14 files changed, 42 insertions(+), 117 deletions(-) (limited to 'kernel') diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 4e99e4b912ef..844f6ae58662 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -649,11 +649,11 @@ __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) #define __NR_inotify_rm_watch 318 __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) #define __NR_mbind 319 -__SYSCALL(__NR_mbind, compat_sys_mbind) +__SYSCALL(__NR_mbind, sys_mbind) #define __NR_get_mempolicy 320 -__SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy) +__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) #define __NR_set_mempolicy 321 -__SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy) +__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) #define __NR_openat 322 __SYSCALL(__NR_openat, compat_sys_openat) #define __NR_mkdirat 323 @@ -699,7 +699,7 @@ __SYSCALL(__NR_tee, sys_tee) #define __NR_vmsplice 343 __SYSCALL(__NR_vmsplice, sys_vmsplice) #define __NR_move_pages 344 -__SYSCALL(__NR_move_pages, compat_sys_move_pages) +__SYSCALL(__NR_move_pages, sys_move_pages) #define __NR_getcpu 345 __SYSCALL(__NR_getcpu, sys_getcpu) #define __NR_epoll_pwait 346 @@ -811,7 +811,7 @@ __SYSCALL(__NR_rseq, sys_rseq) #define __NR_io_pgetevents 399 __SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents) #define __NR_migrate_pages 400 -__SYSCALL(__NR_migrate_pages, compat_sys_migrate_pages) +__SYSCALL(__NR_migrate_pages, sys_migrate_pages) #define __NR_kexec_file_load 401 __SYSCALL(__NR_kexec_file_load, sys_kexec_file_load) /* 402 is unused */ diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl 
b/arch/mips/kernel/syscalls/syscall_n32.tbl index 56c8d3cf42ed..70e32de2bcaa 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -239,9 +239,9 @@ 228 n32 clock_nanosleep sys_clock_nanosleep_time32 229 n32 tgkill sys_tgkill 230 n32 utimes sys_utimes_time32 -231 n32 mbind compat_sys_mbind -232 n32 get_mempolicy compat_sys_get_mempolicy -233 n32 set_mempolicy compat_sys_set_mempolicy +231 n32 mbind sys_mbind +232 n32 get_mempolicy sys_get_mempolicy +233 n32 set_mempolicy sys_set_mempolicy 234 n32 mq_open compat_sys_mq_open 235 n32 mq_unlink sys_mq_unlink 236 n32 mq_timedsend sys_mq_timedsend_time32 @@ -258,7 +258,7 @@ 247 n32 inotify_init sys_inotify_init 248 n32 inotify_add_watch sys_inotify_add_watch 249 n32 inotify_rm_watch sys_inotify_rm_watch -250 n32 migrate_pages compat_sys_migrate_pages +250 n32 migrate_pages sys_migrate_pages 251 n32 openat sys_openat 252 n32 mkdirat sys_mkdirat 253 n32 mknodat sys_mknodat @@ -279,7 +279,7 @@ 268 n32 sync_file_range sys_sync_file_range 269 n32 tee sys_tee 270 n32 vmsplice sys_vmsplice -271 n32 move_pages compat_sys_move_pages +271 n32 move_pages sys_move_pages 272 n32 set_robust_list compat_sys_set_robust_list 273 n32 get_robust_list compat_sys_get_robust_list 274 n32 kexec_load compat_sys_kexec_load diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 201237fd0f43..a61c35edaa74 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -279,9 +279,9 @@ 265 o32 clock_nanosleep sys_clock_nanosleep_time32 266 o32 tgkill sys_tgkill 267 o32 utimes sys_utimes_time32 -268 o32 mbind sys_mbind compat_sys_mbind -269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +268 o32 mbind sys_mbind +269 o32 get_mempolicy sys_get_mempolicy +270 o32 set_mempolicy sys_set_mempolicy 271 o32 mq_open sys_mq_open compat_sys_mq_open 272 o32 mq_unlink sys_mq_unlink 273 o32 mq_timedsend sys_mq_timedsend_time32 @@ -298,7 +298,7 @@ 284 o32 inotify_init sys_inotify_init 285 o32 inotify_add_watch sys_inotify_add_watch 286 o32 inotify_rm_watch sys_inotify_rm_watch -287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages +287 o32 migrate_pages sys_migrate_pages 288 o32 openat sys_openat compat_sys_openat 289 o32 mkdirat sys_mkdirat 290 o32 mknodat sys_mknodat @@ -319,7 +319,7 @@ 305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range 306 o32 tee sys_tee 307 o32 vmsplice sys_vmsplice -308 o32 move_pages sys_move_pages compat_sys_move_pages +308 o32 move_pages sys_move_pages 309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list 310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list 311 o32 kexec_load sys_kexec_load compat_sys_kexec_load diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 0bf854b70612..bf751e0732b7 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -292,9 +292,9 @@ 258 32 clock_nanosleep sys_clock_nanosleep_time32 258 64 clock_nanosleep sys_clock_nanosleep 259 common tgkill sys_tgkill -260 common mbind sys_mbind compat_sys_mbind -261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -262 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +260 common mbind sys_mbind +261 common get_mempolicy sys_get_mempolicy +262 common set_mempolicy sys_set_mempolicy # 263 was vserver 
264 common add_key sys_add_key 265 common request_key sys_request_key @@ -331,7 +331,7 @@ 292 64 sync_file_range sys_sync_file_range 293 common tee sys_tee 294 common vmsplice sys_vmsplice -295 common move_pages sys_move_pages compat_sys_move_pages +295 common move_pages sys_move_pages 296 common getcpu sys_getcpu 297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 298 common statfs64 sys_statfs64 compat_sys_statfs64 diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 29b55e2e035c..7bef917cc84e 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -330,10 +330,10 @@ 256 64 sys_debug_setcontext sys_ni_syscall 256 spu sys_debug_setcontext sys_ni_syscall # 257 reserved for vserver -258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages -259 nospu mbind sys_mbind compat_sys_mbind -260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +258 nospu migrate_pages sys_migrate_pages +259 nospu mbind sys_mbind +260 nospu get_mempolicy sys_get_mempolicy +261 nospu set_mempolicy sys_set_mempolicy 262 nospu mq_open sys_mq_open compat_sys_mq_open 263 nospu mq_unlink sys_mq_unlink 264 32 mq_timedsend sys_mq_timedsend_time32 @@ -381,7 +381,7 @@ 298 common faccessat sys_faccessat 299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list 300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list -301 common move_pages sys_move_pages compat_sys_move_pages +301 common move_pages sys_move_pages 302 common getcpu sys_getcpu 303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 304 32 utimensat sys_utimensat_time32 diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index aa9d68b8ee14..df5261e5cfe1 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -274,9 +274,9 @@ 265 common statfs64 sys_statfs64 compat_sys_statfs64 266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages -268 common mbind sys_mbind compat_sys_mbind -269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +268 common mbind sys_mbind sys_mbind +269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy +270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy 271 common mq_open sys_mq_open compat_sys_mq_open 272 common mq_unlink sys_mq_unlink sys_mq_unlink 273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32 @@ -293,7 +293,7 @@ 284 common inotify_init sys_inotify_init sys_inotify_init 285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch 286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch -287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages +287 common migrate_pages sys_migrate_pages sys_migrate_pages 288 common openat sys_openat compat_sys_openat 289 common mkdirat sys_mkdirat sys_mkdirat 290 common mknodat sys_mknodat sys_mknodat @@ -317,7 +317,7 @@ 307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range 308 common tee sys_tee sys_tee 309 common vmsplice sys_vmsplice sys_vmsplice -310 common move_pages sys_move_pages compat_sys_move_pages +310 common move_pages sys_move_pages sys_move_pages 311 common getcpu sys_getcpu sys_getcpu 312 common epoll_pwait sys_epoll_pwait 
compat_sys_epoll_pwait 313 common utimes sys_utimes sys_utimes_time32 diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 7893104718c2..c37764dc764d 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -365,12 +365,12 @@ 299 common unshare sys_unshare 300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list -302 common migrate_pages sys_migrate_pages compat_sys_migrate_pages -303 common mbind sys_mbind compat_sys_mbind -304 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -305 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +302 common migrate_pages sys_migrate_pages +303 common mbind sys_mbind +304 common get_mempolicy sys_get_mempolicy +305 common set_mempolicy sys_set_mempolicy 306 common kexec_load sys_kexec_load compat_sys_kexec_load -307 common move_pages sys_move_pages compat_sys_move_pages +307 common move_pages sys_move_pages 308 common getcpu sys_getcpu 309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 310 32 utimensat sys_utimensat_time32 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 61f18b72552b..960a021d543e 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -286,7 +286,7 @@ 272 i386 fadvise64_64 sys_ia32_fadvise64_64 273 i386 vserver 274 i386 mbind sys_mbind -275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy +275 i386 get_mempolicy sys_get_mempolicy 276 i386 set_mempolicy sys_set_mempolicy 277 i386 mq_open sys_mq_open compat_sys_mq_open 278 i386 mq_unlink sys_mq_unlink @@ -328,7 +328,7 @@ 314 i386 sync_file_range sys_ia32_sync_file_range 315 i386 tee sys_tee 316 i386 vmsplice sys_vmsplice -317 i386 move_pages sys_move_pages compat_sys_move_pages +317 i386 move_pages sys_move_pages 318 i386 getcpu sys_getcpu 319 i386 epoll_pwait sys_epoll_pwait 320 i386 utimensat sys_utimensat_time32 diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 807b6a1de8e8..18b5500ea8bf 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -398,7 +398,7 @@ 530 x32 set_robust_list compat_sys_set_robust_list 531 x32 get_robust_list compat_sys_get_robust_list 532 x32 vmsplice sys_vmsplice -533 x32 move_pages compat_sys_move_pages +533 x32 move_pages sys_move_pages 534 x32 preadv compat_sys_preadv64 535 x32 pwritev compat_sys_pwritev64 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo diff --git a/include/linux/compat.h b/include/linux/compat.h index 3a2ac5afee30..2d42cebd1fb8 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -799,26 +799,6 @@ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr /* mm/fadvise.c: No generic prototype for fadvise64_64 */ /* mm/, CONFIG_MMU only */ -asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, - compat_ulong_t mode, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, compat_ulong_t flags); -asmlinkage long compat_sys_get_mempolicy(int __user *policy, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, - compat_ulong_t addr, - compat_ulong_t flags); -asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, - compat_ulong_t maxnode); -asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, - compat_ulong_t maxnode, const 
compat_ulong_t __user *old_nodes, - const compat_ulong_t __user *new_nodes); -asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, - __u32 __user *pages, - const int __user *nodes, - int __user *status, - int flags); - asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 14c8fe863c6d..1c5fb86d455a 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise) #define __NR_remap_file_pages 234 __SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) #define __NR_mbind 235 -__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind) +__SYSCALL(__NR_mbind, sys_mbind) #define __NR_get_mempolicy 236 -__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy) +__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) #define __NR_set_mempolicy 237 -__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy) +__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) #define __NR_migrate_pages 238 -__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages) +__SYSCALL(__NR_migrate_pages, sys_migrate_pages) #define __NR_move_pages 239 -__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages) +__SYSCALL(__NR_move_pages, sys_move_pages) #endif #define __NR_rt_tgsigqueueinfo 240 diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 64578adfe115..f43d89d92860 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -292,15 +292,10 @@ COND_SYSCALL(process_madvise); COND_SYSCALL(process_mrelease); COND_SYSCALL(remap_file_pages); COND_SYSCALL(mbind); -COND_SYSCALL_COMPAT(mbind); COND_SYSCALL(get_mempolicy); -COND_SYSCALL_COMPAT(get_mempolicy); COND_SYSCALL(set_mempolicy); -COND_SYSCALL_COMPAT(set_mempolicy); COND_SYSCALL(migrate_pages); -COND_SYSCALL_COMPAT(migrate_pages); COND_SYSCALL(move_pages); -COND_SYSCALL_COMPAT(move_pages); COND_SYSCALL(perf_event_open); COND_SYSCALL(accept4); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index eb95578f5997..8d14240896a8 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1649,43 +1649,6 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); } -#ifdef CONFIG_COMPAT - -COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, - compat_ulong_t __user *, nmask, - compat_ulong_t, maxnode, - compat_ulong_t, addr, compat_ulong_t, flags) -{ - return kernel_get_mempolicy(policy, (unsigned long __user *)nmask, - maxnode, addr, flags); -} - -COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, - compat_ulong_t, maxnode) -{ - return kernel_set_mempolicy(mode, (unsigned long __user *)nmask, maxnode); -} - -COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, - compat_ulong_t, mode, compat_ulong_t __user *, nmask, - compat_ulong_t, maxnode, compat_ulong_t, flags) -{ - return kernel_mbind(start, len, mode, (unsigned long __user *)nmask, - maxnode, flags); -} - -COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, - compat_ulong_t, maxnode, - const compat_ulong_t __user *, old_nodes, - const compat_ulong_t __user *, new_nodes) -{ - return kernel_migrate_pages(pid, maxnode, - (const unsigned long __user *)old_nodes, - (const unsigned long __user *)new_nodes); -} - -#endif /* CONFIG_COMPAT */ - bool vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | 
VM_PFNMAP)) diff --git a/mm/migrate.c b/mm/migrate.c index 2bc494875cea..a6a7743ee98f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2047,19 +2047,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); } -#ifdef CONFIG_COMPAT -COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages, - compat_uptr_t __user *, pages, - const int __user *, nodes, - int __user *, status, - int, flags) -{ - return kernel_move_pages(pid, nr_pages, - (const void __user *__user *)pages, - nodes, status, flags); -} -#endif /* CONFIG_COMPAT */ - #ifdef CONFIG_NUMA_BALANCING /* * Returns true if this is a safe migration target node for misplaced NUMA -- cgit v1.2.3 From a7a08b275a8bbade798c4bdaad07ade68fe7003c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Sep 2021 15:18:29 -0700 Subject: arch: remove compat_alloc_user_space All users of compat_alloc_user_space() and copy_in_user() have been removed from the kernel, only a few functions in sparc remain that can be changed to calling arch_copy_in_user() instead. Link: https://lkml.kernel.org/r/20210727144859.4150043-7-arnd@kernel.org Signed-off-by: Arnd Bergmann Reviewed-by: Christoph Hellwig Cc: Al Viro Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: "David S. Miller" Cc: Eric Biederman Cc: Feng Tang Cc: Heiko Carstens Cc: Helge Deller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Michael Ellerman Cc: Paul Mackerras Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/compat.h | 5 --- arch/arm64/include/asm/uaccess.h | 11 ----- arch/arm64/lib/Makefile | 2 +- arch/arm64/lib/copy_in_user.S | 77 --------------------------------- arch/mips/cavium-octeon/octeon-memcpy.S | 2 - arch/mips/include/asm/compat.h | 8 ---- arch/mips/include/asm/uaccess.h | 26 ----------- arch/mips/lib/memcpy.S | 11 ----- arch/parisc/include/asm/compat.h | 6 --- arch/parisc/include/asm/uaccess.h | 2 - arch/parisc/lib/memcpy.c | 9 ---- arch/powerpc/include/asm/compat.h | 16 ------- arch/s390/include/asm/compat.h | 10 ----- arch/s390/include/asm/uaccess.h | 3 -- arch/s390/lib/uaccess.c | 63 --------------------------- arch/sparc/include/asm/compat.h | 19 -------- arch/sparc/kernel/process_64.c | 2 +- arch/sparc/kernel/signal32.c | 12 ++--- arch/sparc/kernel/signal_64.c | 8 ++-- arch/x86/include/asm/compat.h | 13 ------ arch/x86/include/asm/uaccess_64.h | 7 --- include/linux/compat.h | 2 - include/linux/uaccess.h | 10 ----- kernel/compat.c | 21 --------- 24 files changed, 12 insertions(+), 333 deletions(-) delete mode 100644 arch/arm64/lib/copy_in_user.S (limited to 'kernel') diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 79c1a750e357..eaa6ca062d89 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -107,11 +107,6 @@ struct compat_statfs { #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) #define COMPAT_MINSIGSTKSZ 2048 -static inline void __user *arch_compat_alloc_user_space(long len) -{ - return (void __user *)compat_user_stack_pointer() - len; -} - struct compat_ipc64_perm { compat_key_t key; __compat_uid32_t uid; diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index b5f08621fa29..190b494e22ab 100644 --- a/arch/arm64/include/asm/uaccess.h +++ 
b/arch/arm64/include/asm/uaccess.h @@ -430,17 +430,6 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi __actu_ret; \ }) -extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); -#define raw_copy_in_user(to, from, n) \ -({ \ - unsigned long __aciu_ret; \ - uaccess_ttbr0_enable(); \ - __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \ - __uaccess_mask_ptr(from), (n)); \ - uaccess_ttbr0_disable(); \ - __aciu_ret; \ -}) - #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 6dd56a49790a..0941180a86d3 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 lib-y := clear_user.o delay.o copy_from_user.o \ - copy_to_user.o copy_in_user.o copy_page.o \ + copy_to_user.o copy_page.o \ clear_page.o csum.o insn.o memchr.o memcpy.o \ memset.o memcmp.o strcmp.o strncmp.o strlen.o \ strnlen.o strchr.o strrchr.o tishift.o diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S deleted file mode 100644 index dbea3799c3ef..000000000000 --- a/arch/arm64/lib/copy_in_user.S +++ /dev/null @@ -1,77 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copy from user space to user space - * - * Copyright (C) 2012 ARM Ltd. - */ - -#include - -#include -#include -#include - -/* - * Copy from user space to user space (alignment handled by the hardware) - * - * Parameters: - * x0 - to - * x1 - from - * x2 - n - * Returns: - * x0 - bytes not copied - */ - .macro ldrb1 reg, ptr, val - user_ldst 9998f, ldtrb, \reg, \ptr, \val - .endm - - .macro strb1 reg, ptr, val - user_ldst 9998f, sttrb, \reg, \ptr, \val - .endm - - .macro ldrh1 reg, ptr, val - user_ldst 9997f, ldtrh, \reg, \ptr, \val - .endm - - .macro strh1 reg, ptr, val - user_ldst 9997f, sttrh, \reg, \ptr, \val - .endm - - .macro ldr1 reg, ptr, val - user_ldst 9997f, ldtr, \reg, \ptr, \val - .endm - - .macro str1 reg, ptr, val - user_ldst 9997f, sttr, \reg, \ptr, \val - .endm - - .macro ldp1 reg1, reg2, ptr, val - user_ldp 9997f, \reg1, \reg2, \ptr, \val - .endm - - .macro stp1 reg1, reg2, ptr, val - user_stp 9997f, \reg1, \reg2, \ptr, \val - .endm - -end .req x5 -srcin .req x15 -SYM_FUNC_START(__arch_copy_in_user) - add end, x0, x2 - mov srcin, x1 -#include "copy_template.S" - mov x0, #0 - ret -SYM_FUNC_END(__arch_copy_in_user) -EXPORT_SYMBOL(__arch_copy_in_user) - - .section .fixup,"ax" - .align 2 -9997: cmp dst, dstin - b.ne 9998f - // Before being absolutely sure we couldn't copy anything, try harder -USER(9998f, ldtrb tmp1w, [srcin]) -USER(9998f, sttrb tmp1w, [dst]) - add dst, dst, #1 -9998: sub x0, end, dst // bytes not copied - ret - .previous diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 600d018cf354..0a515cde1c18 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -154,8 +154,6 @@ FEXPORT(__raw_copy_from_user) EXPORT_SYMBOL(__raw_copy_from_user) FEXPORT(__raw_copy_to_user) EXPORT_SYMBOL(__raw_copy_to_user) -FEXPORT(__raw_copy_in_user) -EXPORT_SYMBOL(__raw_copy_in_user) /* * Note: dst & src may be unaligned, len may be 0 * Temps diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 53f015a1b0a7..bbb3bc5a42fd 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -96,14 +96,6 @@ struct compat_statfs { #define COMPAT_OFF_T_MAX 0x7fffffff 
-static inline void __user *arch_compat_alloc_user_space(long len) -{ - struct pt_regs *regs = (struct pt_regs *) - ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; - - return (void __user *) (regs->regs[29] - len); -} - struct compat_ipc64_perm { compat_key_t key; __compat_uid32_t uid; diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 783fecce65c8..f8f74f9f5883 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -428,7 +428,6 @@ do { \ extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n); extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n); -extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n); static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) @@ -480,31 +479,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER -static inline unsigned long -raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - register void __user *__cu_to_r __asm__("$4"); - register const void __user *__cu_from_r __asm__("$5"); - register long __cu_len_r __asm__("$6"); - - __cu_to_r = to; - __cu_from_r = from; - __cu_len_r = n; - - __asm__ __volatile__( - ".set\tnoreorder\n\t" - __MODULE_JAL(__raw_copy_in_user) - ".set\tnoat\n\t" - __UA_ADDU "\t$1, %1, %2\n\t" - ".set\tat\n\t" - ".set\treorder" - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) - : - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", - DADDI_SCRATCH, "memory"); - return __cu_len_r; -} - extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); /* diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index e19fb98b5d38..277c32296636 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -666,8 +666,6 @@ FEXPORT(__raw_copy_from_user) EXPORT_SYMBOL(__raw_copy_from_user) FEXPORT(__raw_copy_to_user) EXPORT_SYMBOL(__raw_copy_to_user) -FEXPORT(__raw_copy_in_user) -EXPORT_SYMBOL(__raw_copy_in_user) #endif /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP @@ -703,13 +701,4 @@ EXPORT_SYMBOL(__raw_copy_to_user) __BUILD_COPY_USER EVA_MODE KERNELOP USEROP END(__raw_copy_to_user) -/* - * __copy_in_user (EVA) - */ - -LEAF(__raw_copy_in_user) -EXPORT_SYMBOL(__raw_copy_in_user) -__BUILD_COPY_USER EVA_MODE USEROP USEROP -END(__raw_copy_in_user) - #endif diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index b5d90e82b65d..c04f5a637c39 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -163,12 +163,6 @@ struct compat_shmid64_ds { #define COMPAT_ELF_NGREG 80 typedef compat_ulong_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; -static __inline__ void __user *arch_compat_alloc_user_space(long len) -{ - struct pt_regs *regs = ¤t->thread.regs; - return (void __user *)regs->gr[30]; -} - static inline int __is_compat_task(struct task_struct *t) { return test_tsk_thread_flag(t, TIF_32BIT); diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index ed2cd4fb479b..7c13314aae4a 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -215,8 +215,6 @@ unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src, unsigned long len); unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src, unsigned long len); -unsigned long 
__must_check raw_copy_in_user(void __user *dst, const void __user *src, - unsigned long len); #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 4b75388190b4..ea70a0e08321 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -38,14 +38,6 @@ unsigned long raw_copy_from_user(void *dst, const void __user *src, } EXPORT_SYMBOL(raw_copy_from_user); -unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len) -{ - mtsp(get_user_space(), 1); - mtsp(get_user_space(), 2); - return pa_memcpy((void __force *)dst, (void __force *)src, len); -} - - void * memcpy(void * dst,const void *src, size_t count) { mtsp(get_kernel_space(), 1); @@ -54,7 +46,6 @@ void * memcpy(void * dst,const void *src, size_t count) return dst; } -EXPORT_SYMBOL(raw_copy_in_user); EXPORT_SYMBOL(memcpy); bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index e33dcf134cdd..7afc96fb6524 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -83,22 +83,6 @@ struct compat_statfs { #define COMPAT_OFF_T_MAX 0x7fffffff -static inline void __user *arch_compat_alloc_user_space(long len) -{ - struct pt_regs *regs = current->thread.regs; - unsigned long usp = regs->gpr[1]; - - /* - * We can't access below the stack pointer in the 32bit ABI and - * can access 288 bytes in the 64bit big-endian ABI, - * or 512 bytes with the new ELFv2 little-endian ABI. - */ - if (!is_32bit_task()) - usp -= USER_REDZONE_SIZE; - - return (void __user *) (usp - len); -} - /* * ipc64_perm is actually 32/64bit clean but since the compat layer refers to * it we may as well define it. diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 8d49505b4a43..cdc7ae72529d 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -176,16 +176,6 @@ static inline int is_compat_task(void) return test_thread_flag(TIF_31BIT); } -static inline void __user *arch_compat_alloc_user_space(long len) -{ - unsigned long stack; - - stack = KSTK_ESP(current); - if (is_compat_task()) - stack &= 0x7fffffffUL; - return (void __user *) (stack - len); -} - #endif struct compat_ipc64_perm { diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 9ed9aa37e836..ce550d06abc3 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -227,9 +227,6 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s __get_user(x, ptr); \ }) -unsigned long __must_check -raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); - /* * Copy a null terminated string from userspace. */ diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 94ca99bde59d..a596e69d3c47 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -204,69 +204,6 @@ unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long } EXPORT_SYMBOL(raw_copy_to_user); -static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from, - unsigned long size) -{ - unsigned long tmp1, tmp2; - - tmp1 = -4096UL; - /* FIXME: copy with reduced length. 
*/ - asm volatile( - " lgr 0,%[spec]\n" - "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" - " jz 2f\n" - "1: algr %0,%3\n" - " slgr %1,%3\n" - " slgr %2,%3\n" - " j 0b\n" - "2:slgr %0,%0\n" - "3: \n" - EX_TABLE(0b,3b) - : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) - : [spec] "d" (0x810081UL) - : "cc", "memory", "0"); - return size; -} - -static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from, - unsigned long size) -{ - unsigned long tmp1; - - asm volatile( - " sacf 256\n" - " aghi %0,-1\n" - " jo 5f\n" - " bras %3,3f\n" - "0: aghi %0,257\n" - "1: mvc 0(1,%1),0(%2)\n" - " la %1,1(%1)\n" - " la %2,1(%2)\n" - " aghi %0,-1\n" - " jnz 1b\n" - " j 5f\n" - "2: mvc 0(256,%1),0(%2)\n" - " la %1,256(%1)\n" - " la %2,256(%2)\n" - "3: aghi %0,-256\n" - " jnm 2b\n" - "4: ex %0,1b-0b(%3)\n" - "5: slgr %0,%0\n" - "6: sacf 768\n" - EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) - : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) - : : "cc", "memory"); - return size; -} - -unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - if (copy_with_mvcos()) - return copy_in_user_mvcos(to, from, n); - return copy_in_user_mvc(to, from, n); -} -EXPORT_SYMBOL(raw_copy_in_user); - static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) { unsigned long tmp1, tmp2; diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 8b63410e830f..bd949fcf9d63 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -116,25 +116,6 @@ struct compat_statfs { #define COMPAT_OFF_T_MAX 0x7fffffff -#ifdef CONFIG_COMPAT -static inline void __user *arch_compat_alloc_user_space(long len) -{ - struct pt_regs *regs = current_thread_info()->kregs; - unsigned long usp = regs->u_regs[UREG_I6]; - - if (test_thread_64bit_stack(usp)) - usp += STACK_BIAS; - - if (test_thread_flag(TIF_32BIT)) - usp &= 0xffffffffUL; - - usp -= len; - usp &= ~0x7UL; - - return (void __user *) usp; -} -#endif - struct compat_ipc64_perm { compat_key_t key; __compat_uid32_t uid; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 093849bfda50..d1cc410d2f64 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -455,7 +455,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) distance = fp - psp; rval = (csp - distance); - if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) + if (raw_copy_in_user((void __user *)rval, (void __user *)psp, distance)) rval = 0; else if (!stack_64bit) { if (put_user(((u32)csp), diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 4276b9e003ca..6cc124a3bb98 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -435,9 +435,9 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs, (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); if (!wsaved) { - err |= copy_in_user((u32 __user *)sf, - (u32 __user *)(regs->u_regs[UREG_FP]), - sizeof(struct reg_window32)); + err |= raw_copy_in_user((u32 __user *)sf, + (u32 __user *)(regs->u_regs[UREG_FP]), + sizeof(struct reg_window32)); } else { struct reg_window *rp; @@ -567,9 +567,9 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs, err |= put_compat_sigset(&sf->mask, oldset, sizeof(compat_sigset_t)); if (!wsaved) { - err |= copy_in_user((u32 __user *)sf, - (u32 __user *)(regs->u_regs[UREG_FP]), - sizeof(struct reg_window32)); + err |= 
raw_copy_in_user((u32 __user *)sf, + (u32 __user *)(regs->u_regs[UREG_FP]), + sizeof(struct reg_window32)); } else { struct reg_window *rp; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index cea23cf95600..2a78d2af1265 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -406,10 +406,10 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t)); if (!wsaved) { - err |= copy_in_user((u64 __user *)sf, - (u64 __user *)(regs->u_regs[UREG_FP] + - STACK_BIAS), - sizeof(struct reg_window)); + err |= raw_copy_in_user((u64 __user *)sf, + (u64 __user *)(regs->u_regs[UREG_FP] + + STACK_BIAS), + sizeof(struct reg_window)); } else { struct reg_window *rp; diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 4ae01cdb99de..7516e4199b3c 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -156,19 +156,6 @@ struct compat_shmid64_ds { (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)) #endif -static inline void __user *arch_compat_alloc_user_space(long len) -{ - compat_uptr_t sp = task_pt_regs(current)->sp; - - /* - * -128 for the x32 ABI redzone. For IA32, it is not strictly - * necessary, but not harmful. - */ - sp -= 128; - - return (void __user *)round_down(sp - len, 16); -} - static inline bool in_x32_syscall(void) { #ifdef CONFIG_X86_X32_ABI diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index e7265a552f4f..45697e04d771 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -58,13 +58,6 @@ raw_copy_to_user(void __user *dst, const void *src, unsigned long size) return copy_user_generic((__force void *)dst, src, size); } -static __always_inline __must_check -unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size) -{ - return copy_user_generic((__force void *)dst, - (__force void *)src, size); -} - extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); diff --git a/include/linux/compat.h b/include/linux/compat.h index 2d42cebd1fb8..1c758b0e0359 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -511,8 +511,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, struct epoll_event; /* fortunately, this one is fixed-layout */ -extern void __user *compat_alloc_user_space(unsigned long len); - int compat_restore_altstack(const compat_stack_t __user *uss); int __compat_save_altstack(compat_stack_t __user *, unsigned long); #define unsafe_compat_save_altstack(uss, sp, label) do { \ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index c05e903cef02..ac0394087f7d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -200,16 +200,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) n = _copy_to_user(to, from, n); return n; } -#ifdef CONFIG_COMPAT -static __always_inline unsigned long __must_check -copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - might_fault(); - if (access_ok(to, n) && access_ok(from, n)) - n = raw_copy_in_user(to, from, n); - return n; -} -#endif #ifndef copy_mc_to_kernel /* diff --git a/kernel/compat.c b/kernel/compat.c index 05adfd6fa8bf..55551989d9da 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -269,24 +269,3 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat) return 0; } 
EXPORT_SYMBOL_GPL(get_compat_sigset); - -/* - * Allocate user-space memory for the duration of a single system call, - * in order to marshall parameters inside a compat thunk. - */ -void __user *compat_alloc_user_space(unsigned long len) -{ - void __user *ptr; - - /* If len would occupy more than half of the entire compat space... */ - if (unlikely(len > (((compat_uptr_t)~0) >> 1))) - return NULL; - - ptr = arch_compat_alloc_user_space(len); - - if (unlikely(!access_ok(ptr, len))) - return NULL; - - return ptr; -} -EXPORT_SYMBOL_GPL(compat_alloc_user_space); -- cgit v1.2.3 From 13db8c50477d83ad3e3b9b0ae247e5cd833a7ae4 Mon Sep 17 00:00:00 2001 From: Liu Zixian Date: Wed, 8 Sep 2021 18:10:05 -0700 Subject: mm/hugetlb: initialize hugetlb_usage in mm_init After fork, the child process gets an incorrect (2x) hugetlb_usage. If a process uses five 2MB hugetlb pages in an anonymous mapping, it reports "HugetlbPages: 10240 kB"; after it forks, the child shows "HugetlbPages: 20480 kB". The amount is doubled because hugetlb_usage is copied from the parent and then increased again when the page tables are copied from parent to child, so the child ends up with 2x the actual usage. Fix this by adding hugetlb_count_init in mm_init. Link: https://lkml.kernel.org/r/20210826071742.877-1-liuzixian4@huawei.com Fixes: 5d317b2b6536 ("mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status") Signed-off-by: Liu Zixian Reviewed-by: Naoya Horiguchi Reviewed-by: Mike Kravetz Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 9 +++++++++ kernel/fork.c | 1 + 2 files changed, 10 insertions(+) (limited to 'kernel') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f7ca1a3870ea..1faebe1cd0ed 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -858,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); +static inline void hugetlb_count_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->hugetlb_usage, 0); +} + static inline void hugetlb_count_add(long l, struct mm_struct *mm) { atomic_long_add(l, &mm->hugetlb_usage); @@ -1042,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, return &mm->page_table_lock; } +static inline void hugetlb_count_init(struct mm_struct *mm) +{ +} + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) { } diff --git a/kernel/fork.c b/kernel/fork.c index ff5be23800af..38681ad44c76 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1063,6 +1063,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm->pmd_huge_pte = NULL; #endif mm_init_uprobes_state(mm); + hugetlb_count_init(mm); if (current->mm) { mm->flags = current->mm->flags & MMF_INIT_MASK; -- cgit v1.2.3 From e5480572706da1b2c2dc2c6484eab64f92b9263b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 1 Sep 2021 11:44:11 +0200 Subject: locking/rtmutex: Fix ww_mutex deadlock check Dan reported that rt_mutex_adjust_prio_chain() can be called with .orig_waiter == NULL; however, commit a055fcc132d4 ("locking/rtmutex: Return success on deadlock for ww_mutex waiters") unconditionally dereferences it. Since both call-sites that have .orig_waiter == NULL don't care for the return value, simply disable the deadlock squash by adding the NULL check. Notably, both callers use the deadlock condition as a termination condition for the iteration; once detected, it is sure that (de)boosting is done.
Arguably step [3] would be a more natural termination point, but it's dubious whether adding a third deadlock detection state would improve the code. Fixes: a055fcc132d4 ("locking/rtmutex: Return success on deadlock for ww_mutex waiters") Reported-by: Dan Carpenter Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Acked-by: Sebastian Andrzej Siewior Link: https://lore.kernel.org/r/YS9La56fHMiCCo75@hirez.programming.kicks-ass.net --- kernel/locking/rtmutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 8eabdc79602b..6bb116c559b4 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -753,7 +753,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * other configuration and we fail to report; also, see * lockdep. */ - if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter->ww_ctx) + if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx) ret = 0; raw_spin_unlock(&lock->wait_lock); -- cgit v1.2.3 From 9848417926353daa59d2b05eb26e185063dbac6e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 6 Sep 2021 13:30:34 +0200 Subject: sched/idle: Make the idle timer expire in hard interrupt context The intel powerclamp driver will setup a per-CPU worker with RT priority. The worker will then invoke play_idle() in which it remains in the idle poll loop until it is stopped by the timer it started earlier. That timer needs to expire in hard interrupt context on PREEMPT_RT. Otherwise the timer will expire in ksoftirqd as a SOFT timer but that task won't be scheduled on the CPU because its priority is lower than the priority of the worker which is in the idle loop. Always expire the idle timer in hard interrupt context. Reported-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20210906113034.jgfxrjdvxnjqgtmc@linutronix.de --- kernel/sched/idle.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 912b47aa99d8..d17b0a5ce6ac 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -379,10 +379,10 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns) cpuidle_use_deepest_state(latency_ns); it.done = 0; - hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); it.timer.function = idle_inject_timer_fn; hrtimer_start(&it.timer, ns_to_ktime(duration_ns), - HRTIMER_MODE_REL_PINNED); + HRTIMER_MODE_REL_PINNED_HARD); while (!READ_ONCE(it.done)) do_idle(); -- cgit v1.2.3 From 868ad33bfa3bf39960982682ad3a0f8ebda1656e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 28 Aug 2021 15:55:52 +0200 Subject: sched: Prevent balance_push() on remote runqueues sched_setscheduler() and rt_mutex_setprio() invoke the run-queue balance callback after changing priorities or the scheduling class of a task. The run-queue for which the callback is invoked can be local or remote. That's not a problem for the regular rq::push_work which is serialized with a busy flag in the run-queue struct, but for the balance_push() work which is only valid to be invoked on the outgoing CPU that's wrong. It not only triggers the debug warning, but also leaves the per CPU variable push_work unprotected, which can result in double enqueues on the stop machine list. 
Remove the warning and validate that the function is invoked on the outgoing CPU. Fixes: ae7927023243 ("sched: Optimize finish_lock_switch()") Reported-by: Sebastian Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/87zgt1hdw7.ffs@tglx --- kernel/sched/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f3b27c6c5153..b21a1857b75a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8523,7 +8523,6 @@ static void balance_push(struct rq *rq) struct task_struct *push_task = rq->curr; lockdep_assert_rq_held(rq); - SCHED_WARN_ON(rq->cpu != smp_processor_id()); /* * Ensure the thing is persistent until balance_push_set(.on = false); @@ -8531,9 +8530,10 @@ static void balance_push(struct rq *rq) rq->balance_callback = &balance_push_callback; /* - * Only active while going offline. + * Only active while going offline and when invoked on the outgoing + * CPU. */ - if (!cpu_dying(rq->cpu)) + if (!cpu_dying(rq->cpu) || rq != this_rq()) return; /* -- cgit v1.2.3 From a3928f877e7b153dbc243d6329b3777ac75b6004 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 9 Sep 2021 22:36:23 +0900 Subject: tracing/boot: Fix trace_boot_hist_add_array() to check array is value trace_boot_hist_add_array() uses the combination of xbc_node_find_child() and xbc_node_get_child() to get the child node of the key node. But since it did not check whether that child node is a data node, a user could pass a subkey node as the array node (anode). To avoid this issue, check that the array node is a data node. There is already xbc_node_find_value(node, key, vnode), which ensures that @vnode is a value node, so use it in trace_boot_hist_add_array() to fix this issue. Link: https://lkml.kernel.org/r/163119458308.161018.1516455973625940212.stgit@devnote2 Fixes: e66ed86ca6c5 ("tracing/boot: Add per-event histogram action options") Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 388e65d05978..a6be48b24774 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -219,13 +219,12 @@ static int __init trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp, char *end, const char *key) { - struct xbc_node *knode, *anode; + struct xbc_node *anode; const char *p; char sep; - knode = xbc_node_find_child(hnode, key); - if (knode) { - anode = xbc_node_get_child(knode); + p = xbc_node_find_value(hnode, key, &anode); + if (p) { if (!anode) { pr_err("hist.%s requires value(s).\n", key); return -EINVAL; } -- cgit v1.2.3 From 5f8895b27da2656e5e2be029acfb68c016f03326 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 9 Sep 2021 22:36:30 +0900 Subject: tracing/boot: Fix to check the histogram control param is a leaf node Since xbc_node_find_child() doesn't ensure the returned node is a leaf node (a key-value pair, or a node that does not have subkeys), use xbc_node_find_value() to ensure the histogram control parameter is a leaf node in trace_boot_compose_hist_cmd().
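Both fixes hinge on the same lookup rule: a key node may exist and still not carry a value. A stand-alone sketch of that rule in plain C follows; find_value() and the toy node type are only analogues of xbc_node_find_value() and struct xbc_node, not the kernel implementation:

#include <stdio.h>
#include <string.h>

/* Toy stand-in for struct xbc_node; all names are illustrative. */
enum node_type { NODE_KEY, NODE_VALUE };

struct node {
	enum node_type type;
	const char *data;
	struct node *child;	/* first child */
	struct node *next;	/* next sibling */
};

/* Toy analogue of xbc_node_find_value(): look up key under parent and
 * succeed only if the key carries a value node, storing it in *vnode.
 * A key that has nothing but subkeys is rejected. */
static const char *find_value(struct node *parent, const char *key,
			      struct node **vnode)
{
	struct node *n;

	for (n = parent->child; n; n = n->next) {
		if (n->type != NODE_KEY || strcmp(n->data, key))
			continue;
		if (!n->child || n->child->type != NODE_VALUE)
			return NULL;	/* key exists, but is not a value */
		if (vnode)
			*vnode = n->child;	/* like @anode in the patch */
		return n->child->data;
	}
	return NULL;			/* no such key */
}

int main(void)
{
	struct node val    = { NODE_VALUE, "on",    NULL,    NULL   };
	struct node subkey = { NODE_KEY,   "var",   NULL,    NULL   };
	struct node trace  = { NODE_KEY,   "trace", &subkey, NULL   };
	struct node pause  = { NODE_KEY,   "pause", &val,    &trace };
	struct node root   = { NODE_KEY,   "hist",  &pause,  NULL   };
	struct node *vnode = NULL;
	const char *p;

	p = find_value(&root, "pause", &vnode);
	printf("pause: %s\n", p ? p : "(rejected)");	/* prints "on" */

	p = find_value(&root, "trace", NULL);
	printf("trace: %s\n", p ? p : "(rejected)");	/* subkeys only */
	return 0;
}

Rejecting "trace" here mirrors why the patches replace the find_child()/get_child() pattern with a find-value lookup: the former would happily return a node that has only subkeys.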
Link: https://lkml.kernel.org/r/163119459059.161018.18341288218424528962.stgit@devnote2 Fixes: e66ed86ca6c5 ("tracing/boot: Add per-event histogram action options") Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_boot.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index a6be48b24774..db6ee372dc6d 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -385,11 +385,11 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) } /* Histogram control attributes (mutual exclusive) */ - if (xbc_node_find_child(hnode, "pause")) + if (xbc_node_find_value(hnode, "pause", NULL)) append_printf(&buf, end, ":pause"); - else if (xbc_node_find_child(hnode, "continue")) + else if (xbc_node_find_value(hnode, "continue", NULL)) append_printf(&buf, end, ":continue"); - else if (xbc_node_find_child(hnode, "clear")) + else if (xbc_node_find_value(hnode, "clear", NULL)) append_printf(&buf, end, ":clear"); /* Histogram handler and actions */ -- cgit v1.2.3 From 5dfe50b05588010f347cb2f436434bf22b7a84ed Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 9 Sep 2021 22:36:38 +0900 Subject: bootconfig: Rename xbc_node_find_child() to xbc_node_find_subkey() Rename xbc_node_find_child() to xbc_node_find_subkey() to clarify that the function returns a key node (not a value node). Since there are both xbc_node_for_each_child() (which loops over all child nodes) and xbc_node_for_each_subkey() (which loops over subkey nodes only), this naming distinction is necessary to avoid confusing users. Link: https://lkml.kernel.org/r/163119459826.161018.11200274779483115300.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- include/linux/bootconfig.h | 4 ++-- kernel/trace/trace_boot.c | 24 ++++++++++++------------ lib/bootconfig.c | 8 ++++---- 3 files changed, 18 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index abe089c27529..537e1b991f11 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -110,7 +110,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node) } /* Tree-based key-value access APIs */ -struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent, +struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent, const char *key); const char * __init xbc_node_find_value(struct xbc_node *parent, @@ -148,7 +148,7 @@ xbc_find_value(const char *key, struct xbc_node **vnode) */ static inline struct xbc_node * __init xbc_find_node(const char *key) { - return xbc_node_find_child(NULL, key); + return xbc_node_find_subkey(NULL, key); } /** diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index db6ee372dc6d..8d252f63cd78 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -262,9 +262,9 @@ trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp, append_printf(bufp, end, ":%s(%s)", handler, p); /* Compose 'action' parameter */ - knode = xbc_node_find_child(hnode, "trace"); + knode = xbc_node_find_subkey(hnode, "trace"); if (!knode) - knode = xbc_node_find_child(hnode, "save"); + knode = xbc_node_find_subkey(hnode, "save"); if (knode) { anode = xbc_node_get_child(knode); @@ -283,7 +283,7 @@ trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp, sep = ','; } append_printf(bufp, end, ")"); - } else if (xbc_node_find_child(hnode, "snapshot")) 
{ + } else if (xbc_node_find_subkey(hnode, "snapshot")) { append_printf(bufp, end, ".snapshot()"); } else { pr_err("hist.%s requires an action.\n", @@ -314,7 +314,7 @@ trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp, break; } - if (xbc_node_find_child(hnode, param)) + if (xbc_node_find_subkey(hnode, param)) ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param); return ret; @@ -374,7 +374,7 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) if (p) append_printf(&buf, end, ":name=%s", p); - node = xbc_node_find_child(hnode, "var"); + node = xbc_node_find_subkey(hnode, "var"); if (node) { xbc_node_for_each_key_value(node, knode, p) { /* Expression must not include spaces. */ @@ -393,13 +393,13 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) append_printf(&buf, end, ":clear"); /* Histogram handler and actions */ - node = xbc_node_find_child(hnode, "onmax"); + node = xbc_node_find_subkey(hnode, "onmax"); if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0) return -EINVAL; - node = xbc_node_find_child(hnode, "onchange"); + node = xbc_node_find_subkey(hnode, "onchange"); if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0) return -EINVAL; - node = xbc_node_find_child(hnode, "onmatch"); + node = xbc_node_find_subkey(hnode, "onmatch"); if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0) return -EINVAL; @@ -436,7 +436,7 @@ trace_boot_init_histograms(struct trace_event_file *file, } } - if (xbc_node_find_child(hnode, "keys")) { + if (xbc_node_find_subkey(hnode, "keys")) { if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) { tmp = kstrdup(buf, GFP_KERNEL); if (trigger_process_regex(file, buf) < 0) @@ -495,7 +495,7 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, else if (trigger_process_regex(file, buf) < 0) pr_err("Failed to apply an action: %s\n", p); } - anode = xbc_node_find_child(enode, "hist"); + anode = xbc_node_find_subkey(enode, "hist"); if (anode) trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf)); } else if (xbc_node_find_value(enode, "actions", NULL)) @@ -517,7 +517,7 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) bool enable, enable_all = false; const char *data; - node = xbc_node_find_child(node, "event"); + node = xbc_node_find_subkey(node, "event"); if (!node) return; /* per-event key starts with "event.GROUP.EVENT" */ @@ -620,7 +620,7 @@ trace_boot_init_instances(struct xbc_node *node) struct trace_array *tr; const char *p; - node = xbc_node_find_child(node, "instance"); + node = xbc_node_find_subkey(node, "instance"); if (!node) return; diff --git a/lib/bootconfig.c b/lib/bootconfig.c index 927017431fb6..f8419cff1147 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -142,16 +142,16 @@ xbc_node_match_prefix(struct xbc_node *node, const char **prefix) } /** - * xbc_node_find_child() - Find a child node which matches given key + * xbc_node_find_subkey() - Find a subkey node which matches given key * @parent: An XBC node. * @key: A key string. * - * Search a node under @parent which matches @key. The @key can contain + * Search a key node under @parent which matches @key. The @key can contain * several words jointed with '.'. If @parent is NULL, this searches the * node from whole tree. Return NULL if no node is matched. 
*/ struct xbc_node * __init -xbc_node_find_child(struct xbc_node *parent, const char *key) +xbc_node_find_subkey(struct xbc_node *parent, const char *key) { struct xbc_node *node; @@ -191,7 +191,7 @@ const char * __init xbc_node_find_value(struct xbc_node *parent, const char *key, struct xbc_node **vnode) { - struct xbc_node *node = xbc_node_find_child(parent, key); + struct xbc_node *node = xbc_node_find_subkey(parent, key); if (!node || !xbc_node_is_key(node)) return NULL; -- cgit v1.2.3
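To close, a hedged sketch of how the renamed lookup is meant to be used alongside xbc_node_find_value() (the "feature" fragment and the handle_subkey() helper are hypothetical, for illustration only):

/*
 * For a bootconfig fragment like:
 *
 *   feature.mode = "fast"
 *   feature.opts.debug
 *   feature.opts.trace
 */
struct xbc_node *feature, *opts, *child;
const char *mode;

feature = xbc_find_node("feature");                 /* top-level key node */
mode = xbc_node_find_value(feature, "mode", NULL);  /* yields "fast" */
opts = xbc_node_find_subkey(feature, "opts");       /* key node, no value */
if (opts) {
	xbc_node_for_each_subkey(opts, child)
		handle_subkey(child);               /* hypothetical helper */
}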