-rw-r--r--   mm/bootmem.c             |  2
-rw-r--r--   mm/maccess.c             |  2
-rw-r--r--   mm/memcontrol.c          |  2
-rw-r--r--   mm/process_vm_access.c   |  2
-rw-r--r--   mm/swap.c                |  4
-rw-r--r--   mm/z3fold.c              |  4
-rw-r--r--   mm/zbud.c                |  4
-rw-r--r--   mm/zpool.c               | 20
8 files changed, 20 insertions, 20 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 6aef64254203..9e197987b67d 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -410,7 +410,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 
 /**
  * free_bootmem - mark a page range as usable
- * @addr: starting physical address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * Partial pages will be considered reserved and left as they are.
diff --git a/mm/maccess.c b/mm/maccess.c
index 78f9274dd49d..ec00be51a24f 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_write);
  * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
  * @dst: Destination address, in kernel space. This buffer must be at
  *       least @count bytes long.
- * @src: Unsafe address.
+ * @unsafe_addr: Unsafe address.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
  *
  * Copies a NUL-terminated string from unsafe address to kernel buffer.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3793e22977c0..13b35ffa021e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -917,7 +917,7 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 /**
  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
  * @page: the page
- * @zone: zone of the page
+ * @pgdat: pgdat of the page
  *
  * This function is only safe when following the LRU page isolation
  * and putback protocol: the LRU lock must be held, and the page must
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 16424b9ae424..f24c297dba6f 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -25,7 +25,7 @@
 /**
  * process_vm_rw_pages - read/write pages from task specified
  * @pages: array of pointers to pages we want to copy
- * @start_offset: offset in page to start copying from/to
+ * @offset: offset in page to start copying from/to
  * @len: number of bytes to copy
  * @iter: where to copy to/from locally
  * @vm_write: 0 means copy from, 1 means copy to
diff --git a/mm/swap.c b/mm/swap.c
index 10568b1548d4..567a7b96e41d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -913,11 +913,11 @@ EXPORT_SYMBOL(__pagevec_lru_add);
  * @pvec: Where the resulting entries are placed
  * @mapping: The address_space to search
  * @start: The starting entry index
- * @nr_entries: The maximum number of entries
+ * @nr_pages: The maximum number of pages
  * @indices: The cache indices corresponding to the entries in @pvec
  *
  * pagevec_lookup_entries() will search for and return a group of up
- * to @nr_entries pages and shadow entries in the mapping. All
+ * to @nr_pages pages and shadow entries in the mapping. All
  * entries are placed in @pvec. pagevec_lookup_entries() takes a
  * reference against actual pages in @pvec.
  *
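All of the hunks up to this point are the same kind of fix: the kernel-doc @-tag has to name the parameter exactly as it appears in the function prototype, otherwise scripts/kernel-doc warns and the description is dropped from the generated documentation. A minimal illustration of the rule, using a made-up function rather than anything from this patch:

/*
 * Hypothetical example, not taken from this patch: the @-tags in a
 * kernel-doc block must match the parameter names in the prototype.
 */

/**
 * frob_range - mark a physical range as frobbed
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Naming the first tag "@addr" while the parameter is "physaddr"
 * would make scripts/kernel-doc report a missing description for
 * "physaddr" and an excess description for "addr".
 */
void frob_range(unsigned long physaddr, unsigned long size)
{
	/* body omitted */
}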
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 39e19125d6a0..d589d318727f 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -769,7 +769,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 /**
  * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
  * @pool: pool from which a page will attempt to be evicted
- * @retires: number of pages on the LRU list for which eviction will
+ * @retries: number of pages on the LRU list for which eviction will
  * be attempted before failing
  *
  * z3fold reclaim is different from normal system reclaim in that it is done
@@ -779,7 +779,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
  * z3fold and the user, however.
  *
  * To avoid these, this is how z3fold_reclaim_page() should be called:
-
+ *
  * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
  * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
  * call the user-defined eviction handler with the pool and handle as
diff --git a/mm/zbud.c b/mm/zbud.c
index b42322e50f63..28458f7d1e84 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -466,7 +466,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
 /**
  * zbud_reclaim_page() - evicts allocations from a pool page and frees it
  * @pool: pool from which a page will attempt to be evicted
- * @retires: number of pages on the LRU list for which eviction will
+ * @retries: number of pages on the LRU list for which eviction will
  * be attempted before failing
  *
  * zbud reclaim is different from normal system reclaim in that the reclaim is
@@ -476,7 +476,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
  * the user, however.
  *
  * To avoid these, this is how zbud_reclaim_page() should be called:
-
+ *
  * The user detects a page should be reclaimed and calls zbud_reclaim_page().
  * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
  * the user-defined eviction handler with the pool and handle as arguments.
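Beyond the @retires typo, the comment blocks above document a calling protocol: the pool owner registers an eviction handler when the pool is created, and zbud_reclaim_page() (or z3fold_reclaim_page()) takes a page from the pool LRU and invokes that handler for each handle in it. A rough sketch of that flow against the zbud API; my_backing_store_write() and the retry count are hypothetical stand-ins, only the zbud calls themselves are real:

/*
 * Sketch of the documented reclaim protocol, using the zbud API.
 * my_backing_store_write() is a hypothetical stand-in for whatever
 * the pool user (zswap, for example) does with evicted data.
 */
#include <linux/zbud.h>
#include <linux/gfp.h>
#include <linux/printk.h>

int my_backing_store_write(unsigned long handle, void *data);

/* Called by zbud_reclaim_page() for every handle in the page it picked. */
static int my_evict(struct zbud_pool *pool, unsigned long handle)
{
	void *data = zbud_map(pool, handle);
	int err = my_backing_store_write(handle, data);

	zbud_unmap(pool, handle);
	if (err)
		return err;

	/* Freeing the handle is what lets zbud release the whole page. */
	zbud_free(pool, handle);
	return 0;
}

static const struct zbud_ops my_ops = {
	.evict = my_evict,
};

static struct zbud_pool *my_pool_create(void)
{
	/* The eviction handler is registered at pool creation time. */
	return zbud_create_pool(GFP_KERNEL, &my_ops);
}

static void my_reclaim_one(struct zbud_pool *pool)
{
	/* Try up to 8 LRU pages before giving up (the @retries argument). */
	if (zbud_reclaim_page(pool, 8))
		pr_debug("zbud: no page could be reclaimed\n");
}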
diff --git a/mm/zpool.c b/mm/zpool.c
index be67bcffb9ef..f8cb83e7699b 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -201,7 +201,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
 
 /**
  * zpool_destroy_pool() - Destroy a zpool
- * @pool: The zpool to destroy.
+ * @zpool: The zpool to destroy.
  *
  * Implementations must guarantee this to be thread-safe,
  * however only when destroying different pools. The same
@@ -224,7 +224,7 @@ void zpool_destroy_pool(struct zpool *zpool)
 
 /**
  * zpool_get_type() - Get the type of the zpool
- * @pool: The zpool to check
+ * @zpool: The zpool to check
  *
  * This returns the type of the pool.
  *
@@ -239,7 +239,7 @@ const char *zpool_get_type(struct zpool *zpool)
 
 /**
  * zpool_malloc() - Allocate memory
- * @pool: The zpool to allocate from.
+ * @zpool: The zpool to allocate from.
  * @size: The amount of memory to allocate.
  * @gfp: The GFP flags to use when allocating memory.
  * @handle: Pointer to the handle to set
@@ -261,7 +261,7 @@ int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
 
 /**
  * zpool_free() - Free previously allocated memory
- * @pool: The zpool that allocated the memory.
+ * @zpool: The zpool that allocated the memory.
  * @handle: The handle to the memory to free.
  *
  * This frees previously allocated memory. This does not guarantee
@@ -280,7 +280,7 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
 
 /**
  * zpool_shrink() - Shrink the pool size
- * @pool: The zpool to shrink.
+ * @zpool: The zpool to shrink.
  * @pages: The number of pages to shrink the pool.
  * @reclaimed: The number of pages successfully evicted.
  *
@@ -304,11 +304,11 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages,
 
 /**
  * zpool_map_handle() - Map a previously allocated handle into memory
- * @pool: The zpool that the handle was allocated from
+ * @zpool: The zpool that the handle was allocated from
  * @handle: The handle to map
- * @mm: How the memory should be mapped
+ * @mapmode: How the memory should be mapped
  *
- * This maps a previously allocated handle into memory. The @mm
+ * This maps a previously allocated handle into memory. The @mapmode
  * param indicates to the implementation how the memory will be
  * used, i.e. read-only, write-only, read-write. If the
  * implementation does not support it, the memory will be treated
@@ -332,7 +332,7 @@ void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
 
 /**
  * zpool_unmap_handle() - Unmap a previously mapped handle
- * @pool: The zpool that the handle was allocated from
+ * @zpool: The zpool that the handle was allocated from
  * @handle: The handle to unmap
  *
  * This unmaps a previously mapped handle. Any locks or other
@@ -347,7 +347,7 @@ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
 
 /**
  * zpool_get_total_size() - The total size of the pool
- * @pool: The zpool to check
+ * @zpool: The zpool to check
  *
  * This returns the total size in bytes of the pool.
  *
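Taken together, the zpool kernel-doc touched above describes one short lifecycle: create a pool, allocate a handle, map it to copy data in or out, unmap, free, destroy. A condensed sketch of that sequence; the "zbud" backend, the "demo" pool name and the NULL ops are only one possible configuration, not something this patch prescribes:

/*
 * Condensed sketch of the zpool calls documented above.  The "zbud"
 * type, the "demo" pool name and the NULL ops are just an example
 * configuration; error handling is kept minimal.
 */
#include <linux/zpool.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int zpool_demo(const void *src, size_t len)
{
	struct zpool *zpool;
	unsigned long handle;
	void *dst;
	int ret;

	zpool = zpool_create_pool("zbud", "demo", GFP_KERNEL, NULL);
	if (!zpool)
		return -ENOMEM;

	/* @zpool identifies the pool, @handle receives the allocation. */
	ret = zpool_malloc(zpool, len, GFP_KERNEL, &handle);
	if (ret)
		goto out_destroy;

	/* @mapmode tells the backend how the mapping will be used. */
	dst = zpool_map_handle(zpool, handle, ZPOOL_MM_RW);
	memcpy(dst, src, len);
	zpool_unmap_handle(zpool, handle);

	pr_info("%s pool holds %llu bytes\n", zpool_get_type(zpool),
		(unsigned long long)zpool_get_total_size(zpool));

	zpool_free(zpool, handle);
out_destroy:
	zpool_destroy_pool(zpool);
	return ret;
}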