author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-03 11:37:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-03 11:37:15 -0700
commit		542a086ac72fb193cbc1b996963a572269e57743 (patch)
tree		b137c08037cca4ffc8a156a891a01113b3b8edce /drivers/base/memory.c
parent		1d1fdd95df681f0c065d90ffaafa215a0e8825e2 (diff)
parent		1eeeef153c02f5856ec109fa532eb5f31c39f85c (diff)
download	linux-542a086ac72fb193cbc1b996963a572269e57743.tar.bz2
Merge tag 'driver-core-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
Pull driver core patches from Greg KH:
"Here's the big driver core pull request for 3.12-rc1.
Lots of tiny changes here fixing up the way sysfs attributes are
created, to try to make drivers simpler, and to fix a whole class of
race conditions with the creation of device attributes after the
device was announced to userspace.
All the various pieces are acked by the different subsystem
maintainers"
* tag 'driver-core-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (119 commits)
firmware loader: fix pending_fw_head list corruption
drivers/base/memory.c: introduce help macro to_memory_block
dynamic debug: line queries failing due to uninitialized local variable
sysfs: sysfs_create_groups returns a value.
debugfs: provide debugfs_create_x64() when disabled
rbd: convert bus code to use bus_groups
firmware: dcdbas: use binary attribute groups
sysfs: add sysfs_create/remove_groups for when SYSFS is not enabled
driver core: add #include <linux/sysfs.h> to core files.
HID: convert bus code to use dev_groups
Input: serio: convert bus code to use drv_groups
Input: gameport: convert bus code to use drv_groups
driver core: firmware: use __ATTR_RW()
driver core: core: use DEVICE_ATTR_RO
driver core: bus: use DRIVER_ATTR_WO()
driver core: create write-only attribute macros for devices and drivers
sysfs: create __ATTR_WO()
driver-core: platform: convert bus code to use dev_groups
workqueue: convert bus code to use dev_groups
MEI: convert bus code to use dev_groups
...
Diffstat (limited to 'drivers/base/memory.c')
-rw-r--r--	drivers/base/memory.c	258
1 file changed, 115 insertions, 143 deletions
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index ec386ee9cb22..1c617623c8ae 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -16,7 +16,6 @@
 #include <linux/capability.h>
 #include <linux/device.h>
 #include <linux/memory.h>
-#include <linux/kobject.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
@@ -30,6 +29,8 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
 
 #define MEMORY_CLASS_NAME	"memory"
 
+#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
+
 static int sections_per_block;
 
 static inline int base_memory_block_id(int section_nr)
@@ -77,7 +78,7 @@ EXPORT_SYMBOL(unregister_memory_isolate_notifier);
 
 static void memory_block_release(struct device *dev)
 {
-	struct memory_block *mem = container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 
 	kfree(mem);
 }
@@ -110,8 +111,7 @@ static unsigned long get_memory_block_size(void)
 static ssize_t show_mem_start_phys_index(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	unsigned long phys_index;
 
 	phys_index = mem->start_section_nr / sections_per_block;
@@ -121,8 +121,7 @@ static ssize_t show_mem_start_phys_index(struct device *dev,
 static ssize_t show_mem_end_phys_index(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	unsigned long phys_index;
 
 	phys_index = mem->end_section_nr / sections_per_block;
@@ -137,8 +136,7 @@ static ssize_t show_mem_removable(struct device *dev,
 {
 	unsigned long i, pfn;
 	int ret = 1;
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 
 	for (i = 0; i < sections_per_block; i++) {
 		if (!present_section_nr(mem->start_section_nr + i))
@@ -156,8 +154,7 @@ static ssize_t show_mem_removable(struct device *dev,
 static ssize_t show_mem_state(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	ssize_t len = 0;
 
 	/*
@@ -263,9 +260,8 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 	return ret;
 }
 
-static int __memory_block_change_state(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req,
-		int online_type)
+static int memory_block_change_state(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req)
 {
 	int ret = 0;
 
@@ -275,105 +271,89 @@ static int __memory_block_change_state(struct memory_block *mem,
 	if (to_state == MEM_OFFLINE)
 		mem->state = MEM_GOING_OFFLINE;
 
-	ret = memory_block_action(mem->start_section_nr, to_state, online_type);
+	ret = memory_block_action(mem->start_section_nr, to_state,
+				mem->online_type);
+
 	mem->state = ret ? from_state_req : to_state;
+
 	return ret;
 }
 
+/* The device lock serializes operations on memory_subsys_[online|offline] */
 static int memory_subsys_online(struct device *dev)
 {
-	struct memory_block *mem = container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	int ret;
 
-	mutex_lock(&mem->state_mutex);
-
-	ret = mem->state == MEM_ONLINE ? 0 :
-		__memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE,
-					ONLINE_KEEP);
+	if (mem->state == MEM_ONLINE)
+		return 0;
 
-	mutex_unlock(&mem->state_mutex);
-	return ret;
-}
-
-static int memory_subsys_offline(struct device *dev)
-{
-	struct memory_block *mem = container_of(dev, struct memory_block, dev);
-	int ret;
+	/*
+	 * If we are called from store_mem_state(), online_type will be
+	 * set >= 0 Otherwise we were called from the device online
+	 * attribute and need to set the online_type.
+	 */
+	if (mem->online_type < 0)
+		mem->online_type = ONLINE_KEEP;
 
-	mutex_lock(&mem->state_mutex);
+	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
 
-	ret = mem->state == MEM_OFFLINE ? 0 :
-		__memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
+	/* clear online_type */
+	mem->online_type = -1;
 
-	mutex_unlock(&mem->state_mutex);
 	return ret;
 }
 
-static int __memory_block_change_state_uevent(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req,
-		int online_type)
-{
-	int ret = __memory_block_change_state(mem, to_state, from_state_req,
-			online_type);
-	if (!ret) {
-		switch (mem->state) {
-		case MEM_OFFLINE:
-			kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
-			break;
-		case MEM_ONLINE:
-			kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
-			break;
-		default:
-			break;
-		}
-	}
-	return ret;
-}
-
-static int memory_block_change_state(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req,
-		int online_type)
+static int memory_subsys_offline(struct device *dev)
 {
-	int ret;
+	struct memory_block *mem = to_memory_block(dev);
 
-	mutex_lock(&mem->state_mutex);
-	ret = __memory_block_change_state_uevent(mem, to_state, from_state_req,
-			online_type);
-	mutex_unlock(&mem->state_mutex);
+	if (mem->state == MEM_OFFLINE)
+		return 0;
 
-	return ret;
+	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
 }
 
+
 static ssize_t
 store_mem_state(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct memory_block *mem;
-	bool offline;
-	int ret = -EINVAL;
-
-	mem = container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
+	int ret, online_type;
 
 	lock_device_hotplug();
 
-	if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) {
-		offline = false;
-		ret = memory_block_change_state(mem, MEM_ONLINE,
-						MEM_OFFLINE, ONLINE_KERNEL);
-	} else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) {
-		offline = false;
-		ret = memory_block_change_state(mem, MEM_ONLINE,
-						MEM_OFFLINE, ONLINE_MOVABLE);
-	} else if (!strncmp(buf, "online", min_t(int, count, 6))) {
-		offline = false;
-		ret = memory_block_change_state(mem, MEM_ONLINE,
-						MEM_OFFLINE, ONLINE_KEEP);
-	} else if(!strncmp(buf, "offline", min_t(int, count, 7))) {
-		offline = true;
-		ret = memory_block_change_state(mem, MEM_OFFLINE,
-						MEM_ONLINE, -1);
+	if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
+		online_type = ONLINE_KERNEL;
+	else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
+		online_type = ONLINE_MOVABLE;
+	else if (!strncmp(buf, "online", min_t(int, count, 6)))
+		online_type = ONLINE_KEEP;
+	else if (!strncmp(buf, "offline", min_t(int, count, 7)))
+		online_type = -1;
+	else
+		return -EINVAL;
+
+	switch (online_type) {
+	case ONLINE_KERNEL:
+	case ONLINE_MOVABLE:
+	case ONLINE_KEEP:
+		/*
+		 * mem->online_type is not protected so there can be a
+		 * race here.  However, when racing online, the first
+		 * will succeed and the second will just return as the
+		 * block will already be online.  The online type
+		 * could be either one, but that is expected.
+		 */
+		mem->online_type = online_type;
+		ret = device_online(&mem->dev);
+		break;
+	case -1:
+		ret = device_offline(&mem->dev);
+		break;
+	default:
+		ret = -EINVAL; /* should never happen */
 	}
-	if (!ret)
-		dev->offline = offline;
 
 	unlock_device_hotplug();
 
@@ -394,8 +374,7 @@ store_mem_state(struct device *dev,
 static ssize_t show_phys_device(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	return sprintf(buf, "%d\n", mem->phys_device);
 }
 
@@ -471,7 +450,7 @@ store_soft_offline_page(struct device *dev,
 	u64 pfn;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	if (strict_strtoull(buf, 0, &pfn) < 0)
+	if (kstrtoull(buf, 0, &pfn) < 0)
 		return -EINVAL;
 	pfn >>= PAGE_SHIFT;
 	if (!pfn_valid(pfn))
@@ -490,7 +469,7 @@ store_hard_offline_page(struct device *dev,
 	u64 pfn;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	if (strict_strtoull(buf, 0, &pfn) < 0)
+	if (kstrtoull(buf, 0, &pfn) < 0)
 		return -EINVAL;
 	pfn >>= PAGE_SHIFT;
 	ret = memory_failure(pfn, 0, 0);
@@ -527,7 +506,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
 		put_device(&hint->dev);
 	if (!dev)
 		return NULL;
-	return container_of(dev, struct memory_block, dev);
+	return to_memory_block(dev);
 }
 
 /*
@@ -567,16 +546,13 @@ static const struct attribute_group *memory_memblk_attr_groups[] = {
 static int register_memory(struct memory_block *memory)
 {
-	int error;
-
 	memory->dev.bus = &memory_subsys;
 	memory->dev.id = memory->start_section_nr / sections_per_block;
 	memory->dev.release = memory_block_release;
 	memory->dev.groups = memory_memblk_attr_groups;
 	memory->dev.offline = memory->state == MEM_OFFLINE;
 
-	error = device_register(&memory->dev);
-	return error;
+	return device_register(&memory->dev);
 }
 
 static int init_memory_block(struct memory_block **memory,
@@ -597,7 +573,6 @@ static int init_memory_block(struct memory_block **memory,
 	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
 	mem->state = state;
 	mem->section_count++;
-	mutex_init(&mem->state_mutex);
 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	mem->phys_device = arch_get_memory_phys_device(start_pfn);
 
@@ -607,55 +582,57 @@ static int init_memory_block(struct memory_block **memory,
 	return ret;
 }
 
-static int add_memory_section(int nid, struct mem_section *section,
-			struct memory_block **mem_p,
-			unsigned long state, enum mem_add_context context)
+static int add_memory_block(int base_section_nr)
 {
-	struct memory_block *mem = NULL;
-	int scn_nr = __section_nr(section);
-	int ret = 0;
-
-	mutex_lock(&mem_sysfs_mutex);
-
-	if (context == BOOT) {
-		/* same memory block ? */
-		if (mem_p && *mem_p)
-			if (scn_nr >= (*mem_p)->start_section_nr &&
-			    scn_nr <= (*mem_p)->end_section_nr) {
-				mem = *mem_p;
-				kobject_get(&mem->dev.kobj);
-			}
-	} else
-		mem = find_memory_block(section);
-
-	if (mem) {
-		mem->section_count++;
-		kobject_put(&mem->dev.kobj);
-	} else {
-		ret = init_memory_block(&mem, section, state);
-		/* store memory_block pointer for next loop */
-		if (!ret && context == BOOT)
-			if (mem_p)
-				*mem_p = mem;
-	}
+	struct memory_block *mem;
+	int i, ret, section_count = 0, section_nr;
 
-	if (!ret) {
-		if (context == HOTPLUG &&
-		    mem->section_count == sections_per_block)
-			ret = register_mem_sect_under_node(mem, nid);
+	for (i = base_section_nr;
+	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+	     i++) {
+		if (!present_section_nr(i))
+			continue;
+		if (section_count == 0)
+			section_nr = i;
+		section_count++;
 	}
 
-	mutex_unlock(&mem_sysfs_mutex);
-	return ret;
+	if (section_count == 0)
+		return 0;
+	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+	if (ret)
+		return ret;
+	mem->section_count = section_count;
+	return 0;
 }
 
+
 /*
  * need an interface for the VM to add new memory regions,
  * but without onlining it.
 */
 int register_new_memory(int nid, struct mem_section *section)
 {
-	return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
+	int ret = 0;
+	struct memory_block *mem;
+
+	mutex_lock(&mem_sysfs_mutex);
+
+	mem = find_memory_block(section);
+	if (mem) {
+		mem->section_count++;
+		put_device(&mem->dev);
+	} else {
+		ret = init_memory_block(&mem, section, MEM_OFFLINE);
+		if (ret)
+			goto out;
+	}
+
+	if (mem->section_count == sections_per_block)
+		ret = register_mem_sect_under_node(mem, nid);
+out:
+	mutex_unlock(&mem_sysfs_mutex);
+	return ret;
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -665,7 +642,7 @@ unregister_memory(struct memory_block *memory)
 	BUG_ON(memory->dev.bus != &memory_subsys);
 
 	/* drop the ref. we got in remove_memory_block() */
-	kobject_put(&memory->dev.kobj);
+	put_device(&memory->dev);
 	device_unregister(&memory->dev);
 }
 
@@ -682,7 +659,7 @@ static int remove_memory_block(unsigned long node_id,
 	if (mem->section_count == 0)
 		unregister_memory(mem);
 	else
-		kobject_put(&mem->dev.kobj);
+		put_device(&mem->dev);
 
 	mutex_unlock(&mem_sysfs_mutex);
 	return 0;
@@ -735,7 +712,6 @@ int __init memory_dev_init(void)
 	int ret;
 	int err;
 	unsigned long block_sz;
-	struct memory_block *mem = NULL;
 
 	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
 	if (ret)
@@ -748,17 +724,13 @@ int __init memory_dev_init(void)
 	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
-	for (i = 0; i < NR_MEM_SECTIONS; i++) {
-		if (!present_section_nr(i))
-			continue;
-		/* don't need to reuse memory_block if only one per block */
-		err = add_memory_section(0, __nr_to_section(i),
-					 (sections_per_block == 1) ? NULL : &mem,
-					 MEM_ONLINE,
-					 BOOT);
+	mutex_lock(&mem_sysfs_mutex);
+	for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
+		err = add_memory_block(i);
 		if (!ret)
 			ret = err;
 	}
+	mutex_unlock(&mem_sysfs_mutex);
 
 out:
 	if (ret)
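For the memory.c changes above, the key structural point is that store_mem_state() no longer flips the state itself: it records the requested online_type and defers to device_online()/device_offline(), which take the device lock and call back into the subsystem's .online/.offline hooks, which is why the per-block state_mutex could be dropped. Roughly, the hookup on the memory bus looks like this (a sketch based on the surrounding file, not part of the diff itself):

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,		/* run by device_online() under the device lock */
	.offline = memory_subsys_offline,	/* run by device_offline() under the device lock */
};

So a write of "online_movable" to /sys/devices/system/memory/memoryN/state sets mem->online_type and then goes through device_online(), which ends up in memory_subsys_online() with the device lock held.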