Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      9
-rw-r--r--  lib/kobject.c         95
-rw-r--r--  lib/percpu_counter.c   4
3 files changed, 45 insertions, 63 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index db25707aa41b..6982094a7e74 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -761,6 +761,15 @@ config PANIC_ON_OOPS_VALUE
default 0 if !PANIC_ON_OOPS
default 1 if PANIC_ON_OOPS
+config PANIC_TIMEOUT
+ int "panic timeout"
+ default 0
+ help
+ Set the timeout value (in seconds) until a reboot occurs when
+ the kernel panics. If n = 0, then we wait forever. A timeout
+ value n > 0 will wait n seconds before rebooting, while a timeout
+ value n < 0 will reboot immediately.
+
config SCHED_DEBUG
bool "Collect scheduler debugging info"
depends on DEBUG_KERNEL && PROC_FS
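The PANIC_TIMEOUT help text above covers three cases: n = 0 waits forever, n > 0 reboots after n seconds, and n < 0 reboots immediately. As a minimal sketch of how panic() consumes this value (loosely modeled on kernel/panic.c; the delay loop and step size here are illustrative and not part of this patch):

	/* Illustrative sketch only -- not taken from this patch. */
	if (panic_timeout > 0) {
		int i;
		/* n > 0: wait n seconds before rebooting */
		for (i = 0; i < panic_timeout * 1000; i += 100)
			mdelay(100);
	}
	if (panic_timeout != 0) {
		/* n < 0 reaches here immediately; n == 0 never does */
		emergency_restart();
	}
	/* n == 0: fall through and spin forever */

The value can also be set at boot with the panic= kernel command-line parameter or at runtime via the kernel.panic sysctl; this Kconfig option only selects the built-in default.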
diff --git a/lib/kobject.c b/lib/kobject.c
index 5b4b8886435e..b0b26665c611 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -13,11 +13,11 @@
*/
#include <linux/kobject.h>
-#include <linux/kobj_completion.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/stat.h>
#include <linux/slab.h>
+#include <linux/random.h>
/**
* kobject_namespace - return @kobj's namespace tag
@@ -65,13 +65,17 @@ static int populate_dir(struct kobject *kobj)
static int create_dir(struct kobject *kobj)
{
+ const struct kobj_ns_type_operations *ops;
int error;
error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
- if (!error) {
- error = populate_dir(kobj);
- if (error)
- sysfs_remove_dir(kobj);
+ if (error)
+ return error;
+
+ error = populate_dir(kobj);
+ if (error) {
+ sysfs_remove_dir(kobj);
+ return error;
}
/*
@@ -80,7 +84,20 @@ static int create_dir(struct kobject *kobj)
*/
sysfs_get(kobj->sd);
- return error;
+ /*
+ * If @kobj has ns_ops, its children need to be filtered based on
+ * their namespace tags. Enable namespace support on @kobj->sd.
+ */
+ ops = kobj_child_ns_ops(kobj);
+ if (ops) {
+ BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE);
+ BUG_ON(ops->type >= KOBJ_NS_TYPES);
+ BUG_ON(!kobj_ns_type_registered(ops->type));
+
+ kernfs_enable_ns(kobj->sd);
+ }
+
+ return 0;
}
static int get_kobj_path_length(struct kobject *kobj)
@@ -247,8 +264,10 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
return 0;
kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
- if (!kobj->name)
+ if (!kobj->name) {
+ kobj->name = old_name;
return -ENOMEM;
+ }
/* ewww... some of these buggers have '/' in the name ... */
while ((s = strchr(kobj->name, '/')))
@@ -346,7 +365,7 @@ static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
*
* If @parent is set, then the parent of the @kobj will be set to it.
* If @parent is NULL, then the parent of the @kobj will be set to the
- * kobject associted with the kset assigned to this kobject. If no kset
+ * kobject associated with the kset assigned to this kobject. If no kset
* is assigned to the kobject, then the kobject will be located in the
* root of the sysfs tree.
*
@@ -536,7 +555,7 @@ out:
*/
void kobject_del(struct kobject *kobj)
{
- struct sysfs_dirent *sd;
+ struct kernfs_node *sd;
if (!kobj)
return;
@@ -625,10 +644,12 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
- kobject_name(kobj), kobj, __func__, kobj->parent);
+ unsigned long delay = HZ + HZ * (get_random_int() & 0x3);
+ pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent, delay);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
- schedule_delayed_work(&kobj->release, HZ);
+
+ schedule_delayed_work(&kobj->release, delay);
#else
kobject_cleanup(kobj);
#endif
@@ -760,55 +781,6 @@ const struct sysfs_ops kobj_sysfs_ops = {
};
/**
- * kobj_completion_init - initialize a kobj_completion object.
- * @kc: kobj_completion
- * @ktype: type of kobject to initialize
- *
- * kobj_completion structures can be embedded within structures with different
- * lifetime rules. During the release of the enclosing object, we can
- * wait on the release of the kobject so that we don't free it while it's
- * still busy.
- */
-void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype)
-{
- init_completion(&kc->kc_unregister);
- kobject_init(&kc->kc_kobj, ktype);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_init);
-
-/**
- * kobj_completion_release - release a kobj_completion object
- * @kobj: kobject embedded in kobj_completion
- *
- * Used with kobject_release to notify waiters that the kobject has been
- * released.
- */
-void kobj_completion_release(struct kobject *kobj)
-{
- struct kobj_completion *kc = kobj_to_kobj_completion(kobj);
- complete(&kc->kc_unregister);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_release);
-
-/**
- * kobj_completion_del_and_wait - release the kobject and wait for it
- * @kc: kobj_completion object to release
- *
- * Delete the kobject from sysfs and drop the reference count. Then wait
- * until any other outstanding references are also dropped. This routine
- * is only necessary once other references may have been taken on the
- * kobject. Typically this happens when the kobject has been published
- * to sysfs via kobject_add.
- */
-void kobj_completion_del_and_wait(struct kobj_completion *kc)
-{
- kobject_del(&kc->kc_kobj);
- kobject_put(&kc->kc_kobj);
- wait_for_completion(&kc->kc_unregister);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait);
-
-/**
* kset_register - initialize and add a kset.
* @k: kset.
*/
@@ -835,6 +807,7 @@ void kset_unregister(struct kset *k)
{
if (!k)
return;
+ kobject_del(&k->kobj);
kobject_put(&k->kobj);
}
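Taken together, the kobject.c hunks above tighten the add/remove paths: kobject_add() still picks the parent as documented (an explicit parent, else the kset's embedded kobject, else the sysfs root), and kset_unregister() now calls kobject_del() before dropping its reference. A minimal, hypothetical usage sketch of that flow (the example_* names and "node0" are made up for illustration; a real ktype would also supply sysfs_ops and attributes):

	#include <linux/kobject.h>
	#include <linux/init.h>

	static void example_release(struct kobject *kobj)
	{
		/* a real user frees the structure embedding the kobject here */
	}

	static struct kobj_type example_ktype = {
		.release = example_release,
	};

	static struct kset *example_kset;
	static struct kobject example_kobj;

	static int __init example_init(void)
	{
		/* create a kset under /sys/kernel; error handling omitted */
		example_kset = kset_create_and_add("example", NULL, kernel_kobj);

		kobject_init(&example_kobj, &example_ktype);
		example_kobj.kset = example_kset;
		/* parent == NULL, so the parent becomes the kset's embedded kobject */
		kobject_add(&example_kobj, NULL, "node%d", 0);
		return 0;
	}

	static void __exit example_exit(void)
	{
		kobject_del(&example_kobj);
		kobject_put(&example_kobj);
		kset_unregister(example_kset);	/* now also kobject_del()s the kset */
	}

Note that if kobject_add() fails, kobject_put() must still be called so the name allocated by kobject_init/kobject_set_name_vargs is released.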
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7473ee3b4ee7..8280a5dd1727 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count - amount);
raw_spin_unlock_irqrestore(&fbc->lock, flags);
- __this_cpu_write(*fbc->counters, 0);
} else {
- __this_cpu_write(*fbc->counters, count);
+ this_cpu_add(*fbc->counters, amount);
}
preempt_enable();
}
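The __percpu_counter_add() hunk above stops overwriting the per-CPU slot with __this_cpu_write() and instead adjusts it with this_cpu_add()/__this_cpu_sub(), so an update arriving from interrupt context on the same CPU between the read and the write is no longer lost. A small, hypothetical usage sketch of the pattern this protects (nr_events is made up; percpu_counter_init() takes an additional gfp_t argument on newer kernels):

	#include <linux/percpu_counter.h>

	static struct percpu_counter nr_events;	/* hypothetical counter */

	/* setup, error handling omitted */
	percpu_counter_init(&nr_events, 0);

	/* process-context caller */
	percpu_counter_add(&nr_events, 1);

	/* the same counter may also be bumped from an IRQ/softirq handler;
	 * the irq-safe this_cpu_add() in the hunk above keeps such updates
	 * from being discarded when another path folds into fbc->count */
	percpu_counter_add(&nr_events, 1);

	/* exact total: folds all per-CPU deltas under fbc->lock */
	s64 total = percpu_counter_sum(&nr_events);

	percpu_counter_destroy(&nr_events);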