author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-13 12:38:49 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-13 12:38:49 -0700
commit | ad51271afc21a72479974713abb40ca4b96d1f6b (patch)
tree | e4281fa72a0bfbb08918e10546bdb6abe123f974 /fs
parent | 6735a1971a00a29a96aa3ea5dc08912bfee95c51 (diff)
parent | 3e8f399da490e6ac20a3cfd6aa404c9aa961a9a2 (diff)
download | linux-ad51271afc21a72479974713abb40ca4b96d1f6b.tar.bz2
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
- various misc things
- kexec updates
- sysctl core updates
- scripts/gdb updates
- checkpoint-restart updates
- ipc updates
- kernel/watchdog updates
- Kees's "rough equivalent to the glibc _FORTIFY_SOURCE=1 feature"
- "stackprotector: ascii armor the stack canary"
- more MM bits
- checkpatch updates
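
As background for the "stackprotector: ascii armor the stack canary" item above: ascii-armoring forces one byte of the stack canary to zero, so an overflow performed with C string functions (strcpy() and friends) stops at the embedded NUL and cannot overwrite or leak the remaining canary bytes. The snippet below is a minimal illustrative sketch of that masking step only; the helper name is invented here and this is not the kernel's implementation.

```c
#include <stdint.h>

/*
 * Illustrative sketch: clear the least-significant byte of a freshly
 * generated canary value. On a little-endian machine this is the first
 * canary byte in memory, so a string-based overflow hits a NUL terminator
 * before it can replace or disclose the rest of the canary.
 */
static inline uint64_t ascii_armor_canary(uint64_t canary)
{
	return canary & ~(uint64_t)0xff;
}
```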
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (96 commits)
writeback: rework wb_[dec|inc]_stat family of functions
ARM: samsung: usb-ohci: move inline before return type
video: fbdev: omap: move inline before return type
video: fbdev: intelfb: move inline before return type
USB: serial: safe_serial: move __inline__ before return type
drivers: tty: serial: move inline before return type
drivers: s390: move static and inline before return type
x86/efi: move asmlinkage before return type
sh: move inline before return type
MIPS: SMP: move asmlinkage before return type
m68k: coldfire: move inline before return type
ia64: sn: pci: move inline before type
ia64: move inline before return type
FRV: tlbflush: move asmlinkage before return type
CRIS: gpio: move inline before return type
ARM: HP Jornada 7XX: move inline before return type
ARM: KVM: move asmlinkage before type
checkpatch: improve the STORAGE_CLASS test
mm, migration: do not trigger OOM killer when migrating memory
drm/i915: use __GFP_RETRY_MAYFAIL
...
Diffstat (limited to 'fs')
-rw-r--r-- | fs/Kconfig | 1
-rw-r--r-- | fs/bfs/inode.c | 2
-rw-r--r-- | fs/eventpoll.c | 52
-rw-r--r-- | fs/fs-writeback.c | 8
-rw-r--r-- | fs/proc/base.c | 52
-rw-r--r-- | fs/proc/proc_sysctl.c | 26
-rw-r--r-- | fs/xfs/kmem.h | 10
7 files changed, 138 insertions, 13 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index b0e42b6a96b9..7aee6d699fd6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -80,7 +80,6 @@ config EXPORTFS_BLOCK_OPS
 config FILE_LOCKING
 	bool "Enable POSIX file locking API" if EXPERT
 	default y
-	select PERCPU_RWSEM
 	help
 	  This option enables standard file locking support, required
 	  for filesystems like NFS and for the flock() system
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 25e312cb6071..9a69392f1fb3 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -419,7 +419,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 		if (i_sblock > info->si_blocks ||
 		    i_eblock > info->si_blocks ||
 		    i_sblock > i_eblock ||
-		    i_eoff > s_size ||
+		    (i_eoff != le32_to_cpu(-1) && i_eoff > s_size) ||
 		    i_sblock * BFS_BSIZE > i_eoff) {
 			printf("Inode 0x%08x corrupted\n", i);
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a6d194831ed8..e767e4389cb1 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -960,10 +960,14 @@ static void ep_show_fdinfo(struct seq_file *m, struct file *f)
 	mutex_lock(&ep->mtx);
 	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 		struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
+		struct inode *inode = file_inode(epi->ffd.file);
 
-		seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
+		seq_printf(m, "tfd: %8d events: %8x data: %16llx "
+			   " pos:%lli ino:%lx sdev:%x\n",
 			   epi->ffd.fd, epi->event.events,
-			   (long long)epi->event.data);
+			   (long long)epi->event.data,
+			   (long long)epi->ffd.file->f_pos,
+			   inode->i_ino, inode->i_sb->s_dev);
 		if (seq_has_overflowed(m))
 			break;
 	}
@@ -1073,6 +1077,50 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 	return epir;
 }
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
+{
+	struct rb_node *rbp;
+	struct epitem *epi;
+
+	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+		epi = rb_entry(rbp, struct epitem, rbn);
+		if (epi->ffd.fd == tfd) {
+			if (toff == 0)
+				return epi;
+			else
+				toff--;
+		}
+		cond_resched();
+	}
+
+	return NULL;
+}
+
+struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
+				     unsigned long toff)
+{
+	struct file *file_raw;
+	struct eventpoll *ep;
+	struct epitem *epi;
+
+	if (!is_file_epoll(file))
+		return ERR_PTR(-EINVAL);
+
+	ep = file->private_data;
+
+	mutex_lock(&ep->mtx);
+	epi = ep_find_tfd(ep, tfd, toff);
+	if (epi)
+		file_raw = epi->ffd.file;
+	else
+		file_raw = ERR_PTR(-ENOENT);
+	mutex_unlock(&ep->mtx);
+
+	return file_raw;
+}
+#endif /* CONFIG_CHECKPOINT_RESTORE */
+
 /*
  * This is the callback that is passed to the wait queue wakeup
  * mechanism. It is called by the stored file descriptors when they
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8b426f83909f..245c430a2e41 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -380,8 +380,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 			struct page *page = radix_tree_deref_slot_protected(slot,
 							&mapping->tree_lock);
 			if (likely(page) && PageDirty(page)) {
-				__dec_wb_stat(old_wb, WB_RECLAIMABLE);
-				__inc_wb_stat(new_wb, WB_RECLAIMABLE);
+				dec_wb_stat(old_wb, WB_RECLAIMABLE);
+				inc_wb_stat(new_wb, WB_RECLAIMABLE);
 			}
 		}
 
@@ -391,8 +391,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 							&mapping->tree_lock);
 			if (likely(page)) {
 				WARN_ON_ONCE(!PageWriteback(page));
-				__dec_wb_stat(old_wb, WB_WRITEBACK);
-				__inc_wb_stat(new_wb, WB_WRITEBACK);
+				dec_wb_stat(old_wb, WB_WRITEBACK);
+				inc_wb_stat(new_wb, WB_WRITEBACK);
 			}
 		}
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f1e1927ccd48..88b773f318cd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1355,6 +1355,53 @@ static const struct file_operations proc_fault_inject_operations = {
 	.write		= proc_fault_inject_write,
 	.llseek		= generic_file_llseek,
 };
+
+static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	struct task_struct *task;
+	int err, n;
+
+	task = get_proc_task(file_inode(file));
+	if (!task)
+		return -ESRCH;
+	put_task_struct(task);
+	if (task != current)
+		return -EPERM;
+	err = kstrtoint_from_user(buf, count, 10, &n);
+	if (err)
+		return err;
+	if (n < 0 || n == INT_MAX)
+		return -EINVAL;
+	current->fail_nth = n + 1;
+	return count;
+}
+
+static ssize_t proc_fail_nth_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *ppos)
+{
+	struct task_struct *task;
+	int err;
+
+	task = get_proc_task(file_inode(file));
+	if (!task)
+		return -ESRCH;
+	put_task_struct(task);
+	if (task != current)
+		return -EPERM;
+	if (count < 1)
+		return -EINVAL;
+	err = put_user((char)(current->fail_nth ? 'N' : 'Y'), buf);
+	if (err)
+		return err;
+	current->fail_nth = 0;
+	return 1;
+}
+
+static const struct file_operations proc_fail_nth_operations = {
+	.read		= proc_fail_nth_read,
+	.write		= proc_fail_nth_write,
+};
 #endif
 
 
@@ -3311,6 +3358,11 @@ static const struct pid_entry tid_base_stuff[] = {
 #endif
 #ifdef CONFIG_FAULT_INJECTION
 	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
+	/*
+	 * Operations on the file check that the task is current,
+	 * so we create it with 0666 to support testing under unprivileged user.
+	 */
+	REG("fail-nth", 0666, proc_fail_nth_operations),
 #endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
 	ONE("io", S_IRUSR, proc_tid_io_accounting),
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 9bf06e2b1284..8f479229b349 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1078,16 +1078,30 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
 	return -EINVAL;
 }
 
+static int sysctl_check_table_array(const char *path, struct ctl_table *table)
+{
+	int err = 0;
+
+	if ((table->proc_handler == proc_douintvec) ||
+	    (table->proc_handler == proc_douintvec_minmax)) {
+		if (table->maxlen != sizeof(unsigned int))
+			err |= sysctl_err(path, table, "array now allowed");
+	}
+
+	return err;
+}
+
 static int sysctl_check_table(const char *path, struct ctl_table *table)
 {
 	int err = 0;
 	for (; table->procname; table++) {
 		if (table->child)
-			err = sysctl_err(path, table, "Not a file");
+			err |= sysctl_err(path, table, "Not a file");
 
 		if ((table->proc_handler == proc_dostring) ||
 		    (table->proc_handler == proc_dointvec) ||
 		    (table->proc_handler == proc_douintvec) ||
+		    (table->proc_handler == proc_douintvec_minmax) ||
 		    (table->proc_handler == proc_dointvec_minmax) ||
 		    (table->proc_handler == proc_dointvec_jiffies) ||
 		    (table->proc_handler == proc_dointvec_userhz_jiffies) ||
@@ -1095,15 +1109,17 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
 		    (table->proc_handler == proc_doulongvec_minmax) ||
 		    (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
 			if (!table->data)
-				err = sysctl_err(path, table, "No data");
+				err |= sysctl_err(path, table, "No data");
 			if (!table->maxlen)
-				err = sysctl_err(path, table, "No maxlen");
+				err |= sysctl_err(path, table, "No maxlen");
+			else
+				err |= sysctl_check_table_array(path, table);
 		}
 		if (!table->proc_handler)
-			err = sysctl_err(path, table, "No proc_handler");
+			err |= sysctl_err(path, table, "No proc_handler");
 
 		if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode)
-			err = sysctl_err(path, table, "bogus .mode 0%o",
+			err |= sysctl_err(path, table, "bogus .mode 0%o",
 					 table->mode);
 	}
 	return err;
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index d6ea520162b2..4d85992d75b2 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -54,6 +54,16 @@ kmem_flags_convert(xfs_km_flags_t flags)
 			lflags &= ~__GFP_FS;
 	}
 
+	/*
+	 * Default page/slab allocator behavior is to retry for ever
+	 * for small allocations. We can override this behavior by using
+	 * __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
+	 * as it is feasible but rather fail than retry forever for all
+	 * request sizes.
+	 */
+	if (flags & KM_MAYFAIL)
+		lflags |= __GFP_RETRY_MAYFAIL;
+
 	if (flags & KM_ZERO)
 		lflags |= __GFP_ZERO;
 
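
The fs/proc/base.c hunk above exposes a new per-task fail-nth file for the fault-injection framework. Below is a hedged userspace sketch, based only on the proc_fail_nth_write()/proc_fail_nth_read() handlers shown in this diff and assuming a kernel built with CONFIG_FAULT_INJECTION: it writes a value to arm the systematic-failure counter, then reads the file back, where 'Y' means current->fail_nth has dropped to zero (the armed failure was consumed) and 'N' means it has not; the read also clears the counter. The surrounding program structure and error handling are illustrative, not part of the kernel interface.

```c
/*
 * Hedged sketch of driving /proc/<pid>/task/<tid>/fail-nth from userspace.
 * Semantics follow proc_fail_nth_write()/proc_fail_nth_read() in the diff
 * above; everything else here is illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	char result = '?';
	int fd;

	/* For the main thread, the tid equals the pid. */
	snprintf(path, sizeof(path), "/proc/self/task/%d/fail-nth",
		 (int)getpid());

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open fail-nth");
		return 1;
	}

	/*
	 * Arm the counter; the written value selects which upcoming
	 * fault-injection site should fail (see proc_fail_nth_write()).
	 */
	if (write(fd, "1", 1) != 1)
		perror("write fail-nth");

	/* ... exercise the syscall under test here ... */

	/*
	 * 'Y' once fail_nth has reached zero, 'N' otherwise; the read
	 * also resets the counter (see proc_fail_nth_read()).
	 */
	if (read(fd, &result, 1) == 1)
		printf("fault injected: %c\n", result);

	close(fd);
	return 0;
}
```

Because the handlers reject any task other than current and the entry is created with mode 0666, an unprivileged test process can drive its own file, which is the rationale given in the comment added to tid_base_stuff[] above.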