author     Jiri Kosina <jkosina@suse.cz>    2016-04-18 11:18:55 +0200
committer  Jiri Kosina <jkosina@suse.cz>    2016-04-18 11:18:55 +0200
commit     9938b04472d5c59f8bd8152a548533a8599596a2 (patch)
tree       0fc8318100878c5e446076613ec02a97aa179119 /ipc
parent     bd7ced98812dbb906950d8b0ec786f14f631cede (diff)
parent     c3b46c73264b03000d1e18b22f5caf63332547c9 (diff)
download   linux-9938b04472d5c59f8bd8152a548533a8599596a2.tar.bz2
Merge branch 'master' into for-next
Sync with Linus' tree so that patches against newer codebase can be applied.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'ipc')
 -rw-r--r--  ipc/mqueue.c  | 14
 -rw-r--r--  ipc/msgutil.c |  1
 -rw-r--r--  ipc/sem.c     | 15
 -rw-r--r--  ipc/shm.c     | 55
 -rw-r--r--  ipc/util.c    | 11
 -rw-r--r--  ipc/util.h    |  2
 6 files changed, 67 insertions, 31 deletions
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 161a1807e6ef..ade739f67f1d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -307,8 +307,8 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
struct inode *inode;
struct ipc_namespace *ns = data;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = MQUEUE_MAGIC;
sb->s_op = &mqueue_super_ops;
@@ -795,7 +795,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
ro = mnt_want_write(mnt); /* we'll drop it in any case */
error = 0;
- mutex_lock(&d_inode(root)->i_mutex);
+ inode_lock(d_inode(root));
path.dentry = lookup_one_len(name->name, root, strlen(name->name));
if (IS_ERR(path.dentry)) {
error = PTR_ERR(path.dentry);
@@ -841,7 +841,7 @@ out_putfd:
put_unused_fd(fd);
fd = error;
}
- mutex_unlock(&d_inode(root)->i_mutex);
+ inode_unlock(d_inode(root));
if (!ro)
mnt_drop_write(mnt);
out_putname:
@@ -866,7 +866,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
err = mnt_want_write(mnt);
if (err)
goto out_name;
- mutex_lock_nested(&d_inode(mnt->mnt_root)->i_mutex, I_MUTEX_PARENT);
+ inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
dentry = lookup_one_len(name->name, mnt->mnt_root,
strlen(name->name));
if (IS_ERR(dentry)) {
@@ -884,7 +884,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
dput(dentry);
out_unlock:
- mutex_unlock(&d_inode(mnt->mnt_root)->i_mutex);
+ inode_unlock(d_inode(mnt->mnt_root));
if (inode)
iput(inode);
mnt_drop_write(mnt);
@@ -1438,7 +1438,7 @@ static int __init init_mqueue_fs(void)
mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
sizeof(struct mqueue_inode_info), 0,
- SLAB_HWCACHE_ALIGN, init_once);
+ SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
if (mqueue_inode_cachep == NULL)
return -ENOMEM;
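The locking hunks above are part of a tree-wide conversion away from poking i_mutex directly. The inode_lock() family of helpers lives in <linux/fs.h>; at this point in the tree they are roughly the following (simplified sketch; the underlying lock later became an rwsem), so the mq_open()/mq_unlink() changes are a mechanical substitution with no change in locking behaviour:

	static inline void inode_lock(struct inode *inode)
	{
		mutex_lock(&inode->i_mutex);
	}

	static inline void inode_unlock(struct inode *inode)
	{
		mutex_unlock(&inode->i_mutex);
	}

	static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
	{
		mutex_lock_nested(&inode->i_mutex, subclass);
	}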
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 71f448e5e927..ed81aafd2392 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -123,7 +123,6 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
size_t len = src->m_ts;
size_t alen;
- WARN_ON(dst == NULL);
if (src->m_ts > dst->m_ts)
return ERR_PTR(-EINVAL);
diff --git a/ipc/sem.c b/ipc/sem.c
index b471e5a3863d..b3757ea0694b 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -92,7 +92,14 @@
/* One semaphore structure for each semaphore in the system. */
struct sem {
int semval; /* current value */
- int sempid; /* pid of last operation */
+ /*
+ * PID of the process that last modified the semaphore. For
+ * Linux, specifically these are:
+ * - semop
+ * - semctl, via SETVAL and SETALL.
+ * - at task exit when performing undo adjustments (see exit_sem).
+ */
+ int sempid;
spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head pending_alter; /* pending single-sop operations */
/* that alter the semaphore */
@@ -1444,8 +1451,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_unlock;
}
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
sma->sem_base[i].semval = sem_io[i];
+ sma->sem_base[i].sempid = task_tgid_vnr(current);
+ }
ipc_assert_locked_object(&sma->sem_perm);
list_for_each_entry(un, &sma->list_id, list_id) {
@@ -1493,7 +1502,7 @@ out_rcu_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
if (sem_io != fast_sem_io)
- ipc_free(sem_io, sizeof(ushort)*nsems);
+ ipc_free(sem_io);
return err;
}
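The SETALL hunk above makes semctl(SETALL) record the caller's PID in sempid, matching what semop() and SETVAL already do, so a subsequent GETPID reports whoever last set the values. A small userspace illustration (a hypothetical test program, not part of the patch):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	union semun {			/* callers must define this themselves, see semctl(2) */
		int val;
		struct semid_ds *buf;
		unsigned short *array;
	};

	int main(void)
	{
		unsigned short vals[1] = { 1 };
		union semun arg = { .array = vals };
		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

		semctl(id, 0, SETALL, arg);
		/* With the change above, GETPID now reflects this process after
		 * SETALL, just as it already did after SETVAL or semop(). */
		printf("sempid=%d pid=%d\n", semctl(id, 0, GETPID), getpid());

		semctl(id, 0, IPC_RMID);
		return 0;
	}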
diff --git a/ipc/shm.c b/ipc/shm.c
index 41787276e141..331fc1b0b3c7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
/*
- * We raced in the idr lookup or with shm_destroy(). Either way, the
- * ID is busted.
+ * Callers of shm_lock() must validate the status of the returned ipc
+ * object pointer (as returned by ipc_lock()), and error out as
+ * appropriate.
*/
- WARN_ON(IS_ERR(ipcp));
-
+ if (IS_ERR(ipcp))
+ return (void *)ipcp;
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
}
-/* This is called by fork, once for every shm attach. */
-static void shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id);
+
+ if (IS_ERR(shp))
+ return PTR_ERR(shp);
+
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
shm_unlock(shp);
+ return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open(struct vm_area_struct *vma)
+{
+ int err = __shm_open(vma);
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ WARN_ON_ONCE(err);
}
/*
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);
+
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ if (WARN_ON_ONCE(IS_ERR(shp)))
+ goto done; /* no-op */
+
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
shm_destroy(ns, shp);
else
shm_unlock(shp);
+done:
up_write(&shm_ids(ns).rwsem);
}
@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
struct shm_file_data *sfd = shm_file_data(file);
int ret;
+ /*
+ * In case of remap_file_pages() emulation, the file can represent
+ * removed IPC ID: propagate shm_lock() error to caller.
+ */
+ ret = __shm_open(vma);
+ if (ret)
+ return ret;
+
ret = sfd->file->f_op->mmap(sfd->file, vma);
- if (ret != 0)
+ if (ret) {
+ shm_close(vma);
return ret;
+ }
sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
WARN_ON(!sfd->vm_ops->fault);
#endif
vma->vm_ops = &shm_vm_ops;
- shm_open(vma);
-
- return ret;
+ return 0;
}
static int shm_release(struct inode *ino, struct file *file)
@@ -459,7 +492,7 @@ static const struct file_operations shm_file_operations_huge = {
.fallocate = shm_fallocate,
};
-int is_file_shm_hugepages(struct file *file)
+bool is_file_shm_hugepages(struct file *file)
{
return file->f_op == &shm_file_operations_huge;
}
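The __shm_open()/shm_mmap() rework above exists because the remap_file_pages() emulation can end up re-mmap()ing a mapping whose SysV ID has already been removed, and shm_lock() would then hit its WARN_ON on a busted ID. With these changes the stale ID is reported to userspace as an error instead. A sketch of the triggering sequence (illustrative only; the exact errno may vary):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <sys/mman.h>

	int main(void)
	{
		int id = shmget(IPC_PRIVATE, 3 * 4096, IPC_CREAT | 0600);
		void *p = shmat(id, NULL, 0);

		shmctl(id, IPC_RMID, NULL);	/* ID removed while the mapping is still attached */

		/* remap_file_pages() is emulated by re-mmap()ing the backing file,
		 * which calls back into shm_mmap(); after the change above this
		 * fails cleanly rather than warning and leaving a broken mapping. */
		if (remap_file_pages(p, 3 * 4096, 0, 0, 0) != 0)
			perror("remap_file_pages");

		return 0;
	}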
diff --git a/ipc/util.c b/ipc/util.c
index 0f401d94b7c6..798cad18dd87 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -414,17 +414,12 @@ void *ipc_alloc(int size)
/**
* ipc_free - free ipc space
* @ptr: pointer returned by ipc_alloc
- * @size: size of block
*
- * Free a block created with ipc_alloc(). The caller must know the size
- * used in the allocation call.
+ * Free a block created with ipc_alloc().
*/
-void ipc_free(void *ptr, int size)
+void ipc_free(void *ptr)
{
- if (size > PAGE_SIZE)
- vfree(ptr);
- else
- kfree(ptr);
+ kvfree(ptr);
}
/**
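The size argument to ipc_free() can go away because kvfree() (in mm/util.c) determines from the pointer itself whether the buffer came from kmalloc() or vmalloc(); the semctl_main() caller in the sem.c hunk above therefore no longer has to carry the allocation size around. Roughly, the helper does (simplified sketch of the mm code):

	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}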
diff --git a/ipc/util.h b/ipc/util.h
index 3a8a5a0eca62..51f7ca58ac67 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -118,7 +118,7 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
* both function can sleep
*/
void *ipc_alloc(int size);
-void ipc_free(void *ptr, int size);
+void ipc_free(void *ptr);
/*
* For allocation that need to be freed by RCU.