From f3713fd9cff733d9df83116422d8e4af6e86b2bb Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Tue, 25 Feb 2014 15:01:45 -0800
Subject: ipc,mqueue: remove limits for the amount of system-wide queues

Commit 93e6f119c0ce ("ipc/mqueue: cleanup definition names and locations")
added global hardcoded limits to the amount of message queues that can be
created.  While these limits are per-namespace, in reality they end up
breaking userspace applications.  Historically users have, at least in
theory, been able to create up to INT_MAX queues, and limiting it to just
1024 is far too low and drastic for some workloads and use cases.  For
instance, Madars reports:

 "This update imposes bad limits on our multi-process application.  As
  our app uses approaches that each process opens its own set of queues
  (usually something about 3-5 queues per process).  In some scenarios
  we might run up to 3000 processes or more (which of-course for linux
  is not a problem).  Thus we might need up to 9000 queues or more.  All
  processes run under one user."

Other affected users can be found in Launchpad bug #1155695:
  https://bugs.launchpad.net/ubuntu/+source/manpages/+bug/1155695

Instead of increasing this limit, revert it entirely and fall back to the
original way of dealing with queue limits -- where once a user's resource
limit is reached, and all memory is used, new queues cannot be created.

Signed-off-by: Davidlohr Bueso
Reported-by: Madars Vitolins
Acked-by: Doug Ledford
Cc: Manfred Spraul
Cc: [3.5+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 ipc/mq_sysctl.c | 18 ++++++++++++------
 ipc/mqueue.c    |  6 +++---
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 383d638340b8..5bb8bfe67149 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -22,6 +22,16 @@ static void *get_mq(ctl_table *table)
 	return which;
 }
 
+static int proc_mq_dointvec(ctl_table *table, int write,
+	void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table mq_table;
+	memcpy(&mq_table, table, sizeof(mq_table));
+	mq_table.data = get_mq(table);
+
+	return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
+}
+
 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -33,12 +43,10 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
 		lenp, ppos);
 }
 #else
+#define proc_mq_dointvec NULL
 #define proc_mq_dointvec_minmax NULL
 #endif
 
-static int msg_queues_limit_min = MIN_QUEUESMAX;
-static int msg_queues_limit_max = HARD_QUEUESMAX;
-
 static int msg_max_limit_min = MIN_MSGMAX;
 static int msg_max_limit_max = HARD_MSGMAX;
 
@@ -51,9 +59,7 @@ static ctl_table mq_sysctls[] = {
 		.data		= &init_ipc_ns.mq_queues_max,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_mq_dointvec_minmax,
-		.extra1		= &msg_queues_limit_min,
-		.extra2		= &msg_queues_limit_max,
+		.proc_handler	= proc_mq_dointvec,
 	},
 	{
 		.procname	= "msg_max",
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ccf1f9fd263a..c3b31179122c 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -433,9 +433,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
 		error = -EACCES;
 		goto out_unlock;
 	}
-	if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
-	    (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
-	     !capable(CAP_SYS_RESOURCE))) {
+
+	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+	    !capable(CAP_SYS_RESOURCE)) {
 		error = -ENOSPC;
 		goto out_unlock;
 	}
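
Illustration of the sysctl side (not part of the commit): with the new
proc_mq_dointvec handler, writes to /proc/sys/fs/mqueue/queues_max go
through plain proc_dointvec and are no longer clamped to the old
HARD_QUEUESMAX (1024).  A minimal userspace sketch follows; it assumes
root privileges, and the value 50000 is an arbitrary example.

/*
 * Sketch only, not from the patch: raise queues_max past the old 1024
 * hard cap.  The sysctl path is the one touched by this patch; the
 * value written is arbitrary.  Requires root.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/fs/mqueue/queues_max", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Previously rejected; accepted once proc_dointvec handles it. */
	if (fprintf(f, "50000\n") < 0)
		perror("fprintf");
	fclose(f);
	return 0;
}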
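
On the mqueue.c side, queue creation now fails only when the namespace's
queues_max is reached (for callers without CAP_SYS_RESOURCE) or when the
caller's per-user RLIMIT_MSGQUEUE memory is exhausted, rather than at a
hardcoded 1024 cap.  A minimal sketch of that behavior, not part of the
patch -- the queue name prefix and the loop bound are arbitrary:

/*
 * Sketch only, not from the patch: create queues until a limit is hit.
 * Expect ENOSPC once queues_max is reached, or EMFILE/ENOMEM (depending
 * on kernel version) once RLIMIT_MSGQUEUE is exhausted.  Link with -lrt
 * on glibc.
 */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	char name[64];
	int i;

	for (i = 0; i < 5000; i++) {
		snprintf(name, sizeof(name), "/mq_limit_test_%d", i);
		if (mq_open(name, O_CREAT | O_EXCL | O_RDWR, 0600, NULL) == (mqd_t)-1) {
			perror("mq_open");
			break;
		}
	}
	printf("created %d queues\n", i);

	/* Clean up what was created so the run is repeatable. */
	while (i-- > 0) {
		snprintf(name, sizeof(name), "/mq_limit_test_%d", i);
		mq_unlink(name);
	}
	return 0;
}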